Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-09-16 22:31:32 +00:00)

Compare commits: v1.10.0-rc...76ed952aa2 (8 commits)

Commits: 76ed952aa2, b20ea70089, 5220abae05, 9668056554, 1d6ab2c90c, 115e6b2a71, a5c79c92ed, db3cea446e
```diff
@@ -37,6 +37,7 @@ services:
       args:
         - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
         - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+      network: host
     image: itzcrazykns1337/perplexica-frontend:main
     depends_on:
       - perplexica-backend
```
````diff
@@ -10,23 +10,25 @@ To update Perplexica to the latest version, follow these steps:
    git clone https://github.com/ItzCrazyKns/Perplexica.git
    ```

-2. Navigate to the Project Directory.
+2. Navigate to the project directory.

-3. Pull latest images from registry.
+3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
+
+4. Pull the latest images from the registry.

    ```bash
    docker compose pull
    ```

-4. Update and Recreate containers.
+5. Update and recreate the containers.

    ```bash
    docker compose up -d
    ```

-5. Once the command completes running go to http://localhost:3000 and verify the latest changes.
+6. Once the command completes, go to http://localhost:3000 and verify the latest changes.

-## For non Docker users
+## For non-Docker users

 1. Clone the latest version of Perplexica from GitHub:

@@ -34,7 +36,14 @@ To update Perplexica to the latest version, follow these steps:
    git clone https://github.com/ItzCrazyKns/Perplexica.git
    ```

-2. Navigate to the Project Directory
-3. Execute `npm i` in both the `ui` folder and the root directory.
-4. Once packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
-5. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
+2. Navigate to the project directory.
+
+3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
+
+4. Execute `npm i` in both the `ui` folder and the root directory.
+
+5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
+
+6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
+
+---
````
package-lock.json (generated, new file, 6912 lines): file diff suppressed because it is too large.
```diff
@@ -15,12 +15,16 @@ API_KEY = ""
 [MODELS.GEMINI]
 API_KEY = ""

-[MODELS.CUSTOM_OPENAI]
-API_KEY = ""
-API_URL = ""
-
 [MODELS.OLLAMA]
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434

+[MODELS.LMSTUDIO]
+API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234
+
+[MODELS.CUSTOM_OPENAI]
+API_KEY = ""
+API_URL = ""
+MODEL_NAME = ""
+
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
```
```diff
@@ -26,6 +26,9 @@ interface Config {
     OLLAMA: {
       API_URL: string;
     };
+    LMSTUDIO: {
+      API_URL: string;
+    };
     CUSTOM_OPENAI: {
       API_URL: string;
       API_KEY: string;
```
```diff
@@ -66,6 +69,8 @@ export const getSearxngApiEndpoint = () =>

 export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;

+export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LMSTUDIO.API_URL;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

@@ -76,10 +81,6 @@ export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;

 const mergeConfigs = (current: any, update: any): any => {
-  if (update === null || update === undefined) {
-    return current;
-  }
-
   if (typeof current !== 'object' || current === null) {
     return update;
   }
```
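Note the behavioral edge this hunk introduces: with the null/undefined guard removed, `mergeConfigs(current, null)` no longer short-circuits to `current`; when `current` is a non-object (for example a string), the first remaining check now returns the null update. Below is a minimal self-contained sketch of a deep merge in this shape, assuming the unshown remainder of the function recurses over the update's keys (the helper name and recursion are hypothetical, not the repository's exact code):

```typescript
// Hypothetical sketch of a recursive config merge in the style of mergeConfigs.
// Only the first check appears in the hunk; the key recursion below is an
// assumption about the unshown remainder of the function.
const mergeConfigsSketch = (current: any, update: any): any => {
  // With the guard removed, a null/undefined update can now overwrite a
  // non-object `current` value here (the old code returned `current` instead).
  if (typeof current !== 'object' || current === null) {
    return update;
  }

  const result = { ...current };
  for (const key of Object.keys(update ?? {})) {
    result[key] = mergeConfigsSketch(current[key], update[key]);
  }
  return result;
};

// Example: a new LMSTUDIO section from an updated sample config is merged
// into an older config without clobbering existing sections.
console.log(
  mergeConfigsSketch(
    { MODELS: { OLLAMA: { API_URL: '' } } },
    { MODELS: { LMSTUDIO: { API_URL: '' } } },
  ),
); // => { MODELS: { OLLAMA: { API_URL: '' }, LMSTUDIO: { API_URL: '' } } }
```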
src/lib/chat/service.ts (new file, 1 line) @@ -0,0 +1 @@
```diff
@@ -4,6 +4,7 @@ import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
@@ -17,6 +18,7 @@ const chatModelProviders = {
   ollama: loadOllamaChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  lm_studio: loadLMStudioChatModels,
 };

 const embeddingModelProviders = {
@@ -24,6 +26,7 @@ const embeddingModelProviders = {
   local: loadTransformersEmbeddingsModels,
   ollama: loadOllamaEmbeddingsModels,
   gemini: loadGeminiEmbeddingsModels,
+  lm_studio: loadLMStudioEmbeddingsModels,
 };

 export const getAvailableChatModelProviders = async () => {
```
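Each provider is registered as an async loader keyed by a provider id, which lets `getAvailableChatModelProviders` treat them uniformly. A sketch of how such a registry is typically consumed, under the assumption (the hunk cuts off at the signature) that the function awaits each loader and drops providers that return no models:

```typescript
// Hypothetical consumer of the loader registry above. Each loader returns a
// (possibly empty) map of model id -> model config; an empty map means the
// provider is unconfigured or unreachable and is filtered out.
type Loader = () => Promise<Record<string, unknown>>;

const loadAll = async (registry: Record<string, Loader>) => {
  const available: Record<string, Record<string, unknown>> = {};
  for (const [provider, load] of Object.entries(registry)) {
    const models = await load();
    if (Object.keys(models).length > 0) {
      available[provider] = models;
    }
  }
  return available;
};

// e.g. `await loadAll(chatModelProviders)` would include an `lm_studio` key
// only when the LM Studio server responds at the configured endpoint.
```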
src/lib/providers/lmstudio.ts (new file, 125 lines) @@ -0,0 +1,125 @@

```typescript
import { OpenAIEmbeddings } from '@langchain/openai';
import { ChatOpenAI } from '@langchain/openai';
import { getKeepAlive, getLMStudioApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import axios from 'axios';

const ensureV1Endpoint = (endpoint: string): string => {
  return endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
};

interface LMStudioModel {
  id: string;
  // add other properties if LM Studio API provides them
}

interface ChatModelConfig {
  displayName: string;
  model: ChatOpenAI;
}

const checkLMStudioAvailability = async (endpoint: string): Promise<boolean> => {
  const v1Endpoint = ensureV1Endpoint(endpoint);
  try {
    await axios.get(`${v1Endpoint}/models`, {
      timeout: 1000, // 1 second timeout
      headers: {
        'Content-Type': 'application/json',
      },
    });
    return true;
  } catch (err) {
    logger.debug(`LM Studio server not available at ${endpoint}`);
    return false;
  }
};

export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
  const lmStudioEndpoint = getLMStudioApiEndpoint();

  if (!lmStudioEndpoint) {
    logger.debug('LM Studio endpoint not configured, skipping');
    return {};
  }

  // Check if server is available before attempting to load models
  const isAvailable = await checkLMStudioAvailability(lmStudioEndpoint);
  if (!isAvailable) {
    return {};
  }

  try {
    const v1Endpoint = ensureV1Endpoint(lmStudioEndpoint);
    const response = await axios.get<{ data: LMStudioModel[] }>(`${v1Endpoint}/models`, {
      timeout: 5000, // 5 second timeout for model loading
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const lmStudioModels = response.data.data;

    const chatModels = lmStudioModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
      acc[model.id] = {
        displayName: model.id,
        model: new ChatOpenAI({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(lmStudioEndpoint),
          },
          modelName: model.id,
          temperature: 0.7,
        }),
      };
      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading LM Studio models: ${err}`);
    return {};
  }
};

export const loadLMStudioEmbeddingsModels = async () => {
  const lmStudioEndpoint = getLMStudioApiEndpoint();

  if (!lmStudioEndpoint) return {};

  // Check if server is available before attempting to load models
  const isAvailable = await checkLMStudioAvailability(lmStudioEndpoint);
  if (!isAvailable) {
    return {};
  }

  try {
    const v1Endpoint = ensureV1Endpoint(lmStudioEndpoint);
    const response = await axios.get(`${v1Endpoint}/models`, {
      timeout: 5000, // 5 second timeout for model loading
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const lmStudioModels = response.data.data;

    const embeddingsModels = lmStudioModels.reduce((acc, model) => {
      acc[model.id] = {
        displayName: model.id,
        model: new OpenAIEmbeddings({
          openAIApiKey: 'lm-studio', // Dummy key required by LangChain
          configuration: {
            baseURL: ensureV1Endpoint(lmStudioEndpoint),
          },
          modelName: model.id,
        }),
      };
      return acc;
    }, {});

    return embeddingsModels;
  } catch (err) {
    logger.error(`Error loading LM Studio embeddings model: ${err}`);
    return {};
  }
};
```
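LM Studio exposes an OpenAI-compatible server, which is why this loader can talk to it with plain axios calls and hand the endpoint to `ChatOpenAI`/`OpenAIEmbeddings` with a dummy API key. A small usage sketch under that assumption; the endpoint value and model id shown are illustrative:

```typescript
import axios from 'axios';

// Assumed response shape of LM Studio's OpenAI-compatible /v1/models route:
// { "data": [ { "id": "qwen2.5-7b-instruct", ... }, ... ] }
const listLocalModels = async (endpoint: string) => {
  // Mirrors ensureV1Endpoint above: tolerate endpoints with or without /v1.
  const base = endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
  const res = await axios.get<{ data: { id: string }[] }>(`${base}/models`);
  return res.data.data.map((m) => m.id);
};

// listLocalModels('http://host.docker.internal:1234')
//   .then((ids) => console.log(ids)); // e.g. ['qwen2.5-7b-instruct']
```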
```diff
@@ -6,6 +6,7 @@ import {
 import {
   getGroqApiKey,
   getOllamaApiEndpoint,
+  getLMStudioApiEndpoint,
   getAnthropicApiKey,
   getGeminiApiKey,
   getOpenaiApiKey,
@@ -54,12 +55,13 @@ router.get('/', async (_, res) => {

     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
+    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();

     res.status(200).json(config);
   } catch (err: any) {
@@ -88,6 +90,9 @@ router.post('/', async (req, res) => {
       OLLAMA: {
         API_URL: config.ollamaApiUrl,
       },
+      LMSTUDIO: {
+        API_URL: config.lmStudioApiUrl,
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
```
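After this change, a GET on the config route returns the LM Studio endpoint alongside the other provider settings, and a POST accepts it back under `LMSTUDIO.API_URL`. The response shape implied by the getter block above, as an illustrative type (the field names come from the hunk; the interface itself is not in the repository):

```typescript
// Illustrative type for the GET config payload assembled above.
interface ConfigResponse {
  openaiApiKey: string;
  ollamaApiUrl: string;
  lmStudioApiUrl: string; // newly exposed by this commit
  anthropicApiKey: string;
  groqApiKey: string;
  geminiApiKey: string;
  customOpenaiApiUrl: string;
  customOpenaiApiKey: string;
  customOpenaiModelName: string;
}
```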
```diff
@@ -85,10 +85,12 @@ router.post('/', async (req, res) => {
   if (body.chatModel?.provider === 'custom_openai') {
     llm = new ChatOpenAI({
       modelName: body.chatModel?.model || getCustomOpenaiModelName(),
-      openAIApiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
+      openAIApiKey:
+        body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
       temperature: 0.7,
       configuration: {
-        baseURL: body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
+        baseURL:
+          body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
       },
     }) as unknown as BaseChatModel;
   } else if (
```
```diff
@@ -223,11 +223,11 @@ const Page = () => {
       setChatModels(data.chatModelProviders || {});
       setEmbeddingModels(data.embeddingModelProviders || {});

-      const currentProvider = selectedChatModelProvider;
-      const newProviders = Object.keys(data.chatModelProviders || {});
+      const currentChatProvider = selectedChatModelProvider;
+      const newChatProviders = Object.keys(data.chatModelProviders || {});

-      if (!currentProvider && newProviders.length > 0) {
-        const firstProvider = newProviders[0];
+      if (!currentChatProvider && newChatProviders.length > 0) {
+        const firstProvider = newChatProviders[0];
         const firstModel = data.chatModelProviders[firstProvider]?.[0]?.name;

         if (firstModel) {
@@ -237,11 +237,11 @@ const Page = () => {
           localStorage.setItem('chatModel', firstModel);
         }
       } else if (
-        currentProvider &&
+        currentChatProvider &&
         (!data.chatModelProviders ||
-          !data.chatModelProviders[currentProvider] ||
-          !Array.isArray(data.chatModelProviders[currentProvider]) ||
-          data.chatModelProviders[currentProvider].length === 0)
+          !data.chatModelProviders[currentChatProvider] ||
+          !Array.isArray(data.chatModelProviders[currentChatProvider]) ||
+          data.chatModelProviders[currentChatProvider].length === 0)
       ) {
         const firstValidProvider = Object.entries(
           data.chatModelProviders || {},
@@ -267,6 +267,55 @@ const Page = () => {
         }
       }

+      const currentEmbeddingProvider = selectedEmbeddingModelProvider;
+      const newEmbeddingProviders = Object.keys(
+        data.embeddingModelProviders || {},
+      );
+
+      if (!currentEmbeddingProvider && newEmbeddingProviders.length > 0) {
+        const firstProvider = newEmbeddingProviders[0];
+        const firstModel =
+          data.embeddingModelProviders[firstProvider]?.[0]?.name;
+
+        if (firstModel) {
+          setSelectedEmbeddingModelProvider(firstProvider);
+          setSelectedEmbeddingModel(firstModel);
+          localStorage.setItem('embeddingModelProvider', firstProvider);
+          localStorage.setItem('embeddingModel', firstModel);
+        }
+      } else if (
+        currentEmbeddingProvider &&
+        (!data.embeddingModelProviders ||
+          !data.embeddingModelProviders[currentEmbeddingProvider] ||
+          !Array.isArray(
+            data.embeddingModelProviders[currentEmbeddingProvider],
+          ) ||
+          data.embeddingModelProviders[currentEmbeddingProvider].length === 0)
+      ) {
+        const firstValidProvider = Object.entries(
+          data.embeddingModelProviders || {},
+        ).find(
+          ([_, models]) => Array.isArray(models) && models.length > 0,
+        )?.[0];
+
+        if (firstValidProvider) {
+          setSelectedEmbeddingModelProvider(firstValidProvider);
+          setSelectedEmbeddingModel(
+            data.embeddingModelProviders[firstValidProvider][0].name,
+          );
+          localStorage.setItem('embeddingModelProvider', firstValidProvider);
+          localStorage.setItem(
+            'embeddingModel',
+            data.embeddingModelProviders[firstValidProvider][0].name,
+          );
+        } else {
+          setSelectedEmbeddingModelProvider(null);
+          setSelectedEmbeddingModel(null);
+          localStorage.removeItem('embeddingModelProvider');
+          localStorage.removeItem('embeddingModel');
+        }
+      }
+
       setConfig(data);
     }
```
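The chat and embedding branches above repeat the same fallback rule: keep the stored provider while it still has models, otherwise jump to the first provider whose model list is non-empty, and clear the selection when none qualifies. A condensed sketch of that rule, assuming the provider-map shape used on this page (`Record<string, { name: string }[]>`); the helper name is hypothetical:

```typescript
// Hypothetical distillation of the provider-fallback logic on this page.
type ProviderMap = Record<string, { name: string }[]>;

const pickProvider = (
  providers: ProviderMap,
  stored: string | null,
): string | null => {
  // A stored selection survives only while it still maps to a non-empty list.
  if (stored && Array.isArray(providers[stored]) && providers[stored].length > 0) {
    return stored;
  }
  // Otherwise fall back to the first provider that actually has models.
  return (
    Object.entries(providers).find(
      ([, models]) => Array.isArray(models) && models.length > 0,
    )?.[0] ?? null
  );
};

// pickProvider({ lm_studio: [], ollama: [{ name: 'llama3' }] }, 'lm_studio')
//   => 'ollama'
```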
```diff
@@ -278,6 +327,10 @@ const Page = () => {
         localStorage.setItem('chatModelProvider', value);
       } else if (key === 'chatModel') {
         localStorage.setItem('chatModel', value);
+      } else if (key === 'embeddingModelProvider') {
+        localStorage.setItem('embeddingModelProvider', value);
+      } else if (key === 'embeddingModel') {
+        localStorage.setItem('embeddingModel', value);
       }
     } catch (err) {
       console.error('Failed to save:', err);
```
```diff
@@ -436,7 +489,6 @@ const Page = () => {
                     const value = e.target.value;
                     setSelectedChatModelProvider(value);
                     saveConfig('chatModelProvider', value);
-                    // Auto-select first model of new provider
                     const firstModel =
                       config.chatModelProviders[value]?.[0]?.name;
                     if (firstModel) {
```
```diff
@@ -554,6 +606,81 @@ const Page = () => {
                   </div>
                 </div>
               )}

+              {config.embeddingModelProviders && (
+                <div className="flex flex-col space-y-4 mt-4 pt-4 border-t border-light-200 dark:border-dark-200">
+                  <div className="flex flex-col space-y-1">
+                    <p className="text-black/70 dark:text-white/70 text-sm">
+                      Embedding Model Provider
+                    </p>
+                    <Select
+                      value={selectedEmbeddingModelProvider ?? undefined}
+                      onChange={(e) => {
+                        const value = e.target.value;
+                        setSelectedEmbeddingModelProvider(value);
+                        saveConfig('embeddingModelProvider', value);
+                        const firstModel =
+                          config.embeddingModelProviders[value]?.[0]?.name;
+                        if (firstModel) {
+                          setSelectedEmbeddingModel(firstModel);
+                          saveConfig('embeddingModel', firstModel);
+                        }
+                      }}
+                      options={Object.keys(config.embeddingModelProviders).map(
+                        (provider) => ({
+                          value: provider,
+                          label:
+                            provider.charAt(0).toUpperCase() +
+                            provider.slice(1),
+                        }),
+                      )}
+                    />
+                  </div>
+
+                  {selectedEmbeddingModelProvider && (
+                    <div className="flex flex-col space-y-1">
+                      <p className="text-black/70 dark:text-white/70 text-sm">
+                        Embedding Model
+                      </p>
+                      <Select
+                        value={selectedEmbeddingModel ?? undefined}
+                        onChange={(e) => {
+                          const value = e.target.value;
+                          setSelectedEmbeddingModel(value);
+                          saveConfig('embeddingModel', value);
+                        }}
+                        options={(() => {
+                          const embeddingModelProvider =
+                            config.embeddingModelProviders[
+                              selectedEmbeddingModelProvider
+                            ];
+                          return embeddingModelProvider
+                            ? embeddingModelProvider.length > 0
+                              ? embeddingModelProvider.map((model) => ({
+                                  value: model.name,
+                                  label: model.displayName,
+                                }))
+                              : [
+                                  {
+                                    value: '',
+                                    label: 'No models available',
+                                    disabled: true,
+                                  },
+                                ]
+                            : [
+                                {
+                                  value: '',
+                                  label:
+                                    'Invalid provider, please check backend logs',
+                                  disabled: true,
+                                },
+                              ];
+                        })()}
+                      />
+                    </div>
+                  )}
+                </div>
+              )}
+
             </SettingsSection>

             <SettingsSection title="API Keys">
```
```diff
@@ -11,6 +11,8 @@ import {
   StopCircle,
   Layers3,
   Plus,
+  Brain,
+  ChevronDown,
 } from 'lucide-react';
 import Markdown from 'markdown-to-jsx';
 import Copy from './MessageActions/Copy';
```
```diff
@@ -41,26 +43,58 @@ const MessageBox = ({
 }) => {
   const [parsedMessage, setParsedMessage] = useState(message.content);
   const [speechMessage, setSpeechMessage] = useState(message.content);
+  const [thinking, setThinking] = useState<string>('');
+  const [answer, setAnswer] = useState<string>('');
+  const [isThinkingExpanded, setIsThinkingExpanded] = useState(false);

   useEffect(() => {
     const regex = /\[(\d+)\]/g;
+    const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;

-    if (
-      message.role === 'assistant' &&
-      message?.sources &&
-      message.sources.length > 0
-    ) {
-      return setParsedMessage(
-        message.content.replace(
-          regex,
-          (_, number) =>
-            `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
-        ),
-      );
-    }
-
-    setSpeechMessage(message.content.replace(regex, ''));
-    setParsedMessage(message.content);
+    // Check for thinking content, including partial tags
+    const match = message.content.match(thinkRegex);
+    if (match) {
+      const [_, thinkingContent, answerContent] = match;
+
+      // Set thinking content even if </think> hasn't appeared yet
+      if (thinkingContent) {
+        setThinking(thinkingContent.trim());
+        setIsThinkingExpanded(true); // Auto-expand when thinking starts
+      }
+
+      // Only set answer content if we have it (after </think>)
+      if (answerContent) {
+        setAnswer(answerContent.trim());
+
+        // Process the answer part for sources if needed
+        if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+          setParsedMessage(
+            answerContent.trim().replace(
+              regex,
+              (_, number) =>
+                `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+            ),
+          );
+        } else {
+          setParsedMessage(answerContent.trim());
+        }
+        setSpeechMessage(answerContent.trim().replace(regex, ''));
+      }
+    } else {
+      // No thinking content - process as before
+      if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+        setParsedMessage(
+          message.content.replace(
+            regex,
+            (_, number) =>
+              `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+          ),
+        );
+      } else {
+        setParsedMessage(message.content);
+      }
+      setSpeechMessage(message.content.replace(regex, ''));
+    }
   }, [message.content, message.sources, message.role]);

   const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
```
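The `thinkRegex` deliberately makes the closing tag optional (`(?:<\/think>|$)`) so reasoning streamed from the model is shown while the `</think>` tag is still pending, and the `s` (dotAll) flag lets `.` cross newlines. A quick illustration of the two capture groups (the sample strings are made up):

```typescript
const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;

// Completed thinking: group 1 is the reasoning, group 2 the answer.
'<think>step 1\nstep 2</think>Final answer'.match(thinkRegex);
// => [..., 'step 1\nstep 2', 'Final answer']

// Still streaming: no closing tag yet, so group 2 is empty and only
// the reasoning is rendered.
'<think>step 1\nstep'.match(thinkRegex);
// => [..., 'step 1\nstep', '']
```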
```diff
@@ -92,6 +126,49 @@ const MessageBox = ({
               <MessageSources sources={message.sources} />
             </div>
           )}
+          <div className="flex flex-col space-y-4">
+            {thinking && (
+              <div className="flex flex-col space-y-2 mb-4">
+                <button
+                  onClick={() => setIsThinkingExpanded(!isThinkingExpanded)}
+                  className="flex flex-row items-center space-x-2 group text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white transition duration-200"
+                >
+                  <Brain size={20} />
+                  <h3 className="font-medium text-xl">Reasoning</h3>
+                  <ChevronDown
+                    size={16}
+                    className={cn(
+                      'transition-transform duration-200',
+                      isThinkingExpanded ? 'rotate-180' : '',
+                    )}
+                  />
+                </button>
+
+                {isThinkingExpanded && (
+                  <div className="rounded-lg bg-light-secondary/50 dark:bg-dark-secondary/50 p-4">
+                    {thinking.split('\n\n').map((paragraph, index) => {
+                      if (!paragraph.trim()) return null;
+
+                      const content = paragraph.replace(/^[•\-\d.]\s*/, '');
+
+                      return (
+                        <div key={index} className="mb-2 last:mb-0">
+                          <details className="group [&_summary::-webkit-details-marker]:hidden">
+                            <summary className="flex items-center cursor-pointer list-none text-sm text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white">
+                              <span className="arrow mr-2 inline-block transition-transform duration-200 group-open:rotate-90 group-open:self-start group-open:mt-1">▸</span>
+                              <p className="relative whitespace-normal line-clamp-1 group-open:line-clamp-none after:content-['...'] after:inline group-open:after:hidden transition-all duration-200 text-ellipsis overflow-hidden group-open:overflow-visible">
+                                {content}
+                              </p>
+                            </summary>
+                          </details>
+                        </div>
+                      );
+                    })}
+                  </div>
+                )}
+              </div>
+            )}

           <div className="flex flex-col space-y-2">
             <div className="flex flex-row items-center space-x-2">
               <Disc3
```
```diff
@@ -113,6 +190,7 @@ const MessageBox = ({
             >
               {parsedMessage}
             </Markdown>
+          </div>
           {loading && isLast ? null : (
             <div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
               <div className="flex flex-row items-center space-x-1">
```
```diff
@@ -1,5 +1,6 @@
 {
   "compilerOptions": {
+    "target": "es2018",
     "lib": ["dom", "dom.iterable", "esnext"],
     "allowJs": true,
     "skipLibCheck": true,
```
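The `target` bump to `es2018` is plausibly tied to the new `thinkRegex` in MessageBox: the `s` (dotAll) regex flag used there is an ES2018 feature, so older compilation targets can reject the literal. The connection is an inference from this diff, not stated in it; a minimal demonstration of what dotAll changes:

```typescript
// ES2018 dotAll: with the `s` flag, `.` also matches newline characters.
// Under a pre-ES2018 target, newer TypeScript versions reject the /s flag
// in regex literals.
const dotAll = /<think>(.*)/s;
console.log('<think>line 1\nline 2'.match(dotAll)?.[1]);
// => 'line 1\nline 2' (without `s`, `.` stops at the newline and the
//    capture would be just 'line 1')
```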