Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-06-23 18:28:34 +00:00)

Add DeepSeek and LMStudio providers

- Integrate DeepSeek and LMStudio AI providers
- Add message processing utilities for improved handling
- Implement reasoning panel for message actions
- Add logging functionality to UI
- Update configurations and dependencies
.gitignore (vendored, 1 line changed)

@@ -11,6 +11,7 @@ yarn-error.log
 # IDE/Editor specific
 .vscode/
 .idea/
+.qodo/
 *.iml
 
 # Environment variables
docker-compose.yaml

@@ -37,6 +37,7 @@ services:
       args:
         - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
         - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+      network: host
     image: itzcrazykns1337/perplexica-frontend:main
     depends_on:
       - perplexica-backend
sample.config.toml

@@ -15,12 +15,19 @@ API_KEY = ""
 [MODELS.GEMINI]
 API_KEY = ""
 
-[MODELS.CUSTOM_OPENAI]
+[MODELS.DEEPSEEK]
 API_KEY = ""
-API_URL = ""
 
 [MODELS.OLLAMA]
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 
+[MODELS.LMSTUDIO]
+API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234
+
+[MODELS.CUSTOM_OPENAI]
+API_KEY = ""
+API_URL = ""
+MODEL_NAME = ""
+
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
src/config.ts

@@ -23,9 +23,15 @@ interface Config {
   GEMINI: {
     API_KEY: string;
   };
+  DEEPSEEK: {
+    API_KEY: string;
+  };
   OLLAMA: {
     API_URL: string;
   };
+  LMSTUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;

@@ -61,11 +67,15 @@ export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
 
 export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
 
+export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
+
 export const getSearxngApiEndpoint = () =>
   process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
 
 export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
 
+export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LMSTUDIO.API_URL;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
 
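The new getters mirror the TOML sections added above. A minimal sketch (not part of this commit) of how downstream code can check them before registering a provider; the relative import path is assumed:

import { getDeepseekApiKey, getLMStudioApiEndpoint } from './config';

// Empty values mean the provider loaders further down will skip registration.
if (!getDeepseekApiKey()) {
  console.warn('MODELS.DEEPSEEK.API_KEY is empty; DeepSeek models will be skipped');
}
if (!getLMStudioApiEndpoint()) {
  console.warn('MODELS.LMSTUDIO.API_URL is empty; LM Studio models will be skipped');
}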
src/lib/deepseekChat.ts (new file, 251 lines)

@@ -0,0 +1,251 @@
import { BaseChatModel, BaseChatModelCallOptions } from '@langchain/core/language_models/chat_models';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import { AIMessage, AIMessageChunk, BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { ChatResult, ChatGenerationChunk } from '@langchain/core/outputs';
import axios from 'axios';

import { BaseChatModelParams } from '@langchain/core/language_models/chat_models';

interface DeepSeekChatParams extends BaseChatModelParams {
  apiKey: string;
  baseURL: string;
  modelName: string;
  temperature?: number;
  max_tokens?: number;
  top_p?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
}

export class DeepSeekChat extends BaseChatModel<BaseChatModelCallOptions & { stream?: boolean }> {
  private apiKey: string;
  private baseURL: string;
  private modelName: string;
  private temperature: number;
  private maxTokens: number;
  private topP: number;
  private frequencyPenalty: number;
  private presencePenalty: number;

  constructor(params: DeepSeekChatParams) {
    super(params);
    this.apiKey = params.apiKey;
    this.baseURL = params.baseURL;
    this.modelName = params.modelName;
    this.temperature = params.temperature ?? 0.7;
    this.maxTokens = params.max_tokens ?? 8192;
    this.topP = params.top_p ?? 1;
    this.frequencyPenalty = params.frequency_penalty ?? 0;
    this.presencePenalty = params.presence_penalty ?? 0;
  }

  async _generate(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): Promise<ChatResult> {
    const formattedMessages = messages.map(msg => ({
      role: this.getRole(msg),
      content: msg.content.toString(),
    }));
    const response = await this.callDeepSeekAPI(formattedMessages, options.stream);

    if (options.stream) {
      return this.processStreamingResponse(response, messages, options, runManager);
    } else {
      const choice = response.data.choices[0];
      let content = choice.message.content || '';
      if (choice.message.reasoning_content) {
        content = `<think>\n${choice.message.reasoning_content}\n</think>\n\n${content}`;
      }

      // Report usage stats if available
      if (response.data.usage && runManager) {
        runManager.handleLLMEnd({
          generations: [],
          llmOutput: {
            tokenUsage: {
              completionTokens: response.data.usage.completion_tokens,
              promptTokens: response.data.usage.prompt_tokens,
              totalTokens: response.data.usage.total_tokens
            }
          }
        });
      }
      return {
        generations: [
          {
            text: content,
            message: new AIMessage(content),
          },
        ],
      };
    }
  }

  private getRole(msg: BaseMessage): string {
    if (msg instanceof SystemMessage) return 'system';
    if (msg instanceof HumanMessage) return 'user';
    if (msg instanceof AIMessage) return 'assistant';
    return 'user'; // Default to user
  }

  private async callDeepSeekAPI(messages: Array<{ role: string; content: string }>, streaming?: boolean) {
    return axios.post(
      `${this.baseURL}/chat/completions`,
      {
        messages,
        model: this.modelName,
        stream: streaming,
        temperature: this.temperature,
        max_tokens: this.maxTokens,
        top_p: this.topP,
        frequency_penalty: this.frequencyPenalty,
        presence_penalty: this.presencePenalty,
        response_format: { type: 'text' },
        ...(streaming && {
          stream_options: {
            include_usage: true
          }
        })
      },
      {
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${this.apiKey}`,
        },
        responseType: streaming ? 'text' : 'json',
      }
    );
  }

  public async *_streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun) {
    const response = await this.callDeepSeekAPI(messages.map(msg => ({
      role: this.getRole(msg),
      content: msg.content.toString(),
    })), true);

    let thinkState = -1; // -1: not started, 0: thinking, 1: answered
    let currentContent = '';

    // Split the response into lines
    const lines = response.data.split('\n');
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const jsonStr = line.slice(6);
      if (jsonStr === '[DONE]') break;

      try {
        console.log('Received chunk:', jsonStr);
        const chunk = JSON.parse(jsonStr);
        const delta = chunk.choices[0].delta;
        console.log('Parsed delta:', delta);

        // Handle usage stats in final chunk
        if (chunk.usage && !chunk.choices?.length) {
          runManager?.handleLLMEnd?.({
            generations: [],
            llmOutput: {
              tokenUsage: {
                completionTokens: chunk.usage.completion_tokens,
                promptTokens: chunk.usage.prompt_tokens,
                totalTokens: chunk.usage.total_tokens
              }
            }
          });
          continue;
        }

        // Handle reasoning content
        if (delta.reasoning_content) {
          if (thinkState === -1) {
            thinkState = 0;
            const startTag = '<think>\n';
            currentContent += startTag;
            console.log('Emitting think start:', startTag);
            runManager?.handleLLMNewToken(startTag);
            const chunk = new ChatGenerationChunk({
              text: startTag,
              message: new AIMessageChunk(startTag),
              generationInfo: {}
            });
            yield chunk;
          }
          currentContent += delta.reasoning_content;
          console.log('Emitting reasoning:', delta.reasoning_content);
          runManager?.handleLLMNewToken(delta.reasoning_content);
          const chunk = new ChatGenerationChunk({
            text: delta.reasoning_content,
            message: new AIMessageChunk(delta.reasoning_content),
            generationInfo: {}
          });
          yield chunk;
        }

        // Handle regular content
        if (delta.content) {
          if (thinkState === 0) {
            thinkState = 1;
            const endTag = '\n</think>\n\n';
            currentContent += endTag;
            console.log('Emitting think end:', endTag);
            runManager?.handleLLMNewToken(endTag);
            const chunk = new ChatGenerationChunk({
              text: endTag,
              message: new AIMessageChunk(endTag),
              generationInfo: {}
            });
            yield chunk;
          }
          currentContent += delta.content;
          console.log('Emitting content:', delta.content);
          runManager?.handleLLMNewToken(delta.content);
          const chunk = new ChatGenerationChunk({
            text: delta.content,
            message: new AIMessageChunk(delta.content),
            generationInfo: {}
          });
          yield chunk;
        }
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : 'Failed to parse chunk';
        console.error(`Streaming error: ${errorMessage}`);
        if (error instanceof Error && error.message.includes('DeepSeek API Error')) {
          throw error;
        }
      }
    }

    // Handle any unclosed think block
    if (thinkState === 0) {
      const endTag = '\n</think>\n\n';
      currentContent += endTag;
      runManager?.handleLLMNewToken(endTag);
      const chunk = new ChatGenerationChunk({
        text: endTag,
        message: new AIMessageChunk(endTag),
        generationInfo: {}
      });
      yield chunk;
    }
  }

  private async processStreamingResponse(response: any, messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): Promise<ChatResult> {
    let accumulatedContent = '';
    for await (const chunk of this._streamResponseChunks(messages, options, runManager)) {
      accumulatedContent += chunk.message.content;
    }
    return {
      generations: [
        {
          text: accumulatedContent,
          message: new AIMessage(accumulatedContent),
        },
      ],
    };
  }

  _llmType(): string {
    return 'deepseek';
  }
}
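A short usage sketch (not part of this commit) showing how the class above plugs into LangChain's BaseChatModel interface; the environment variable name and prompt are illustrative only:

import { HumanMessage } from '@langchain/core/messages';
import { DeepSeekChat } from './deepseekChat';

const demo = async () => {
  const model = new DeepSeekChat({
    apiKey: process.env.DEEPSEEK_API_KEY ?? '', // assumed env var, not from the commit
    baseURL: 'https://api.deepseek.com',
    modelName: 'deepseek-reasoner',
  });

  // invoke() is inherited from BaseChatModel and ends up in _generate() above.
  const reply = await model.invoke([new HumanMessage('Why is the sky blue?')]);

  // For deepseek-reasoner the text is prefixed with a <think>...</think> block,
  // which MessageBox.tsx later splits out into the ReasoningPanel.
  console.log(reply.content);
};

demo().catch(console.error);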
src/lib/providers/deepseek.ts (new file, 69 lines)

@@ -0,0 +1,69 @@
import { DeepSeekChat } from '../deepseekChat';
import logger from '../../utils/logger';
import { getDeepseekApiKey } from '../../config';
import axios from 'axios';

interface DeepSeekModel {
  id: string;
  object: string;
  owned_by: string;
}

interface ModelListResponse {
  object: 'list';
  data: DeepSeekModel[];
}

interface ChatModelConfig {
  displayName: string;
  model: DeepSeekChat;
}

const MODEL_DISPLAY_NAMES: Record<string, string> = {
  'deepseek-reasoner': 'DeepSeek R1',
  'deepseek-chat': 'DeepSeek V3'
};

export const loadDeepSeekChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
  const deepSeekEndpoint = 'https://api.deepseek.com';

  const apiKey = getDeepseekApiKey();
  if (!apiKey) return {};

  if (!deepSeekEndpoint || !apiKey) {
    logger.debug('DeepSeek endpoint or API key not configured, skipping');
    return {};
  }

  try {
    const response = await axios.get<{ data: DeepSeekModel[] }>(`${deepSeekEndpoint}/models`, {
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
      },
    });

    const deepSeekModels = response.data.data;

    const chatModels = deepSeekModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
      // Only include models we have display names for
      if (model.id in MODEL_DISPLAY_NAMES) {
        acc[model.id] = {
          displayName: MODEL_DISPLAY_NAMES[model.id],
          model: new DeepSeekChat({
            apiKey,
            baseURL: deepSeekEndpoint,
            modelName: model.id,
            temperature: 0.7,
          }),
        };
      }
      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading DeepSeek models: ${String(err)}`);
    return {};
  }
};
src/lib/providers/index.ts

@@ -4,6 +4,8 @@ import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { loadDeepSeekChatModels } from './deepseek';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,

@@ -17,6 +19,8 @@ const chatModelProviders = {
   ollama: loadOllamaChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  deepseek: loadDeepSeekChatModels,
+  lm_studio: loadLMStudioChatModels,
 };
 
 const embeddingModelProviders = {

@@ -24,6 +28,7 @@ const embeddingModelProviders = {
   local: loadTransformersEmbeddingsModels,
   ollama: loadOllamaEmbeddingsModels,
   gemini: loadGeminiEmbeddingsModels,
+  lm_studio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
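A hedged sketch (not in the commit) of how the new registry keys surface to callers; the import path and the exact return shape of getAvailableChatModelProviders are assumed from the loader signatures above:

import { getAvailableChatModelProviders } from './lib/providers'; // path assumed

const listProviders = async () => {
  const providers = await getAvailableChatModelProviders();

  // With a DeepSeek key and an LM Studio endpoint configured, the keys registered
  // above should now appear alongside openai, ollama, anthropic, gemini, etc.
  console.log(Object.keys(providers)); // e.g. [..., 'deepseek', 'lm_studio']
  console.log(providers['deepseek']?.['deepseek-reasoner']?.displayName); // 'DeepSeek R1'
};

listProviders().catch(console.error);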
src/lib/providers/lmstudio.ts (new file, 96 lines)

@@ -0,0 +1,96 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getLMStudioApiEndpoint, getKeepAlive } from '../../config';
import logger from '../../utils/logger';
import axios from 'axios';

interface LMStudioModel {
  id: string;
  name?: string;
}

const ensureV1Endpoint = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
  try {
    const keepAlive = getKeepAlive();
    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      timeout: parseInt(keepAlive) * 1000 || 5000,
      headers: { 'Content-Type': 'application/json' },
    });
    return true;
  } catch {
    return false;
  }
};

export const loadLMStudioChatModels = async () => {
  const endpoint = getLMStudioApiEndpoint();
  const keepAlive = getKeepAlive();

  if (!endpoint) return {};
  if (!await checkServerAvailability(endpoint)) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      timeout: parseInt(keepAlive) * 1000 || 5000,
      headers: { 'Content-Type': 'application/json' },
    });

    const chatModels = response.data.data.reduce((acc: Record<string, any>, model: LMStudioModel) => {
      acc[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
          modelName: model.id,
          temperature: 0.7,
          streaming: true,
          maxRetries: 3
        }),
      };
      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading LM Studio models: ${err}`);
    return {};
  }
};

export const loadLMStudioEmbeddingsModels = async () => {
  const endpoint = getLMStudioApiEndpoint();
  const keepAlive = getKeepAlive();

  if (!endpoint) return {};
  if (!await checkServerAvailability(endpoint)) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      timeout: parseInt(keepAlive) * 1000 || 5000,
      headers: { 'Content-Type': 'application/json' },
    });

    const embeddingsModels = response.data.data.reduce((acc: Record<string, any>, model: LMStudioModel) => {
      acc[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
          modelName: model.id,
        }),
      };
      return acc;
    }, {});

    return embeddingsModels;
  } catch (err) {
    logger.error(`Error loading LM Studio embeddings model: ${err}`);
    return {};
  }
};
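A quick illustration (not part of the commit) of the endpoint normalisation above; since ensureV1Endpoint is module-private, the same expression is simply restated here:

const ensureV1 = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

console.log(ensureV1('http://host.docker.internal:1234'));    // http://host.docker.internal:1234/v1
console.log(ensureV1('http://host.docker.internal:1234/v1')); // unchanged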
src/routes/config.ts

@@ -9,6 +9,7 @@ import {
   getAnthropicApiKey,
   getGeminiApiKey,
   getOpenaiApiKey,
+  getDeepseekApiKey,
   updateConfig,
   getCustomOpenaiApiUrl,
   getCustomOpenaiApiKey,

@@ -57,6 +58,7 @@ router.get('/', async (_, res) => {
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
+    config['deepseekApiKey'] = getDeepseekApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();

@@ -85,6 +87,9 @@ router.post('/', async (req, res) => {
       GEMINI: {
         API_KEY: config.geminiApiKey,
       },
+      DEEPSEEK: {
+        API_KEY: config.deepseekApiKey,
+      },
       OLLAMA: {
         API_URL: config.ollamaApiUrl,
       },
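A hedged client-side sketch (not in the commit) of how the new field reaches the POST handler above; the /api/config path is assumed from the existing saveConfig flow, and the real settings page submits the full config object rather than a single field:

const saveDeepseekKey = async (key: string) => {
  await fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    // deepseekApiKey is the field read by the POST hunk above (config.deepseekApiKey).
    body: JSON.stringify({ deepseekApiKey: key }),
  });
};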
src/search/metaSearchAgent.ts

@@ -11,7 +11,7 @@ import {
   RunnableMap,
   RunnableSequence,
 } from '@langchain/core/runnables';
-import { BaseMessage } from '@langchain/core/messages';
+import { BaseMessage, SystemMessage, HumanMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
 import LineOutputParser from '../lib/outputParsers/lineOutputParser';

@@ -23,6 +23,7 @@ import fs from 'fs';
 import computeSimilarity from '../utils/computeSimilarity';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import eventEmitter from 'events';
+import { getMessageProcessor } from '../utils/messageProcessor';
 import { StreamEvent } from '@langchain/core/tracers/log_stream';
 import { IterableReadableStream } from '@langchain/core/utils/stream';

@@ -475,10 +476,41 @@ class MetaSearchAgent implements MetaSearchAgentType {
       optimizationMode,
     );
 
+    // Create all messages including system prompt and new query
+    const allMessages = [
+      new SystemMessage(this.config.responsePrompt),
+      ...history,
+      new HumanMessage(message)
+    ];
+
+    // Get message processor if model needs it
+    const messageProcessor = getMessageProcessor((llm as any).modelName);
+    const processedMessages = messageProcessor
+      ? messageProcessor.processMessages(allMessages)
+      : allMessages;
+
+    // Extract system message and chat history
+    const systemMessage = processedMessages[0];
+    const chatHistory = processedMessages.slice(1, -1);
+    const userQuery = processedMessages[processedMessages.length - 1];
+
+    // Extract string content from message
+    const getStringContent = (content: any): string => {
+      if (typeof content === 'string') return content;
+      if (Array.isArray(content)) return content.map(getStringContent).join('\n');
+      if (typeof content === 'object' && content !== null) {
+        if ('text' in content) return content.text;
+        if ('value' in content) return content.value;
+      }
+      return String(content || '');
+    };
+
+    const queryContent = getStringContent(userQuery.content);
+
     const stream = answeringChain.streamEvents(
       {
-        chat_history: history,
-        query: message,
+        chat_history: chatHistory,
+        query: queryContent,
       },
       {
         version: 'v1',
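To make the flattening behaviour concrete, here is a standalone copy of the getStringContent helper added above (the sample input is hypothetical):

const getStringContent = (content: any): string => {
  if (typeof content === 'string') return content;
  if (Array.isArray(content)) return content.map(getStringContent).join('\n');
  if (typeof content === 'object' && content !== null) {
    if ('text' in content) return content.text;
    if ('value' in content) return content.value;
  }
  return String(content || '');
};

// Multimodal-style content collapses to plain text before it reaches the chain.
console.log(getStringContent([{ type: 'text', text: 'What is LM Studio?' }]));
// => What is LM Studio?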
src/utils/messageProcessor.ts (new file, 95 lines)

@@ -0,0 +1,95 @@
// Using the import paths that have been working for you
import { BaseMessage, HumanMessage, AIMessage, SystemMessage } from "@langchain/core/messages";
import logger from "./logger";

export interface MessageValidationRules {
  requireAlternating?: boolean;
  firstMessageType?: typeof HumanMessage | typeof AIMessage;
  allowSystem?: boolean;
}

export class MessageProcessor {
  private rules: MessageValidationRules;
  private modelName: string;

  constructor(modelName: string, rules: MessageValidationRules) {
    this.rules = rules;
    this.modelName = modelName;
  }

  processMessages(messages: BaseMessage[]): BaseMessage[] {
    // Always respect requireAlternating for models that need it
    if (!this.rules.requireAlternating) {
      return messages;
    }

    const processedMessages: BaseMessage[] = [];

    for (let i = 0; i < messages.length; i++) {
      const currentMsg = messages[i];

      // Handle system messages
      if (currentMsg instanceof SystemMessage) {
        if (this.rules.allowSystem) {
          processedMessages.push(currentMsg);
        } else {
          logger.warn(`${this.modelName}: Skipping system message - not allowed`);
        }
        continue;
      }

      // Handle first non-system message
      if (processedMessages.length === 0 ||
          processedMessages[processedMessages.length - 1] instanceof SystemMessage) {
        if (this.rules.firstMessageType &&
            !(currentMsg instanceof this.rules.firstMessageType)) {
          logger.warn(`${this.modelName}: Converting first message to required type`);
          processedMessages.push(new this.rules.firstMessageType({
            content: currentMsg.content,
            additional_kwargs: currentMsg.additional_kwargs
          }));
          continue;
        }
      }

      // Handle alternating pattern
      const lastMsg = processedMessages[processedMessages.length - 1];
      if (lastMsg instanceof HumanMessage && currentMsg instanceof HumanMessage) {
        logger.warn(`${this.modelName}: Skipping consecutive human message`);
        continue;
      }
      if (lastMsg instanceof AIMessage && currentMsg instanceof AIMessage) {
        logger.warn(`${this.modelName}: Skipping consecutive AI message`);
        continue;
      }

      // For deepseek-reasoner, strip out reasoning_content from message history
      if (this.modelName === 'deepseek-reasoner' && currentMsg instanceof AIMessage) {
        const { reasoning_content, ...cleanedKwargs } = currentMsg.additional_kwargs;
        processedMessages.push(new AIMessage({
          content: currentMsg.content,
          additional_kwargs: cleanedKwargs
        }));
      } else {
        processedMessages.push(currentMsg);
      }
    }

    return processedMessages;
  }
}

// Pre-configured processors for specific models
export const getMessageProcessor = (modelName: string): MessageProcessor | null => {
  const processors: Record<string, MessageValidationRules> = {
    'deepseek-reasoner': {
      requireAlternating: true,
      firstMessageType: HumanMessage,
      allowSystem: true
    },
    // Add more model configurations as needed
  };

  const rules = processors[modelName];
  return rules ? new MessageProcessor(modelName, rules) : null;
};
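A hypothetical example (not part of the commit) of how the deepseek-reasoner rules above reshape a history: consecutive same-role messages are dropped and reasoning_content is stripped from assistant turns:

import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { getMessageProcessor } from './messageProcessor';

const processor = getMessageProcessor('deepseek-reasoner');

const cleaned = processor?.processMessages([
  new SystemMessage('You are a helpful assistant.'),
  new HumanMessage('Hello'),
  new HumanMessage('Hello again'), // dropped: consecutive human message
  new AIMessage({
    content: 'Hi there!',
    additional_kwargs: { reasoning_content: 'The user greeted me twice.' }, // stripped
  }),
  new HumanMessage('What can you do?'),
]);

console.log(cleaned?.length); // 4: system, human, assistant (cleaned), human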
ui/app/settings/page.tsx

@@ -19,6 +19,7 @@ interface SettingsType {
   groqApiKey: string;
   anthropicApiKey: string;
   geminiApiKey: string;
+  deepseekApiKey: string;
   ollamaApiUrl: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;

@@ -791,6 +792,25 @@ const Page = () => {
                 onSave={(value) => saveConfig('geminiApiKey', value)}
               />
             </div>
+
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                DeepSeek API Key
+              </p>
+              <Input
+                type="text"
+                placeholder="DeepSeek API key"
+                value={config.deepseekApiKey}
+                isSaving={savingStates['deepseekApiKey']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    deepseekApiKey: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('deepseekApiKey', value)}
+              />
+            </div>
           </div>
         </SettingsSection>
       </div>
ui/components/MessageActions/ReasoningPanel.tsx (new file, 120 lines)

@@ -0,0 +1,120 @@
'use client';

import * as React from 'react';
import { Brain, ChevronDown, Maximize2, Minimize2 } from 'lucide-react';
import { cn } from '@/lib/utils';
import Markdown from 'markdown-to-jsx';
import logger from '@/lib/logger';

interface ReasoningPanelProps {
  thinking: string;
  className?: string;
  isExpanded?: boolean;
}

const ReasoningPanel = ({ thinking, className, isExpanded: propExpanded }: ReasoningPanelProps): React.ReactElement => {
  const [isExpanded, setIsExpanded] = React.useState(true);
  const [detailsRefs, setDetailsRefs] = React.useState<HTMLDetailsElement[]>([]);

  logger.info('ReasoningPanel rendering with:', {
    thinking: thinking,
    isExpanded: propExpanded,
    detailsRefsCount: detailsRefs.length
  });

  React.useEffect(() => {
    if (propExpanded !== undefined) {
      logger.info('Updating expansion state:', propExpanded);
      setIsExpanded(propExpanded);
    }
  }, [propExpanded]);

  const addDetailsRef = React.useCallback((element: HTMLDetailsElement | null) => {
    if (element) {
      setDetailsRefs(refs => {
        if (!refs.includes(element)) {
          logger.info('Adding new details ref');
          return [...refs, element];
        }
        return refs;
      });
    }
  }, []);

  const expandAll = () => {
    logger.info('Expanding all details');
    detailsRefs.forEach(ref => ref.open = true);
  };
  const collapseAll = () => {
    logger.info('Collapsing all details');
    detailsRefs.forEach(ref => ref.open = false);
  };

  return (
    <div className={cn("flex flex-col space-y-2 mb-4", className)}>
      <button
        onClick={() => setIsExpanded(!isExpanded)}
        className="flex flex-row items-center space-x-2 group text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white transition duration-200"
        type="button"
      >
        <Brain size={20} />
        <h3 className="font-medium text-xl">Reasoning</h3>
        <ChevronDown
          size={16}
          className={cn(
            "transition-transform duration-200",
            isExpanded ? "rotate-180" : ""
          )}
        />
      </button>

      {isExpanded && (
        <div className="rounded-lg bg-light-secondary/50 dark:bg-dark-secondary/50 p-4">
          {thinking.split('\n\n').map((paragraph, index) => {
            if (!paragraph.trim()) return null;

            // Extract content without the bullet prefix
            const content = paragraph.replace(/^[•\-\d.]\s*/, '');
            logger.info(`Processing paragraph ${index}:`, content);

            return (
              <div key={index} className="mb-2 last:mb-0">
                <details
                  ref={addDetailsRef}
                  className="group [&_summary::-webkit-details-marker]:hidden"
                >
                  <summary className="flex items-center cursor-pointer list-none text-sm text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white">
                    <span className="arrow mr-2 inline-block transition-transform duration-200 group-open:rotate-90 group-open:self-start group-open:mt-1">▸</span>
                    <p className="relative whitespace-normal line-clamp-1 group-open:line-clamp-none after:content-['...'] after:inline group-open:after:hidden transition-all duration-200 text-ellipsis overflow-hidden group-open:overflow-visible">
                      {content}
                    </p>
                  </summary>
                  {/* Content is shown in the summary when expanded - no need to render it again */}
                </details>
              </div>
            );
          })}
          <div className="flex justify-end space-x-2 mt-4 text-sm text-black/70 dark:text-white/70">
            <button
              onClick={expandAll}
              className="flex items-center space-x-1 hover:text-[#24A0ED] transition-colors"
            >
              <Maximize2 size={10} />
              <span className="text-xs">Expand all</span>
            </button>
            <span>•</span>
            <button
              onClick={collapseAll}
              className="flex items-center space-x-1 hover:text-[#24A0ED] transition-colors"
            >
              <Minimize2 size={10} />
              <span className="text-xs">Collapse all</span>
            </button>
          </div>
        </div>
      )}
    </div>
  );
};

export default ReasoningPanel;
ui/components/MessageBox.tsx

@@ -4,6 +4,7 @@
 import React, { MutableRefObject, useEffect, useState } from 'react';
 import { Message } from './ChatWindow';
 import { cn } from '@/lib/utils';
+import logger from '@/lib/logger';
 import {
   BookCopy,
   Disc3,

@@ -12,6 +13,7 @@ import {
   Layers3,
   Plus,
 } from 'lucide-react';
+import ReasoningPanel from './MessageActions/ReasoningPanel';
 import Markdown from 'markdown-to-jsx';
 import Copy from './MessageActions/Copy';
 import Rewrite from './MessageActions/Rewrite';

@@ -41,26 +43,66 @@ const MessageBox = ({
 }) => {
   const [parsedMessage, setParsedMessage] = useState(message.content);
   const [speechMessage, setSpeechMessage] = useState(message.content);
+  const [thinking, setThinking] = useState<string>('');
+  const [isThinkingExpanded, setIsThinkingExpanded] = useState(true);
 
   useEffect(() => {
-    const regex = /\[(\d+)\]/g;
-    if (
-      message.role === 'assistant' &&
-      message?.sources &&
-      message.sources.length > 0
-    ) {
-      return setParsedMessage(
-        message.content.replace(
-          regex,
-          (_, number) =>
-            `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
-        ),
-      );
-    }
-
-    setSpeechMessage(message.content.replace(regex, ''));
-    setParsedMessage(message.content);
+    logger.info(`Processing message:`, {
+      content: message.content,
+      role: message.role,
+      messageId: message.messageId
+    });
+
+    const regex = /\[(\d+)\]/g;
+    const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;
+
+    // Check for thinking content, including partial tags
+    const match = message.content.match(thinkRegex);
+    logger.info(`Think tag match:`, match);
+
+    if (match) {
+      const [_, thinkingContent, answerContent] = match;
+
+      // Set thinking content even if </think> hasn't appeared yet
+      if (thinkingContent) {
+        logger.info(`Found thinking content:`, thinkingContent.trim());
+        setThinking(thinkingContent.trim());
+        setIsThinkingExpanded(true); // Expand when thinking starts
+      }
+
+      // Only set answer content if we have it (after </think>)
+      if (answerContent) {
+        logger.info(`Found answer content:`, answerContent.trim());
+        setIsThinkingExpanded(false); // Collapse when thinking is done
+        // Process the answer part for sources if needed
+        if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+          setParsedMessage(
+            answerContent.trim().replace(
+              regex,
+              (_, number) =>
+                `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+            ),
+          );
+        } else {
+          setParsedMessage(answerContent.trim());
+        }
+        setSpeechMessage(answerContent.trim().replace(regex, ''));
+      }
+    } else {
+      // No thinking content - process as before
+      if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+        setParsedMessage(
+          message.content.replace(
+            regex,
+            (_, number) =>
+              `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+          ),
+        );
+      } else {
+        setParsedMessage(message.content);
+      }
+      setSpeechMessage(message.content.replace(regex, ''));
+    }
   }, [message.content, message.sources, message.role]);
 
   const { speechStatus, start, stop } = useSpeech({ text: speechMessage });

@@ -81,6 +123,7 @@ const MessageBox = ({
             ref={dividerRef}
             className="flex flex-col space-y-6 w-full lg:w-9/12"
           >
+            {thinking && <ReasoningPanel thinking={thinking} isExpanded={isThinkingExpanded} />}
             {message.sources && message.sources.length > 0 && (
               <div className="flex flex-col space-y-2">
                 <div className="flex flex-row items-center space-x-2">
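An illustration (not part of the commit) of what the thinkRegex above captures, both mid-stream before the closing tag arrives and once the answer has started; the sample strings are made up:

const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;

const partial = '<think>\nStep 1: recall Rayleigh scattering';
console.log(partial.match(thinkRegex)?.slice(1));
// => [ '\nStep 1: recall Rayleigh scattering', '' ]  (thinking shown, no answer yet)

const complete = '<think>\nStep 1...\n</think>\n\nThe sky is blue because...';
console.log(complete.match(thinkRegex)?.slice(1));
// => [ '\nStep 1...\n', '\n\nThe sky is blue because...' ]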
ui/lib/logger.ts (new file, 13 lines)

@@ -0,0 +1,13 @@
const logger = {
  info: (...args: any[]) => {
    console.log('[INFO]', ...args);
  },
  warn: (...args: any[]) => {
    console.warn('[WARN]', ...args);
  },
  error: (...args: any[]) => {
    console.error('[ERROR]', ...args);
  }
};

export default logger;
ui/package-lock.json (generated, new file, 6961 lines): diff suppressed because it is too large.
ui/tsconfig.json

@@ -1,5 +1,6 @@
 {
   "compilerOptions": {
+    "target": "es2018",
     "lib": ["dom", "dom.iterable", "esnext"],
     "allowJs": true,
     "skipLibCheck": true,
ui/yarn.lock (1090 lines changed): diff suppressed because it is too large.