Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-06-20 16:58:30 +00:00)
refactor: remove unused deepseekChat.ts in favor of reasoningChatModel.ts, and messageProcessor.ts in favor of alternatingMessageValidator.ts

- Removed src/lib/deepseekChat.ts as it was duplicative
- All of its functionality is now handled by reasoningChatModel.ts
- No imports or references to deepseekChat.ts found in the codebase
- Removed src/utils/messageProcessor.ts as it was duplicative
- All of its functionality is now handled by alternatingMessageValidator.ts
- No imports or references to messageProcessor.ts found in the codebase
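Because MessageProcessor and AlternatingMessageValidator expose the same rules and the same processMessages method, a call site only needs to swap the factory function. A minimal sketch of that swap, assuming the validator sits next to the removed processor under src/utils and that a message array is already in hand (the relative import path is an assumption, not taken from this diff):

import { HumanMessage } from '@langchain/core/messages';
// Before: import { getMessageProcessor } from './messageProcessor';
import { getMessageValidator } from './alternatingMessageValidator'; // assumed path

const messages = [new HumanMessage('Hello'), new HumanMessage('Still there?')];

// getMessageValidator returns null for models with no special rules,
// so callers fall back to the raw history.
const validator = getMessageValidator('deepseek-reasoner');
const history = validator ? validator.processMessages(messages) : messages;
// For deepseek-reasoner the second consecutive human message is skipped,
// matching the behaviour of the removed MessageProcessor.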
@@ -71,7 +71,7 @@ export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;

export const getDeepseekStreamDelay = () =>
  loadConfig().MODELS.DEEPSEEK.STREAM_DELAY || 20; // Default to 20ms if not specified
  loadConfig().MODELS.DEEPSEEK.STREAM_DELAY || 5; // Default to 5ms if not specified

export const getSearxngApiEndpoint = () =>
  process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
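getDeepseekStreamDelay is read when the DeepSeek reasoning model is registered (see the loadDeepSeekChatModels hunk further down) and is used to pace token emission during streaming. A minimal sketch of how such a per-token delay could be applied; the paceTokens helper and the import path are illustrative assumptions, not the project's actual code:

import { getDeepseekStreamDelay } from '../config'; // assumed path

// Pause between emitted tokens so the UI renders the reasoning stream smoothly.
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function* paceTokens(tokens: AsyncIterable<string>): AsyncGenerator<string> {
  const delayMs = getDeepseekStreamDelay(); // 5 ms unless STREAM_DELAY is configured
  for await (const token of tokens) {
    yield token;
    if (delayMs > 0) await sleep(delayMs);
  }
}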
Deleted: src/lib/deepseekChat.ts

@@ -1,251 +0,0 @@
import { BaseChatModel, BaseChatModelCallOptions } from '@langchain/core/language_models/chat_models';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import { AIMessage, AIMessageChunk, BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { ChatResult, ChatGenerationChunk } from '@langchain/core/outputs';
import axios from 'axios';

import { BaseChatModelParams } from '@langchain/core/language_models/chat_models';

interface DeepSeekChatParams extends BaseChatModelParams {
  apiKey: string;
  baseURL: string;
  modelName: string;
  temperature?: number;
  max_tokens?: number;
  top_p?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
}

export class DeepSeekChat extends BaseChatModel<BaseChatModelCallOptions & { stream?: boolean }> {
  private apiKey: string;
  private baseURL: string;
  private modelName: string;
  private temperature: number;
  private maxTokens: number;
  private topP: number;
  private frequencyPenalty: number;
  private presencePenalty: number;

  constructor(params: DeepSeekChatParams) {
    super(params);
    this.apiKey = params.apiKey;
    this.baseURL = params.baseURL;
    this.modelName = params.modelName;
    this.temperature = params.temperature ?? 0.7;
    this.maxTokens = params.max_tokens ?? 8192;
    this.topP = params.top_p ?? 1;
    this.frequencyPenalty = params.frequency_penalty ?? 0;
    this.presencePenalty = params.presence_penalty ?? 0;
  }

  async _generate(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): Promise<ChatResult> {
    const formattedMessages = messages.map(msg => ({
      role: this.getRole(msg),
      content: msg.content.toString(),
    }));
    const response = await this.callDeepSeekAPI(formattedMessages, options.stream);

    if (options.stream) {
      return this.processStreamingResponse(response, messages, options, runManager);
    } else {
      const choice = response.data.choices[0];
      let content = choice.message.content || '';
      if (choice.message.reasoning_content) {
        content = `<think>\n${choice.message.reasoning_content}\n</think>\n\n${content}`;
      }

      // Report usage stats if available
      if (response.data.usage && runManager) {
        runManager.handleLLMEnd({
          generations: [],
          llmOutput: {
            tokenUsage: {
              completionTokens: response.data.usage.completion_tokens,
              promptTokens: response.data.usage.prompt_tokens,
              totalTokens: response.data.usage.total_tokens
            }
          }
        });
      }
      return {
        generations: [
          {
            text: content,
            message: new AIMessage(content),
          },
        ],
      };
    }
  }

  private getRole(msg: BaseMessage): string {
    if (msg instanceof SystemMessage) return 'system';
    if (msg instanceof HumanMessage) return 'user';
    if (msg instanceof AIMessage) return 'assistant';
    return 'user'; // Default to user
  }

  private async callDeepSeekAPI(messages: Array<{ role: string; content: string }>, streaming?: boolean) {
    return axios.post(
      `${this.baseURL}/chat/completions`,
      {
        messages,
        model: this.modelName,
        stream: streaming,
        temperature: this.temperature,
        max_tokens: this.maxTokens,
        top_p: this.topP,
        frequency_penalty: this.frequencyPenalty,
        presence_penalty: this.presencePenalty,
        response_format: { type: 'text' },
        ...(streaming && {
          stream_options: {
            include_usage: true
          }
        })
      },
      {
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${this.apiKey}`,
        },
        responseType: streaming ? 'text' : 'json',
      }
    );
  }

  public async *_streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun) {
    const response = await this.callDeepSeekAPI(messages.map(msg => ({
      role: this.getRole(msg),
      content: msg.content.toString(),
    })), true);

    let thinkState = -1; // -1: not started, 0: thinking, 1: answered
    let currentContent = '';

    // Split the response into lines
    const lines = response.data.split('\n');
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const jsonStr = line.slice(6);
      if (jsonStr === '[DONE]') break;

      try {
        console.log('Received chunk:', jsonStr);
        const chunk = JSON.parse(jsonStr);
        const delta = chunk.choices[0].delta;
        console.log('Parsed delta:', delta);

        // Handle usage stats in final chunk
        if (chunk.usage && !chunk.choices?.length) {
          runManager?.handleLLMEnd?.({
            generations: [],
            llmOutput: {
              tokenUsage: {
                completionTokens: chunk.usage.completion_tokens,
                promptTokens: chunk.usage.prompt_tokens,
                totalTokens: chunk.usage.total_tokens
              }
            }
          });
          continue;
        }

        // Handle reasoning content
        if (delta.reasoning_content) {
          if (thinkState === -1) {
            thinkState = 0;
            const startTag = '<think>\n';
            currentContent += startTag;
            console.log('Emitting think start:', startTag);
            runManager?.handleLLMNewToken(startTag);
            const chunk = new ChatGenerationChunk({
              text: startTag,
              message: new AIMessageChunk(startTag),
              generationInfo: {}
            });
            yield chunk;
          }
          currentContent += delta.reasoning_content;
          console.log('Emitting reasoning:', delta.reasoning_content);
          runManager?.handleLLMNewToken(delta.reasoning_content);
          const chunk = new ChatGenerationChunk({
            text: delta.reasoning_content,
            message: new AIMessageChunk(delta.reasoning_content),
            generationInfo: {}
          });
          yield chunk;
        }

        // Handle regular content
        if (delta.content) {
          if (thinkState === 0) {
            thinkState = 1;
            const endTag = '\n</think>\n\n';
            currentContent += endTag;
            console.log('Emitting think end:', endTag);
            runManager?.handleLLMNewToken(endTag);
            const chunk = new ChatGenerationChunk({
              text: endTag,
              message: new AIMessageChunk(endTag),
              generationInfo: {}
            });
            yield chunk;
          }
          currentContent += delta.content;
          console.log('Emitting content:', delta.content);
          runManager?.handleLLMNewToken(delta.content);
          const chunk = new ChatGenerationChunk({
            text: delta.content,
            message: new AIMessageChunk(delta.content),
            generationInfo: {}
          });
          yield chunk;
        }
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : 'Failed to parse chunk';
        console.error(`Streaming error: ${errorMessage}`);
        if (error instanceof Error && error.message.includes('DeepSeek API Error')) {
          throw error;
        }
      }
    }

    // Handle any unclosed think block
    if (thinkState === 0) {
      const endTag = '\n</think>\n\n';
      currentContent += endTag;
      runManager?.handleLLMNewToken(endTag);
      const chunk = new ChatGenerationChunk({
        text: endTag,
        message: new AIMessageChunk(endTag),
        generationInfo: {}
      });
      yield chunk;
    }
  }

  private async processStreamingResponse(response: any, messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): Promise<ChatResult> {
    let accumulatedContent = '';
    for await (const chunk of this._streamResponseChunks(messages, options, runManager)) {
      accumulatedContent += chunk.message.content;
    }
    return {
      generations: [
        {
          text: accumulatedContent,
          message: new AIMessage(accumulatedContent),
        },
      ],
    };
  }

  _llmType(): string {
    return 'deepseek';
  }
}
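The three-valued thinkState in _streamResponseChunks above means the emitted token stream always has the same shape: an opening '<think>\n', the reasoning deltas, a closing '\n</think>\n\n' (added even if the stream ends mid-thought), then the answer deltas. Roughly, for an illustrative two-part reply (token values are made up):

// Emitted chunk text, in order:
// '<think>\n'              <- thinkState -1 -> 0, opening tag
// 'Check the capital...'   <- delta.reasoning_content
// '\n</think>\n\n'         <- first delta.content arrives, thinkState 0 -> 1
// 'Paris'                  <- delta.content
// ' is the capital.'       <- delta.content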
@@ -20,7 +20,6 @@ interface ChatModelConfig {
  model: ReasoningChatModel | ChatOpenAI;
}

// Define which models require reasoning capabilities
const REASONING_MODELS = ['deepseek-reasoner'];

const MODEL_DISPLAY_NAMES: Record<string, string> = {
@@ -35,7 +34,6 @@ export const loadDeepSeekChatModels = async (): Promise<Record<string, ChatModel
  if (!apiKey) return {};

  if (!deepSeekEndpoint || !apiKey) {
    logger.debug('DeepSeek endpoint or API key not configured, skipping');
    return {};
  }

@@ -50,12 +48,10 @@ export const loadDeepSeekChatModels = async (): Promise<Record<string, ChatModel
    const deepSeekModels = response.data.data;

    const chatModels = deepSeekModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
      // Only include models we have display names for
      if (model.id in MODEL_DISPLAY_NAMES) {
        // Use ReasoningChatModel for models that need reasoning capabilities
        if (REASONING_MODELS.includes(model.id)) {
          const streamDelay = getDeepseekStreamDelay();
          logger.debug(`Using stream delay of ${streamDelay}ms for ${model.id}`);

          acc[model.id] = {
            displayName: MODEL_DISPLAY_NAMES[model.id],
@@ -139,10 +139,8 @@ export class ReasoningChatModel extends BaseChatModel<BaseChatModelCallOptions &
        if (jsonStr === '[DONE]') break;

        try {
          console.log('Received chunk:', jsonStr);
          const chunk = JSON.parse(jsonStr);
          const delta = chunk.choices[0].delta;
          console.log('Parsed delta:', delta);

          // Handle usage stats in final chunk
          if (chunk.usage && !chunk.choices?.length) {
@@ -165,7 +163,6 @@ export class ReasoningChatModel extends BaseChatModel<BaseChatModelCallOptions &
            thinkState = 0;
            const startTag = '<think>\n';
            currentContent += startTag;
            console.log('Emitting think start:', startTag);
            runManager?.handleLLMNewToken(startTag);
            const chunk = new ChatGenerationChunk({
              text: startTag,
@@ -181,7 +178,6 @@ export class ReasoningChatModel extends BaseChatModel<BaseChatModelCallOptions &
            yield chunk;
          }
          currentContent += delta.reasoning_content;
          console.log('Emitting reasoning:', delta.reasoning_content);
          runManager?.handleLLMNewToken(delta.reasoning_content);
          const chunk = new ChatGenerationChunk({
            text: delta.reasoning_content,
@@ -203,7 +199,6 @@ export class ReasoningChatModel extends BaseChatModel<BaseChatModelCallOptions &
            thinkState = 1;
            const endTag = '\n</think>\n\n';
            currentContent += endTag;
            console.log('Emitting think end:', endTag);
            runManager?.handleLLMNewToken(endTag);
            const chunk = new ChatGenerationChunk({
              text: endTag,
@@ -219,7 +214,6 @@ export class ReasoningChatModel extends BaseChatModel<BaseChatModelCallOptions &
            yield chunk;
          }
          currentContent += delta.content;
          console.log('Emitting content:', delta.content);
          runManager?.handleLLMNewToken(delta.content);
          const chunk = new ChatGenerationChunk({
            text: delta.content,
@@ -1,4 +1,3 @@
// Using the import paths that have been working for you
import { BaseMessage, HumanMessage, AIMessage, SystemMessage } from "@langchain/core/messages";
import logger from "./logger";

@@ -18,7 +17,6 @@ export class AlternatingMessageValidator {
  }

  processMessages(messages: BaseMessage[]): BaseMessage[] {
    // Always respect requireAlternating for models that need it
    if (!this.rules.requireAlternating) {
      return messages;
    }
@@ -27,8 +25,7 @@ export class AlternatingMessageValidator {

    for (let i = 0; i < messages.length; i++) {
      const currentMsg = messages[i];

      // Handle system messages

      if (currentMsg instanceof SystemMessage) {
        if (this.rules.allowSystem) {
          processedMessages.push(currentMsg);
@@ -38,7 +35,6 @@ export class AlternatingMessageValidator {
        continue;
      }

      // Handle first non-system message
      if (processedMessages.length === 0 ||
          processedMessages[processedMessages.length - 1] instanceof SystemMessage) {
        if (this.rules.firstMessageType &&
@@ -52,7 +48,6 @@ export class AlternatingMessageValidator {
        }
      }

      // Handle alternating pattern
      const lastMsg = processedMessages[processedMessages.length - 1];
      if (lastMsg instanceof HumanMessage && currentMsg instanceof HumanMessage) {
        logger.warn(`${this.modelName}: Skipping consecutive human message`);
@@ -63,7 +58,6 @@ export class AlternatingMessageValidator {
        continue;
      }

      // For deepseek-reasoner, strip out reasoning_content from message history
      if (this.modelName === 'deepseek-reasoner' && currentMsg instanceof AIMessage) {
        const { reasoning_content, ...cleanedKwargs } = currentMsg.additional_kwargs;
        processedMessages.push(new AIMessage({
@@ -79,7 +73,6 @@ export class AlternatingMessageValidator {
  }
}

// Pre-configured validators for specific models
export const getMessageValidator = (modelName: string): AlternatingMessageValidator | null => {
  const validators: Record<string, MessageValidationRules> = {
    'deepseek-reasoner': {
Deleted: src/utils/messageProcessor.ts

@@ -1,95 +0,0 @@
// Using the import paths that have been working for you
import { BaseMessage, HumanMessage, AIMessage, SystemMessage } from "@langchain/core/messages";
import logger from "./logger";

export interface MessageValidationRules {
  requireAlternating?: boolean;
  firstMessageType?: typeof HumanMessage | typeof AIMessage;
  allowSystem?: boolean;
}

export class MessageProcessor {
  private rules: MessageValidationRules;
  private modelName: string;

  constructor(modelName: string, rules: MessageValidationRules) {
    this.rules = rules;
    this.modelName = modelName;
  }

  processMessages(messages: BaseMessage[]): BaseMessage[] {
    // Always respect requireAlternating for models that need it
    if (!this.rules.requireAlternating) {
      return messages;
    }

    const processedMessages: BaseMessage[] = [];

    for (let i = 0; i < messages.length; i++) {
      const currentMsg = messages[i];

      // Handle system messages
      if (currentMsg instanceof SystemMessage) {
        if (this.rules.allowSystem) {
          processedMessages.push(currentMsg);
        } else {
          logger.warn(`${this.modelName}: Skipping system message - not allowed`);
        }
        continue;
      }

      // Handle first non-system message
      if (processedMessages.length === 0 ||
          processedMessages[processedMessages.length - 1] instanceof SystemMessage) {
        if (this.rules.firstMessageType &&
            !(currentMsg instanceof this.rules.firstMessageType)) {
          logger.warn(`${this.modelName}: Converting first message to required type`);
          processedMessages.push(new this.rules.firstMessageType({
            content: currentMsg.content,
            additional_kwargs: currentMsg.additional_kwargs
          }));
          continue;
        }
      }

      // Handle alternating pattern
      const lastMsg = processedMessages[processedMessages.length - 1];
      if (lastMsg instanceof HumanMessage && currentMsg instanceof HumanMessage) {
        logger.warn(`${this.modelName}: Skipping consecutive human message`);
        continue;
      }
      if (lastMsg instanceof AIMessage && currentMsg instanceof AIMessage) {
        logger.warn(`${this.modelName}: Skipping consecutive AI message`);
        continue;
      }

      // For deepseek-reasoner, strip out reasoning_content from message history
      if (this.modelName === 'deepseek-reasoner' && currentMsg instanceof AIMessage) {
        const { reasoning_content, ...cleanedKwargs } = currentMsg.additional_kwargs;
        processedMessages.push(new AIMessage({
          content: currentMsg.content,
          additional_kwargs: cleanedKwargs
        }));
      } else {
        processedMessages.push(currentMsg);
      }
    }

    return processedMessages;
  }
}

// Pre-configured processors for specific models
export const getMessageProcessor = (modelName: string): MessageProcessor | null => {
  const processors: Record<string, MessageValidationRules> = {
    'deepseek-reasoner': {
      requireAlternating: true,
      firstMessageType: HumanMessage,
      allowSystem: true
    },
    // Add more model configurations as needed
  };

  const rules = processors[modelName];
  return rules ? new MessageProcessor(modelName, rules) : null;
};
@@ -12,14 +12,12 @@ interface Discover {
  thumbnail: string;
}

// List of available categories
const categories = [
  'For You', 'AI', 'Technology', 'Current News', 'Sports',
  'Money', 'Gaming', 'Entertainment', 'Art and Culture',
  'Science', 'Health', 'Travel'
];

// Memoized header component that won't re-render when content changes
const DiscoverHeader = memo(({
  activeCategory,
  setActiveCategory,
@@ -31,7 +29,6 @@ const DiscoverHeader = memo(({
}) => {
  const categoryContainerRef = useRef<HTMLDivElement>(null);

  // Function to scroll categories horizontally
  const scrollCategories = (direction: 'left' | 'right') => {
    const container = categoryContainerRef.current;
    if (!container) return;
@@ -63,7 +60,6 @@ const DiscoverHeader = memo(({
        </button>
      </div>

      {/* Category Navigation with Buttons */}
      <div className="relative flex items-center py-4">
        <button
          className="absolute left-0 z-10 p-1 rounded-full bg-light-secondary dark:bg-dark-secondary hover:bg-light-primary/80 hover:dark:bg-dark-primary/80 transition-colors"
@@ -111,7 +107,6 @@ const DiscoverHeader = memo(({

DiscoverHeader.displayName = 'DiscoverHeader';

// Memoized content component that handles its own loading state
const DiscoverContent = memo(({
  activeCategory,
  userPreferences,
@@ -124,7 +119,6 @@ const DiscoverContent = memo(({
  const [discover, setDiscover] = useState<Discover[] | null>(null);
  const [contentLoading, setContentLoading] = useState(true);

  // Fetch data based on active category, user preferences, and language
  useEffect(() => {
    const fetchData = async () => {
      setContentLoading(true);
@@ -232,7 +226,6 @@ const DiscoverContent = memo(({

DiscoverContent.displayName = 'DiscoverContent';

// Preferences modal component
const PreferencesModal = memo(({
  showPreferences,
  setShowPreferences,
@@ -253,7 +246,6 @@ const PreferencesModal = memo(({
  const [tempPreferences, setTempPreferences] = useState<string[]>([]);
  const [tempLanguages, setTempLanguages] = useState<string[]>([]);

  // Initialize temp preferences when modal opens
  useEffect(() => {
    if (showPreferences) {
      setTempPreferences([...userPreferences]);
@@ -261,7 +253,6 @@ const PreferencesModal = memo(({
    }
  }, [showPreferences, userPreferences, preferredLanguages]);

  // Save user preferences
  const saveUserPreferences = async (preferences: string[], languages: string[]) => {
    try {
      const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover/preferences`, {
@@ -393,16 +384,13 @@ const PreferencesModal = memo(({

PreferencesModal.displayName = 'PreferencesModal';

// Main page component
const Page = () => {
  // State for the entire page
  const [activeCategory, setActiveCategory] = useState('For You');
  const [showPreferences, setShowPreferences] = useState(false);
  const [userPreferences, setUserPreferences] = useState<string[]>(['AI', 'Technology']);
  const [preferredLanguages, setPreferredLanguages] = useState<string[]>(['en']); // Default to English
  const [initialLoading, setInitialLoading] = useState(true);

  // Load user preferences on component mount
  useEffect(() => {
    const loadUserPreferences = async () => {
      try {
@@ -420,7 +408,6 @@ const Page = () => {
        }
      } catch (err: any) {
        console.error('Error loading preferences:', err.message);
        // Use default preferences if loading fails
      } finally {
        setInitialLoading(false);
      }
@@ -454,21 +441,18 @@ const Page = () => {

  return (
    <div>
      {/* Static header that doesn't re-render when content changes */}
      <DiscoverHeader
        activeCategory={activeCategory}
        setActiveCategory={setActiveCategory}
        setShowPreferences={setShowPreferences}
      />

      {/* Dynamic content that updates independently */}
      <DiscoverContent
        activeCategory={activeCategory}
        userPreferences={userPreferences}
        preferredLanguages={preferredLanguages}
      />

      {/* Preferences modal */}
      <PreferencesModal
        showPreferences={showPreferences}
        setShowPreferences={setShowPreferences}
@@ -35,7 +35,6 @@ const BatchDeleteChats = ({

    setLoading(true);
    try {
      // Delete chats one by one
      for (const chatId of chatIds) {
        await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`, {
          method: 'DELETE',
@@ -45,7 +44,6 @@ const BatchDeleteChats = ({
        });
      }

      // Update local state
      const newChats = chats.filter(chat => !chatIds.includes(chat.id));
      setChats(newChats);
@@ -39,11 +39,11 @@ const useSocket = (
  const retryCountRef = useRef(0);
  const isCleaningUpRef = useRef(false);
  const MAX_RETRIES = 3;
  const INITIAL_BACKOFF = 1000; // 1 second
  const INITIAL_BACKOFF = 1000;
  const isConnectionErrorRef = useRef(false);

  const getBackoffDelay = (retryCount: number) => {
    return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
    return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000);
  };

  useEffect(() => {
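Given INITIAL_BACKOFF = 1000 and the 10,000 ms cap, getBackoffDelay doubles on each retry until the cap is reached; a few sample values:

// getBackoffDelay(0) === 1000
// getBackoffDelay(1) === 2000
// getBackoffDelay(2) === 4000
// getBackoffDelay(4) === 10000  (2^4 * 1000 = 16000, capped at 10000)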
@@ -16,15 +16,8 @@ const ReasoningPanel = ({ thinking, className, isExpanded: propExpanded }: Reaso
  const [isExpanded, setIsExpanded] = React.useState(true);
  const [detailsRefs, setDetailsRefs] = React.useState<HTMLDetailsElement[]>([]);

  logger.info('ReasoningPanel rendering with:', {
    thinking: thinking,
    isExpanded: propExpanded,
    detailsRefsCount: detailsRefs.length
  });

  React.useEffect(() => {
    if (propExpanded !== undefined) {
      logger.info('Updating expansion state:', propExpanded);
      setIsExpanded(propExpanded);
    }
  }, [propExpanded]);
@@ -33,7 +26,6 @@ const ReasoningPanel = ({ thinking, className, isExpanded: propExpanded }: Reaso
    if (element) {
      setDetailsRefs(refs => {
        if (!refs.includes(element)) {
          logger.info('Adding new details ref');
          return [...refs, element];
        }
        return refs;
@@ -42,11 +34,9 @@ const ReasoningPanel = ({ thinking, className, isExpanded: propExpanded }: Reaso
  }, []);

  const expandAll = () => {
    logger.info('Expanding all details');
    detailsRefs.forEach(ref => ref.open = true);
  };
  const collapseAll = () => {
    logger.info('Collapsing all details');
    detailsRefs.forEach(ref => ref.open = false);
  };

@@ -73,9 +63,7 @@ const ReasoningPanel = ({ thinking, className, isExpanded: propExpanded }: Reaso
      {thinking.split('\n\n').map((paragraph, index) => {
        if (!paragraph.trim()) return null;

        // Extract content without the bullet prefix
        const content = paragraph.replace(/^[•\-\d.]\s*/, '');
        logger.info(`Processing paragraph ${index}:`, content);

        return (
          <div key={index} className="mb-2 last:mb-0">
@@ -117,4 +105,4 @@ const ReasoningPanel = ({ thinking, className, isExpanded: propExpanded }: Reaso
  );
};

export default ReasoningPanel;
export default ReasoningPanel;
@@ -47,34 +47,21 @@ const MessageBox = ({
  const [isThinkingExpanded, setIsThinkingExpanded] = useState(true);

  useEffect(() => {
    logger.info(`Processing message:`, {
      content: message.content,
      role: message.role,
      messageId: message.messageId
    });

    const regex = /\[(\d+)\]/g;
    const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;

    // Check for thinking content, including partial tags
    const match = message.content.match(thinkRegex);
    logger.info(`Think tag match:`, match);

    if (match) {
      const [_, thinkingContent, answerContent] = match;

      // Set thinking content even if </think> hasn't appeared yet
      if (thinkingContent) {
        logger.info(`Found thinking content:`, thinkingContent.trim());
        setThinking(thinkingContent.trim());
        setIsThinkingExpanded(true); // Expand when thinking starts
        setIsThinkingExpanded(true);
      }

      // Only set answer content if we have it (after </think>)
      if (answerContent) {
        logger.info(`Found answer content:`, answerContent.trim());
        setIsThinkingExpanded(false); // Collapse when thinking is done
        // Process the answer part for sources if needed
        setIsThinkingExpanded(false);
        if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
          setParsedMessage(
            answerContent.trim().replace(
@@ -89,7 +76,6 @@ const MessageBox = ({
          setSpeechMessage(answerContent.trim().replace(regex, ''));
        }
      } else {
        // No thinking content - process as before
        if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
          setParsedMessage(
            message.content.replace(
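For clarity, the thinkRegex above splits a streamed assistant message into its reasoning and answer parts even before the closing tag has arrived; with made-up message text:

const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;

// Mid-stream: no closing tag yet, so the answer group is empty and only
// the thinking panel is updated.
'<think>Comparing sources...'.match(thinkRegex);
// -> [full match, 'Comparing sources...', '']

// Complete message: both groups are captured, and the panel collapses.
'<think>Comparing sources...</think>\n\nParis is the capital.'.match(thinkRegex);
// -> [full match, 'Comparing sources...', '\n\nParis is the capital.']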