Files
Perplexica/src/lib/providers/deepseek.ts
haddadrm f473a581ce implemented the refactoring plan with the configurable delay feature.

1. Created AlternatingMessageValidator (renamed from MessageProcessor):

- Focused on handling alternating message patterns
- Made it model-agnostic with a configuration-driven approach (a sketch follows this list)
- Kept the core validation logic intact
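
The validator itself is not part of this file; what follows is a minimal sketch of a configuration-driven alternating-message validator. The ValidatorConfig shape, function name, and merge behavior are assumptions for illustration, not the actual Perplexica implementation:

import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';

// Hypothetical config shape; the real AlternatingMessageValidator may differ.
interface ValidatorConfig {
  // True for models that reject two consecutive messages with the same role.
  requireAlternating: boolean;
}

// Merge consecutive same-role messages so the sequence strictly alternates.
export function validateAlternatingMessages(
  messages: BaseMessage[],
  config: ValidatorConfig,
): BaseMessage[] {
  if (!config.requireAlternating) return messages;

  const result: BaseMessage[] = [];
  for (const msg of messages) {
    const prev = result[result.length - 1];
    if (prev && prev._getType() === msg._getType()) {
      // Same role twice in a row: merge contents into a single message.
      const merged = `${String(prev.content)}\n${String(msg.content)}`;
      result[result.length - 1] =
        msg._getType() === 'human' ? new HumanMessage(merged) : new AIMessage(merged);
    } else {
      result.push(msg);
    }
  }
  return result;
}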

2. Created ReasoningChatModel (renamed from DeepSeekChat):

- Made it generic for any model with reasoning/thinking capabilities
- Added a configurable streaming delay parameter (streamDelay)
- Implemented delay logic in the streaming process (see the sketch after this list)
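
The delay logic lives inside ReasoningChatModel, which is not shown in this file. A minimal sketch of how a streamDelay could be applied between emitted chunks (the helper names here are assumptions):

// Hypothetical helper: pause between streamed chunks when streamDelay is set.
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function* throttleStream<T>(
  stream: AsyncIterable<T>,
  streamDelay?: number,
): AsyncGenerator<T> {
  for await (const chunk of stream) {
    yield chunk;
    if (streamDelay && streamDelay > 0) {
      await sleep(streamDelay); // slows token emission to the configured rate
    }
  }
}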

3. Updated the DeepSeek provider:

- Now uses ReasoningChatModel for deepseek-reasoner with a 50ms delay
- Uses standard ChatOpenAI for deepseek-chat
- Added a clear distinction between models that need reasoning capabilities

4. Updated references in metaSearchAgent.ts:

- Changed the import from messageProcessor to alternatingMessageValidator (see the snippet after this list)
- Updated function calls to use the new validator
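
In metaSearchAgent.ts the change is essentially a rename at the import site; roughly (the exact exported identifiers and relative paths are assumptions):

// Before:
// import { processMessages } from '../messageProcessor';
// After:
import { validateAlternatingMessages } from '../alternatingMessageValidator';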
The configurable delay implementation lets you control the speed of token generation, which can help with the issue you were seeing. The delay is set to 50ms for the deepseek-reasoner model in the deepseek.ts provider file, and you can adjust this value to find the optimal speed.

This refactoring maintains all the existing
functionality while making the code more
maintainable and future-proof. The separation of
concerns between message validation and model
implementation will make it easier to add support
for other models with similar requirements in the future.
2025-02-25 10:13:54 +04:00

91 lines
2.6 KiB
TypeScript

import { ReasoningChatModel } from '../reasoningChatModel';
import { ChatOpenAI } from '@langchain/openai';
import logger from '../../utils/logger';
import { getDeepseekApiKey } from '../../config';
import axios from 'axios';
interface DeepSeekModel {
  id: string;
  object: string;
  owned_by: string;
}

interface ModelListResponse {
  object: 'list';
  data: DeepSeekModel[];
}

interface ChatModelConfig {
  displayName: string;
  model: ReasoningChatModel | ChatOpenAI;
}

// Define which models require reasoning capabilities
const REASONING_MODELS = ['deepseek-reasoner'];

const MODEL_DISPLAY_NAMES: Record<string, string> = {
  'deepseek-reasoner': 'DeepSeek R1',
  'deepseek-chat': 'DeepSeek V3',
};
export const loadDeepSeekChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
  const deepSeekEndpoint = 'https://api.deepseek.com';

  const apiKey = getDeepseekApiKey();
  if (!apiKey) {
    logger.debug('DeepSeek API key not configured, skipping');
    return {};
  }

  try {
    const response = await axios.get<ModelListResponse>(`${deepSeekEndpoint}/models`, {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
    });

    const deepSeekModels = response.data.data;

    const chatModels = deepSeekModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
      // Only include models we have display names for
      if (model.id in MODEL_DISPLAY_NAMES) {
        // Use ReasoningChatModel for models that need reasoning capabilities
        if (REASONING_MODELS.includes(model.id)) {
          acc[model.id] = {
            displayName: MODEL_DISPLAY_NAMES[model.id],
            model: new ReasoningChatModel({
              apiKey,
              baseURL: deepSeekEndpoint,
              modelName: model.id,
              temperature: 0.7,
              streamDelay: 50, // Small delay to control streaming speed
            }),
          };
        } else {
          // Use standard ChatOpenAI for other models
          acc[model.id] = {
            displayName: MODEL_DISPLAY_NAMES[model.id],
            model: new ChatOpenAI({
              openAIApiKey: apiKey,
              configuration: {
                baseURL: deepSeekEndpoint,
              },
              modelName: model.id,
              temperature: 0.7,
            }),
          };
        }
      }

      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading DeepSeek models: ${String(err)}`);
    return {};
  }
};
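
A quick usage sketch for the loader above (the calling context is an assumption; Perplexica's actual provider registry wires this up differently):

// List whichever DeepSeek models were registered by the loader.
const models = await loadDeepSeekChatModels();
for (const [id, { displayName }] of Object.entries(models)) {
  console.log(`${id} -> ${displayName}`);
}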