mirror of https://github.com/ItzCrazyKns/Perplexica.git
synced 2025-09-19 07:41:33 +00:00
Add DeepSeek and LMStudio providers
- Integrate DeepSeek and LMStudio AI providers
- Add message processing utilities for improved handling
- Implement reasoning panel for message actions
- Add logging functionality to UI
- Update configurations and dependencies
src/lib/providers/deepseek.ts (new file, 69 lines)
@@ -0,0 +1,69 @@
import { DeepSeekChat } from '../deepseekChat';
import logger from '../../utils/logger';
import { getDeepseekApiKey } from '../../config';
import axios from 'axios';

interface DeepSeekModel {
  id: string;
  object: string;
  owned_by: string;
}

interface ModelListResponse {
  object: 'list';
  data: DeepSeekModel[];
}

interface ChatModelConfig {
  displayName: string;
  model: DeepSeekChat;
}

const MODEL_DISPLAY_NAMES: Record<string, string> = {
  'deepseek-reasoner': 'DeepSeek R1',
  'deepseek-chat': 'DeepSeek V3',
};

export const loadDeepSeekChatModels = async (): Promise<
  Record<string, ChatModelConfig>
> => {
  const deepSeekEndpoint = 'https://api.deepseek.com';

  const apiKey = getDeepseekApiKey();
  if (!apiKey) {
    logger.debug('DeepSeek API key not configured, skipping');
    return {};
  }

  try {
    const response = await axios.get<ModelListResponse>(
      `${deepSeekEndpoint}/models`,
      {
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${apiKey}`,
        },
      },
    );

    const deepSeekModels = response.data.data;

    const chatModels = deepSeekModels.reduce<Record<string, ChatModelConfig>>(
      (acc, model) => {
        // Only include models we have display names for
        if (model.id in MODEL_DISPLAY_NAMES) {
          acc[model.id] = {
            displayName: MODEL_DISPLAY_NAMES[model.id],
            model: new DeepSeekChat({
              apiKey,
              baseURL: deepSeekEndpoint,
              modelName: model.id,
              temperature: 0.7,
            }),
          };
        }
        return acc;
      },
      {},
    );

    return chatModels;
  } catch (err) {
    logger.error(`Error loading DeepSeek models: ${String(err)}`);
    return {};
  }
};
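
For reference, a minimal sketch of how a caller might consume the record this loader returns. The invoke() call is an assumption: it presumes DeepSeekChat follows LangChain's BaseChatModel interface, which the ../deepseekChat import path suggests but this diff does not show.

import { loadDeepSeekChatModels } from './src/lib/providers/deepseek';

const main = async () => {
  // Returns {} when no DeepSeek API key is configured.
  const models = await loadDeepSeekChatModels();

  // Keys are DeepSeek model IDs; values carry a display name and a client.
  for (const [id, { displayName }] of Object.entries(models)) {
    console.log(`${id} -> ${displayName}`);
  }

  const chat = models['deepseek-chat'];
  if (chat) {
    // Assumption: DeepSeekChat implements LangChain's BaseChatModel,
    // so it accepts message tuples via invoke().
    const reply = await chat.model.invoke([['human', 'Hello!']]);
    console.log(reply.content);
  }
};

main().catch(console.error);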
src/lib/providers/index.ts
@@ -4,6 +4,8 @@ import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { loadDeepSeekChatModels } from './deepseek';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
@@ -17,6 +19,8 @@ const chatModelProviders = {
   ollama: loadOllamaChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  deepseek: loadDeepSeekChatModels,
+  lm_studio: loadLMStudioChatModels,
 };

 const embeddingModelProviders = {
@@ -24,6 +28,7 @@ const embeddingModelProviders = {
   local: loadTransformersEmbeddingsModels,
   ollama: loadOllamaEmbeddingsModels,
   gemini: loadGeminiEmbeddingsModels,
+  lm_studio: loadLMStudioEmbeddingsModels,
 };

 export const getAvailableChatModelProviders = async () => {
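
The registration above is all a new provider needs; discovery is driven by these two maps. The body of getAvailableChatModelProviders is truncated in this diff, but the pattern the maps imply looks like the following sketch (a hypothetical restatement, not the file's actual code):

import { loadDeepSeekChatModels } from './deepseek';
import { loadLMStudioChatModels } from './lmstudio';

type Loader = () => Promise<Record<string, unknown>>;

// Trimmed to the two new providers for brevity.
const chatModelProviders: Record<string, Loader> = {
  deepseek: loadDeepSeekChatModels,
  lm_studio: loadLMStudioChatModels,
};

export const getAvailableChatModelProviders = async () => {
  const models: Record<string, Record<string, unknown>> = {};
  for (const [provider, loader] of Object.entries(chatModelProviders)) {
    const providerModels = await loader();
    // Loaders return {} when unconfigured or unreachable; skip those.
    if (Object.keys(providerModels).length > 0) {
      models[provider] = providerModels;
    }
  }
  return models;
};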
src/lib/providers/lmstudio.ts (new file, 96 lines)
@@ -0,0 +1,96 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getLMStudioApiEndpoint, getKeepAlive } from '../../config';
import logger from '../../utils/logger';
import axios from 'axios';

interface LMStudioModel {
  id: string;
  name?: string;
}

const ensureV1Endpoint = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
  try {
    const keepAlive = getKeepAlive();
    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      timeout: parseInt(keepAlive) * 1000 || 5000,
      headers: { 'Content-Type': 'application/json' },
    });
    return true;
  } catch {
    return false;
  }
};

export const loadLMStudioChatModels = async () => {
  const endpoint = getLMStudioApiEndpoint();
  const keepAlive = getKeepAlive();

  if (!endpoint) return {};
  if (!(await checkServerAvailability(endpoint))) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      timeout: parseInt(keepAlive) * 1000 || 5000,
      headers: { 'Content-Type': 'application/json' },
    });

    const chatModels = response.data.data.reduce(
      (acc: Record<string, any>, model: LMStudioModel) => {
        acc[model.id] = {
          displayName: model.name || model.id,
          model: new ChatOpenAI({
            openAIApiKey: 'lm-studio',
            configuration: {
              baseURL: ensureV1Endpoint(endpoint),
            },
            modelName: model.id,
            temperature: 0.7,
            streaming: true,
            maxRetries: 3,
          }),
        };
        return acc;
      },
      {},
    );

    return chatModels;
  } catch (err) {
    logger.error(`Error loading LM Studio models: ${String(err)}`);
    return {};
  }
};

export const loadLMStudioEmbeddingsModels = async () => {
  const endpoint = getLMStudioApiEndpoint();
  const keepAlive = getKeepAlive();

  if (!endpoint) return {};
  if (!(await checkServerAvailability(endpoint))) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      timeout: parseInt(keepAlive) * 1000 || 5000,
      headers: { 'Content-Type': 'application/json' },
    });

    const embeddingsModels = response.data.data.reduce(
      (acc: Record<string, any>, model: LMStudioModel) => {
        acc[model.id] = {
          displayName: model.name || model.id,
          model: new OpenAIEmbeddings({
            openAIApiKey: 'lm-studio',
            configuration: {
              baseURL: ensureV1Endpoint(endpoint),
            },
            modelName: model.id,
          }),
        };
        return acc;
      },
      {},
    );

    return embeddingsModels;
  } catch (err) {
    logger.error(`Error loading LM Studio embeddings models: ${String(err)}`);
    return {};
  }
};
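
Two details in this file are easy to miss. ensureV1Endpoint lets the configured endpoint be either the bare server address or the full /v1 path, and the timeout expression parseInt(keepAlive) * 1000 || 5000 falls back to five seconds whenever the keep-alive config value is unset or non-numeric, because NaN * 1000 is NaN, which is falsy. A standalone restatement of both idioms, for illustration only:

const ensureV1 = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

const timeoutMs = (keepAlive: string): number =>
  parseInt(keepAlive) * 1000 || 5000; // NaN is falsy, so bad input -> 5000

console.log(ensureV1('http://localhost:1234'));    // http://localhost:1234/v1
console.log(ensureV1('http://localhost:1234/v1')); // http://localhost:1234/v1
console.log(timeoutMs('30')); // 30000
console.log(timeoutMs(''));   // 5000 (parseInt('') is NaN)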