Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-08-16 04:38:58 +00:00)

Merge 6edac6938c into f37686189e

This merge adds LM Studio as a chat and embeddings model provider: a new LMSTUDIO endpoint in the config schema, a new src/lib/providers/lmstudio.ts loader, provider registration, and settings-route wiring.
src/config.ts

@@ -16,9 +16,10 @@ interface Config {
     ANTHROPIC: string;
     GEMINI: string;
   };
-  API_ENDPOINTS: {
-    SEARXNG: string;
-    OLLAMA: string;
+  API_ENDPOINTS: {
+    OLLAMA: string;
+    LMSTUDIO: string;
+    SEARXNG: string;
   };
 }
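The LMSTUDIO field added here maps one-to-one onto a key in the [API_ENDPOINTS] table of config.toml. A minimal sketch of that mapping, assuming the @iarna/toml package this file already uses for stringify; the endpoint URLs are purely illustrative (LM Studio's local server conventionally listens on http://localhost:1234/v1, but the diff itself pins no default):

import toml from '@iarna/toml';

// Example values only; none of these URLs come from the diff.
const sample = `
[API_ENDPOINTS]
OLLAMA = "http://localhost:11434"
LMSTUDIO = "http://localhost:1234/v1"
SEARXNG = "http://localhost:32768"
`;

// Parses into the same shape as Config['API_ENDPOINTS'] above.
const parsed = toml.parse(sample) as unknown as {
  API_ENDPOINTS: { OLLAMA: string; LMSTUDIO: string; SEARXNG: string };
};

console.log(parsed.API_ENDPOINTS.LMSTUDIO); // "http://localhost:1234/v1"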
@@ -51,6 +52,8 @@ export const getSearxngApiEndpoint = () =>
 
 export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
 
+export const getLMStudioApiEndpoint = () => loadConfig().API_ENDPOINTS.LMSTUDIO;
+
 export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();
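Like the other endpoint getters, getLMStudioApiEndpoint returns whatever string sits in config.toml; an unset key comes back empty or undefined, and either way falsy. That is the contract the new provider loader relies on. A sketch of a caller using it:

// An unconfigured endpoint yields a falsy value, so a truthiness check is
// enough; the LM Studio loaders further below return {} in exactly this case.
const endpoint = getLMStudioApiEndpoint();
if (!endpoint) {
  // LM Studio not configured; skip the provider.
}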
@@ -72,6 +75,27 @@ export const updateConfig = (config: RecursivePartial<Config>) => {
   }
 }
 
+/*
+export const updateConfig = (config: RecursivePartial<Config>) => {
+  const currentConfig = loadConfig();
+
+  // Merge existing config with new values
+  const mergedConfig: RecursivePartial<Config> = {
+    GENERAL: {
+      ...currentConfig.GENERAL,
+      ...config.GENERAL,
+    },
+    API_KEYS: {
+      ...currentConfig.API_KEYS,
+      ...config.API_KEYS,
+    },
+    API_ENDPOINTS: {
+      ...currentConfig.API_ENDPOINTS,
+      ...config.API_ENDPOINTS,
+    },
+  };
+*/
+
 fs.writeFileSync(
   path.join(__dirname, `../${configFileName}`),
   toml.stringify(config),
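The commented-out block preserves an alternative, spread-based merge; the live implementation (mostly outside this hunk) merges currentConfig into the incoming partial before writing. If the spread version were ever revived, it would presumably write mergedConfig rather than the raw partial, roughly like this sketch (same RecursivePartial<Config> shape as above; assumes toml.stringify accepts the merged object just as it does the partial one):

// Hypothetical revival of the commented-out merge: fill each section from
// the on-disk config first, overlay the partial update, and persist the
// merged result so keys omitted by the caller survive the round-trip.
const mergedConfig: RecursivePartial<Config> = {
  GENERAL: { ...currentConfig.GENERAL, ...config.GENERAL },
  API_KEYS: { ...currentConfig.API_KEYS, ...config.API_KEYS },
  API_ENDPOINTS: { ...currentConfig.API_ENDPOINTS, ...config.API_ENDPOINTS },
};

fs.writeFileSync(
  path.join(__dirname, `../${configFileName}`),
  toml.stringify(mergedConfig),
);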
src/lib/providers/index.ts

@@ -4,6 +4,7 @@ import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
 
 const chatModelProviders = {
   openai: loadOpenAIChatModels,

@@ -11,6 +12,7 @@ const chatModelProviders = {
   ollama: loadOllamaChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  lm_studio: loadLMStudioChatModels,
 };
 
 const embeddingModelProviders = {

@@ -18,6 +20,7 @@ const embeddingModelProviders = {
   local: loadTransformersEmbeddingsModels,
   ollama: loadOllamaEmbeddingsModels,
   gemini: loadGeminiEmbeddingsModels,
+  lm_studio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
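Registering lm_studio in both maps is all the wiring the rest of the app needs; getAvailableChatModelProviders (the context line above) presumably walks chatModelProviders and collects each loader's output. Its real body is not part of this diff, but a sketch of the typical consumption pattern for a registry of async loaders:

// Sketch only: how such a registry is usually aggregated.
const loadAll = async () => {
  const models: Record<string, Record<string, unknown>> = {};
  for (const [provider, loader] of Object.entries(chatModelProviders)) {
    const loaded = await loader();
    // Unconfigured providers return {}, so they simply drop out here.
    if (Object.keys(loaded).length > 0) models[provider] = loaded;
  }
  return models;
};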
src/lib/providers/lmstudio.ts (new file, 89 lines)

@@ -0,0 +1,89 @@
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { ChatOpenAI } from '@langchain/openai';
+import { getKeepAlive, getLMStudioApiEndpoint } from '../../config';
+import logger from '../../utils/logger';
+import axios from 'axios';
+
+interface LMStudioModel {
+  id: string;
+  // add other properties if LM Studio API provides them
+}
+
+interface ChatModelConfig {
+  displayName: string;
+  model: ChatOpenAI;
+}
+
+export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
+  const lmStudioEndpoint = getLMStudioApiEndpoint();
+
+  if (!lmStudioEndpoint) {
+    logger.debug('LM Studio endpoint not configured, skipping');
+    return {};
+  }
+
+  try {
+    const response = await axios.get<{ data: LMStudioModel[] }>(`${lmStudioEndpoint}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    });
+
+    const lmStudioModels = response.data.data;
+
+    const chatModels = lmStudioModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
+      acc[model.id] = {
+        displayName: model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: lmStudioEndpoint,
+          },
+          modelName: model.id,
+          temperature: 0.7,
+        }),
+      };
+      return acc;
+    }, {});
+
+    return chatModels;
+  } catch (err) {
+    logger.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const lmStudioEndpoint = getLMStudioApiEndpoint();
+
+  if (!lmStudioEndpoint) return {};
+
+  try {
+    const response = await axios.get(`${lmStudioEndpoint}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    });
+
+    const lmStudioModels = response.data.data;
+
+    const embeddingsModels = lmStudioModels.reduce((acc, model) => {
+      acc[model.id] = {
+        displayName: model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio', // Dummy key required by LangChain
+          configuration: {
+            baseURL: lmStudioEndpoint,
+          },
+          modelName: model.id,
+        }),
+      };
+      return acc;
+    }, {});
+
+    return embeddingsModels;
+  } catch (err) {
+    logger.error(`Error loading LM Studio embeddings model: ${err}`);
+    return {};
+  }
+};
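Both loaders target LM Studio's OpenAI-compatible GET {endpoint}/models route, which returns { data: [{ id: ... }] }; the dummy 'lm-studio' key satisfies LangChain's constructor while baseURL redirects all traffic to the local server. (getKeepAlive is imported but unused in this version.) A hypothetical usage sketch, inside some async context, with the prompt and model pick as placeholders rather than anything from this diff:

// List whatever chat models LM Studio exposes and invoke the first one.
const chatModels = await loadLMStudioChatModels();
const first = Object.values(chatModels)[0];
if (first) {
  const reply = await first.model.invoke('Say hello in one sentence.');
  console.log(first.displayName, '->', reply.content);
}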
src/routes/config.ts

@@ -6,6 +6,7 @@ import {
 import {
   getGroqApiKey,
   getOllamaApiEndpoint,
+  getLMStudioApiEndpoint,
   getAnthropicApiKey,
   getGeminiApiKey,
   getOpenaiApiKey,

@@ -51,6 +52,7 @@ router.get('/', async (_, res) => {
 
   config['openaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
+  config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
   config['anthropicApiKey'] = getAnthropicApiKey();
   config['groqApiKey'] = getGroqApiKey();
   config['geminiApiKey'] = getGeminiApiKey();

@@ -74,6 +76,7 @@ router.post('/', async (req, res) => {
     },
     API_ENDPOINTS: {
       OLLAMA: config.ollamaApiUrl,
+      LMSTUDIO: config.lmStudioApiUrl,
     },
   };
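With these two route changes the settings round-trip is symmetric: the GET handler now reports lmStudioApiUrl alongside the other keys, and the POST handler writes it back into API_ENDPOINTS.LMSTUDIO. A sketch of the JSON a client would exchange, with placeholder values and an assumed /api/config mount path (the diff shows only the router, not where it is mounted):

// Placeholder payload matching the keys the handlers read and write.
const settings = {
  openaiApiKey: 'sk-...',
  ollamaApiUrl: 'http://localhost:11434',
  lmStudioApiUrl: 'http://localhost:1234/v1',
  anthropicApiKey: '',
  groqApiKey: '',
  geminiApiKey: '',
};

// GET returns this shape; POST accepts it and persists lmStudioApiUrl
// as API_ENDPOINTS.LMSTUDIO in config.toml.
await fetch('/api/config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(settings),
});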