From aa240009ab615311e62c31314ef8b9258179c01b Mon Sep 17 00:00:00 2001
From: haddadrm <121486289+haddadrm@users.noreply.github.com>
Date: Tue, 1 Apr 2025 17:23:28 +0400
Subject: [PATCH 1/4] Feature: Add LM Studio provider integration

- Added LM Studio provider to support OpenAI compatible API
- Implemented chat and embeddings model loading
- Updated config to include LM Studio API endpoint
---
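Notes: LM Studio exposes an OpenAI-compatible REST API, so the provider
points LangChain's ChatOpenAI and OpenAIEmbeddings clients at the local
server and discovers models via GET {endpoint}/v1/models. A minimal usage
sketch of the new loader (the prompt is illustrative, and real model ids
come from whatever the local LM Studio instance has loaded):

    import { loadLMStudioChatModels } from '@/lib/providers/lmstudio';

    const demo = async () => {
      // Returns {} when no endpoint is configured or the server is
      // unreachable, so callers need no special error handling.
      const models = await loadLMStudioChatModels();
      const first = Object.values(models)[0];
      if (first) {
        const reply = await first.model.invoke('Say hello in one sentence.');
        console.log(first.displayName, '->', reply.content);
      }
    };

    demo().catch(console.error);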
 sample.config.toml            |   5 +-
 src/lib/config.ts             |   5 ++
 src/lib/providers/index.ts    |   3 +
 src/lib/providers/lmstudio.ts | 101 ++++++++++++++++++++++++++++++++++
 4 files changed, 113 insertions(+), 1 deletion(-)
 create mode 100644 src/lib/providers/lmstudio.ts

diff --git a/sample.config.toml b/sample.config.toml
index 980e99d..1db2125 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
-SEARXNG = "" # SearxNG API URL - http://localhost:32768
\ No newline at end of file
+SEARXNG = "" # SearxNG API URL - http://localhost:32768
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 2831214..7c6d495 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -28,6 +28,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  LM_STUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -77,6 +80,8 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index eef212f..0a4a6db 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -13,6 +13,7 @@ import { loadAnthropicChatModels } from './anthropic';
 import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadDeepseekChatModels } from './deepseek';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
 
 export interface ChatModel {
   displayName: string;
@@ -34,6 +35,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -44,6 +46,7 @@
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
new file mode 100644
index 0000000..fd8eb75
--- /dev/null
+++ b/src/lib/providers/lmstudio.ts
@@ -0,0 +1,101 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+  id: string;
+  name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+  try {
+    const keepAlive = getKeepAlive();
+    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
+      headers: { 'Content-Type': 'application/json' },
+    });
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const loadLMStudioChatModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+  const keepAlive = getKeepAlive();
+
+  if (!endpoint) return {};
+  if (!await checkServerAvailability(endpoint)) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      chatModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+          temperature: 0.7,
+          streaming: true,
+          maxRetries: 3
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+  const keepAlive = getKeepAlive();
+
+  if (!endpoint) return {};
+  if (!await checkServerAvailability(endpoint)) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      embeddingsModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+        }) as unknown as Embeddings,
+      };
+    });
+
+    return embeddingsModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio embeddings model: ${err}`);
+    return {};
+  }
+};

From 7e1dc33a081c563991d2fc084b7b7e3dd143e04e Mon Sep 17 00:00:00 2001
From: haddadrm <121486289+haddadrm@users.noreply.github.com>
Date: Tue, 1 Apr 2025 19:26:15 +0400
Subject: [PATCH 2/4] Implement provider formatting improvements and fix
 client-side compatibility

- Add PROVIDER_INFO metadata to each provider file with proper display names
- Create centralized PROVIDER_METADATA in index.ts for consistent reference
- Update settings UI to use provider metadata for display names
- Fix client/server compatibility for Node.js modules in config.ts
---
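Notes: PROVIDER_METADATA gives the UI a curated display name per provider
key, falling back to naive capitalization for keys without metadata. A
sketch of the lookup the settings page now performs (the `as any` cast
mirrors the one used in page.tsx):

    import { PROVIDER_METADATA } from '@/lib/providers';

    // 'lmstudio' -> 'LM Studio', 'custom_openai' -> 'Custom OpenAI',
    // unknown keys like 'foo' -> 'Foo'.
    const providerLabel = (provider: string): string =>
      (PROVIDER_METADATA as any)[provider]?.displayName ||
      provider.charAt(0).toUpperCase() + provider.slice(1);

The typeof window === 'undefined' guards in config.ts keep fs/path out of
client bundles; on the client, loadConfig() returns an empty object and
settings are fetched via the API instead.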
 src/app/settings/page.tsx         | 11 ++++-----
 src/lib/config.ts                 | 41 ++++++++++++++++++++++---------
 src/lib/providers/anthropic.ts    |  5 ++++
 src/lib/providers/deepseek.ts     |  5 ++++
 src/lib/providers/gemini.ts       |  5 ++++
 src/lib/providers/groq.ts         |  5 ++++
 src/lib/providers/index.ts        | 31 +++++++++++++++++------
 src/lib/providers/lmstudio.ts     |  5 ++++
 src/lib/providers/ollama.ts       |  5 ++++
 src/lib/providers/openai.ts       |  5 ++++
 src/lib/providers/transformers.ts |  5 ++++
 11 files changed, 97 insertions(+), 26 deletions(-)

diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 8eee9a4..919304b 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {
@@ -547,9 +548,8 @@ const Page = () => {
                 options={Object.keys(config.chatModelProviders).map(
                   (provider) => ({
                     value: provider,
-                    label:
-                      provider.charAt(0).toUpperCase() +
-                      provider.slice(1),
+                    label: (PROVIDER_METADATA as any)[provider]?.displayName ||
+                      provider.charAt(0).toUpperCase() + provider.slice(1),
                   }),
                 )}
               />
@@ -689,9 +689,8 @@ const Page = () => {
                 options={Object.keys(config.embeddingModelProviders).map(
                   (provider) => ({
                     value: provider,
-                    label:
-                      provider.charAt(0).toUpperCase() +
-                      provider.slice(1),
+                    label: (PROVIDER_METADATA as any)[provider]?.displayName ||
+                      provider.charAt(0).toUpperCase() + provider.slice(1),
                   }),
                 )}
               />
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 7c6d495..e3f2680 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
 
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
+
 const configFileName = 'config.toml';
 
 interface Config {
@@ -46,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
-    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-  ) as any as Config;
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
+      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+    ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -114,10 +128,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  const currentConfig = loadConfig();
-  const mergedConfig = mergeConfigs(currentConfig, config);
-  fs.writeFileSync(
-    path.join(path.join(process.cwd(), `${configFileName}`)),
-    toml.stringify(mergedConfig),
-  );
+  // Server-side only
+  if (typeof window === 'undefined') {
+    const currentConfig = loadConfig();
+    const mergedConfig = mergeConfigs(currentConfig, config);
+    fs.writeFileSync(
+      path.join(path.join(process.cwd(), `${configFileName}`)),
+      toml.stringify(mergedConfig),
+    );
+  }
 };
diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts
index 7ecde4b..e434b32 100644
--- a/src/lib/providers/anthropic.ts
+++ b/src/lib/providers/anthropic.ts
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/deepseek.ts b/src/lib/providers/deepseek.ts
index 88f02ec..b272801 100644
--- a/src/lib/providers/deepseek.ts
+++ b/src/lib/providers/deepseek.ts
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI'
+};
+
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
diff --git a/src/lib/providers/gemini.ts b/src/lib/providers/gemini.ts
index 2a88015..6af9fb2 100644
--- a/src/lib/providers/gemini.ts
+++ b/src/lib/providers/gemini.ts
@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
index 85c75f4..62481d4 100644
--- a/src/lib/providers/groq.ts
+++ b/src/lib/providers/groq.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index 0a4a6db..073bd61 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -1,19 +1,34 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels, PROVIDER_INFO as OpenAIInfo, PROVIDER_INFO } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
-import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels, PROVIDER_INFO as OllamaInfo } from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import { loadAnthropicChatModels, PROVIDER_INFO as AnthropicInfo } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels, PROVIDER_INFO as GeminiInfo } from './gemini';
+import { loadTransformersEmbeddingsModels, PROVIDER_INFO as TransformersInfo } from './transformers';
+import { loadDeepseekChatModels, PROVIDER_INFO as DeepseekInfo } from './deepseek';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels, PROVIDER_INFO as LMStudioInfo } from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI'
+  }
+};
 
 export interface ChatModel {
   displayName: string;
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
index fd8eb75..f7be638 100644
--- a/src/lib/providers/lmstudio.ts
+++ b/src/lib/providers/lmstudio.ts
@@ -1,6 +1,11 @@
 import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
 import axios from 'axios';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lmstudio',
+  displayName: 'LM Studio'
+};
 import { ChatOpenAI } from '@langchain/openai';
 import { OpenAIEmbeddings } from '@langchain/openai';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
index 92e98e4..beab58f 100644
--- a/src/lib/providers/ollama.ts
+++ b/src/lib/providers/ollama.ts
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama'
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts
index 01bacc6..36f7e29 100644
--- a/src/lib/providers/openai.ts
+++ b/src/lib/providers/openai.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts
index a06dd12..fd7cb9e 100644
--- a/src/lib/providers/transformers.ts
+++ b/src/lib/providers/transformers.ts
@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face'
+};
+
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {

From 073b5e897cc5c2081b6963fa4d3b8b4ce1cc6cfc Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Sat, 12 Apr 2025 11:58:52 +0530
Subject: [PATCH 3/4] feat(app): lint & beautify

---
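Notes: besides reformatting, this patch wires the LM Studio endpoint
through the config API (GET now returns lmStudioApiUrl, POST writes it
back) and drops the keepAlive-derived timeout from the LM Studio loaders.
A sketch of how the POST handler maps the flat UI key onto the nested
TOML structure (handler boilerplate omitted):

    // Inside POST /api/config (sketch): lmStudioApiUrl ends up under
    // [MODELS.LM_STUDIO] API_URL in config.toml. mergeConfigs skips
    // null/undefined fields, so partial updates leave other keys intact.
    updateConfig({
      MODELS: {
        LM_STUDIO: {
          API_URL: config.lmStudioApiUrl,
        },
      },
    });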
 src/app/api/config/route.ts       |  5 ++++
 src/app/settings/page.tsx         | 13 ++++++---
 src/lib/config.ts                 |  5 ++--
 src/lib/providers/anthropic.ts    |  2 +-
 src/lib/providers/deepseek.ts     |  2 +-
 src/lib/providers/gemini.ts       |  2 +-
 src/lib/providers/groq.ts         |  2 +-
 src/lib/providers/index.ts        | 44 ++++++++++++++++++++++------
 src/lib/providers/lmstudio.ts     | 20 +++++---------
 src/lib/providers/ollama.ts       |  2 +-
 src/lib/providers/openai.ts       |  2 +-
 src/lib/providers/transformers.ts |  2 +-
 12 files changed, 66 insertions(+), 35 deletions(-)

diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts
index 39c1f84..c1e5bbd 100644
--- a/src/app/api/config/route.ts
+++ b/src/app/api/config/route.ts
@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {
 
     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
+    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      LM_STUDIO: {
+        API_URL: config.lmStudioApiUrl,
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 919304b..0385944 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -21,6 +21,7 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
+  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -548,8 +549,10 @@ const Page = () => {
                 options={Object.keys(config.chatModelProviders).map(
                   (provider) => ({
                     value: provider,
-                    label: (PROVIDER_METADATA as any)[provider]?.displayName ||
-                      provider.charAt(0).toUpperCase() + provider.slice(1),
+                    label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
+                      provider.charAt(0).toUpperCase() +
+                      provider.slice(1),
                   }),
                 )}
               />
@@ -689,8 +692,10 @@ const Page = () => {
                 options={Object.keys(config.embeddingModelProviders).map(
                   (provider) => ({
                     value: provider,
-                    label: (PROVIDER_METADATA as any)[provider]?.displayName ||
-                      provider.charAt(0).toUpperCase() + provider.slice(1),
+                    label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
+                      provider.charAt(0).toUpperCase() +
+                      provider.slice(1),
                   }),
                 )}
               />
diff --git a/src/lib/config.ts b/src/lib/config.ts
index e3f2680..78ad09c 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -60,7 +60,7 @@ const loadConfig = () => {
       fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
     ) as any as Config;
   }
-  
+
   // Client-side fallback - settings will be loaded via API
   return {} as Config;
 };
@@ -94,7 +94,8 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
-export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LM_STUDIO.API_URL;
+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
 
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts
index e434b32..2b0f2cc 100644
--- a/src/lib/providers/anthropic.ts
+++ b/src/lib/providers/anthropic.ts
@@ -4,7 +4,7 @@ import { getAnthropicApiKey } from '../config';
 
 export const PROVIDER_INFO = {
   key: 'anthropic',
-  displayName: 'Anthropic'
+  displayName: 'Anthropic',
 };
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
diff --git a/src/lib/providers/deepseek.ts b/src/lib/providers/deepseek.ts
index b272801..46f2398 100644
--- a/src/lib/providers/deepseek.ts
+++ b/src/lib/providers/deepseek.ts
@@ -5,7 +5,7 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 export const PROVIDER_INFO = {
   key: 'deepseek',
-  displayName: 'Deepseek AI'
+  displayName: 'Deepseek AI',
 };
 
 const deepseekChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/gemini.ts b/src/lib/providers/gemini.ts
index 6af9fb2..6cf2243 100644
--- a/src/lib/providers/gemini.ts
+++ b/src/lib/providers/gemini.ts
@@ -7,7 +7,7 @@ import { ChatModel, EmbeddingModel } from '.';
 
 export const PROVIDER_INFO = {
   key: 'gemini',
-  displayName: 'Google Gemini'
+  displayName: 'Google Gemini',
 };
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
index 62481d4..4b0ca92 100644
--- a/src/lib/providers/groq.ts
+++ b/src/lib/providers/groq.ts
@@ -4,7 +4,7 @@ import { ChatModel } from '.';
 
 export const PROVIDER_INFO = {
   key: 'groq',
-  displayName: 'Groq'
+  displayName: 'Groq',
 };
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index 073bd61..e536431 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -1,19 +1,45 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels, PROVIDER_INFO as OpenAIInfo, PROVIDER_INFO } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels, PROVIDER_INFO as OllamaInfo } from './ollama';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
 import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
-import { loadAnthropicChatModels, PROVIDER_INFO as AnthropicInfo } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels, PROVIDER_INFO as GeminiInfo } from './gemini';
-import { loadTransformersEmbeddingsModels, PROVIDER_INFO as TransformersInfo } from './transformers';
-import { loadDeepseekChatModels, PROVIDER_INFO as DeepseekInfo } from './deepseek';
-import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels, PROVIDER_INFO as LMStudioInfo } from './lmstudio';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
 
 export const PROVIDER_METADATA = {
   openai: OpenAIInfo,
@@ -26,8 +52,8 @@ export const PROVIDER_METADATA = {
   lmstudio: LMStudioInfo,
   custom_openai: {
     key: 'custom_openai',
-    displayName: 'Custom OpenAI'
-  }
+    displayName: 'Custom OpenAI',
+  },
 };
 
 export interface ChatModel {
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
index f7be638..811208f 100644
--- a/src/lib/providers/lmstudio.ts
+++ b/src/lib/providers/lmstudio.ts
@@ -4,7 +4,7 @@ import { ChatModel, EmbeddingModel } from '.';
 
 export const PROVIDER_INFO = {
   key: 'lmstudio',
-  displayName: 'LM Studio'
+  displayName: 'LM Studio',
 };
 import { ChatOpenAI } from '@langchain/openai';
 import { OpenAIEmbeddings } from '@langchain/openai';
@@ -16,14 +16,12 @@ interface LMStudioModel {
   name?: string;
 }
 
-const ensureV1Endpoint = (endpoint: string): string => 
+const ensureV1Endpoint = (endpoint: string): string =>
   endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
 
 const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
   try {
-    const keepAlive = getKeepAlive();
     await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      timeout: parseInt(keepAlive) * 1000 || 5000,
       headers: { 'Content-Type': 'application/json' },
     });
     return true;
@@ -34,14 +32,12 @@ const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
 
 export const loadLMStudioChatModels = async () => {
   const endpoint = getLMStudioApiEndpoint();
-  const keepAlive = getKeepAlive();
-  
+
   if (!endpoint) return {};
-  if (!await checkServerAvailability(endpoint)) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
 
   try {
     const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      timeout: parseInt(keepAlive) * 1000 || 5000,
       headers: { 'Content-Type': 'application/json' },
     });
 
@@ -58,7 +54,7 @@ export const loadLMStudioChatModels = async () => {
           modelName: model.id,
           temperature: 0.7,
           streaming: true,
-          maxRetries: 3
+          maxRetries: 3,
         }) as unknown as BaseChatModel,
       };
     });
@@ -72,14 +68,12 @@ export const loadLMStudioChatModels = async () => {
 
 export const loadLMStudioEmbeddingsModels = async () => {
   const endpoint = getLMStudioApiEndpoint();
-  const keepAlive = getKeepAlive();
-  
+
   if (!endpoint) return {};
-  if (!await checkServerAvailability(endpoint)) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
 
   try {
     const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      timeout: parseInt(keepAlive) * 1000 || 5000,
       headers: { 'Content-Type': 'application/json' },
     });
 
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
index beab58f..cca2142 100644
--- a/src/lib/providers/ollama.ts
+++ b/src/lib/providers/ollama.ts
@@ -4,7 +4,7 @@ import { ChatModel, EmbeddingModel } from '.';
 
 export const PROVIDER_INFO = {
   key: 'ollama',
-  displayName: 'Ollama'
+  displayName: 'Ollama',
 };
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts
index 36f7e29..61621c3 100644
--- a/src/lib/providers/openai.ts
+++ b/src/lib/providers/openai.ts
@@ -4,7 +4,7 @@ import { ChatModel, EmbeddingModel } from '.';
 
 export const PROVIDER_INFO = {
   key: 'openai',
-  displayName: 'OpenAI'
+  displayName: 'OpenAI',
 };
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts
index fd7cb9e..3098d9f 100644
--- a/src/lib/providers/transformers.ts
+++ b/src/lib/providers/transformers.ts
@@ -2,7 +2,7 @@ import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
 export const PROVIDER_INFO = {
   key: 'transformers',
-  displayName: 'Hugging Face'
+  displayName: 'Hugging Face',
 };
 
 export const loadTransformersEmbeddingsModels = async () => {

From 186249149674df5938faecabb3a3b7c48d9bce71 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Sat, 12 Apr 2025 11:59:05 +0530
Subject: [PATCH 4/4] feat(settings): add LM Studio API URL

---
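Notes: the new field follows the same controlled-input pattern as the
existing Ollama and Deepseek fields: onChange mirrors keystrokes into
component state, onSave persists a single key. A sketch of the save path,
assuming saveConfig POSTs the flat key/value pair to /api/config like the
other fields do (the helper name is illustrative):

    const persistLMStudioUrl = async (url: string) => {
      await fetch('/api/config', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ lmStudioApiUrl: url }),
      });
    };

Server-side, the route updated in PATCH 3/4 translates lmStudioApiUrl into
MODELS.LM_STUDIO.API_URL in config.toml.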
 src/app/settings/page.tsx | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 0385944..05338c3 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -862,6 +862,25 @@ const Page = () => {
                 onSave={(value) =>
                   saveConfig('deepseekApiKey', value)
                 }
               />
             </div>
+
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                LM Studio API URL
+              </p>
+              <Input
+                type="text"
+                placeholder="LM Studio API URL"
+                value={config.lmStudioApiUrl}
+                isSaving={savingStates['lmStudioApiUrl']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lmStudioApiUrl: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+              />
+            </div>
           </SettingsSection>