From aa240009ab615311e62c31314ef8b9258179c01b Mon Sep 17 00:00:00 2001
From: haddadrm <121486289+haddadrm@users.noreply.github.com>
Date: Tue, 1 Apr 2025 17:23:28 +0400
Subject: [PATCH 1/2] Feature: Add LM Studio provider integration

- Added LM Studio provider to support the OpenAI-compatible API
- Implemented chat and embeddings model loading
- Updated config to include the LM Studio API endpoint
---
 sample.config.toml            |   5 +-
 src/lib/config.ts             |   5 ++
 src/lib/providers/index.ts    |   3 +
 src/lib/providers/lmstudio.ts | 101 ++++++++++++++++++++++++++++++++++
 4 files changed, 113 insertions(+), 1 deletion(-)
 create mode 100644 src/lib/providers/lmstudio.ts

diff --git a/sample.config.toml b/sample.config.toml
index 980e99d..1db2125 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
-SEARXNG = "" # SearxNG API URL - http://localhost:32768
\ No newline at end of file
+SEARXNG = "" # SearxNG API URL - http://localhost:32768
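
For reference, a filled-in config entry for a local LM Studio server would look like the snippet below; the URL is only an example (taken from the comment above), so point it at wherever your LM Studio instance actually listens:

    [MODELS.LM_STUDIO]
    API_URL = "http://host.docker.internal:1234"
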
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 2831214..7c6d495 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -28,6 +28,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  LM_STUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -77,6 +80,8 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index eef212f..0a4a6db 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -13,6 +13,7 @@ import { loadAnthropicChatModels } from './anthropic';
 import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadDeepseekChatModels } from './deepseek';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
 
 export interface ChatModel {
   displayName: string;
@@ -34,6 +35,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -44,6 +46,7 @@
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
new file mode 100644
index 0000000..fd8eb75
--- /dev/null
+++ b/src/lib/providers/lmstudio.ts
@@ -0,0 +1,101 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+  id: string;
+  name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+  try {
+    const keepAlive = getKeepAlive();
+    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
+      headers: { 'Content-Type': 'application/json' },
+    });
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const loadLMStudioChatModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+  const keepAlive = getKeepAlive();
+
+  if (!endpoint) return {};
+  if (!await checkServerAvailability(endpoint)) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      chatModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+          temperature: 0.7,
+          streaming: true,
+          maxRetries: 3,
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+  const keepAlive = getKeepAlive();
+
+  if (!endpoint) return {};
+  if (!await checkServerAvailability(endpoint)) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      embeddingsModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+        }) as unknown as Embeddings,
+      };
+    });
+
+    return embeddingsModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio embeddings models: ${err}`);
+    return {};
+  }
+};
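
LM Studio exposes an OpenAI-compatible HTTP API, so model discovery above is a plain GET against /v1/models. A minimal sketch of that request, assuming a local server on LM Studio's default port 1234 (the URL is illustrative):

    import axios from 'axios';

    // List model IDs from a local LM Studio server. The response follows the
    // OpenAI list-models shape: { data: [{ id: string, ... }, ...] }.
    const listLMStudioModels = async (): Promise<string[]> => {
      const { data } = await axios.get('http://localhost:1234/v1/models');
      return data.data.map((model: { id: string }) => model.id);
    };
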
From 7e1dc33a081c563991d2fc084b7b7e3dd143e04e Mon Sep 17 00:00:00 2001
From: haddadrm <121486289+haddadrm@users.noreply.github.com>
Date: Tue, 1 Apr 2025 19:26:15 +0400
Subject: [PATCH 2/2] Implement provider formatting improvements and fix
 client-side compatibility

- Add PROVIDER_INFO metadata to each provider file with proper display names
- Create centralized PROVIDER_METADATA in index.ts for consistent reference
- Update settings UI to use provider metadata for display names
- Fix client/server compatibility for Node.js modules in config.ts
---
 src/app/settings/page.tsx         | 11 ++++-----
 src/lib/config.ts                 | 41 ++++++++++++++++++++++---------
 src/lib/providers/anthropic.ts    |  5 ++++
 src/lib/providers/deepseek.ts     |  5 ++++
 src/lib/providers/gemini.ts       |  5 ++++
 src/lib/providers/groq.ts         |  5 ++++
 src/lib/providers/index.ts        | 31 +++++++++++++++++------
 src/lib/providers/lmstudio.ts     |  5 ++++
 src/lib/providers/ollama.ts       |  5 ++++
 src/lib/providers/openai.ts       |  5 ++++
 src/lib/providers/transformers.ts |  5 ++++
 11 files changed, 97 insertions(+), 26 deletions(-)

diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 8eee9a4..919304b 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {
@@ -547,9 +548,8 @@ const Page = () => {
                     options={Object.keys(config.chatModelProviders).map(
                       (provider) => ({
                         value: provider,
-                        label:
-                          provider.charAt(0).toUpperCase() +
-                          provider.slice(1),
+                        label: (PROVIDER_METADATA as any)[provider]?.displayName ||
+                          provider.charAt(0).toUpperCase() + provider.slice(1),
                       }),
                     )}
                   />
@@ -689,9 +689,8 @@ const Page = () => {
                     options={Object.keys(config.embeddingModelProviders).map(
                       (provider) => ({
                         value: provider,
-                        label:
-                          provider.charAt(0).toUpperCase() +
-                          provider.slice(1),
+                        label: (PROVIDER_METADATA as any)[provider]?.displayName ||
+                          provider.charAt(0).toUpperCase() + provider.slice(1),
                       }),
                     )}
                   />
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 7c6d495..e3f2680 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
 
+// Only require Node.js modules on the server to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
+
 const configFileName = 'config.toml';
 
 interface Config {
@@ -46,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
-    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-  ) as any as Config;
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
+      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+    ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -114,10 +128,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  const currentConfig = loadConfig();
-  const mergedConfig = mergeConfigs(currentConfig, config);
-  fs.writeFileSync(
-    path.join(process.cwd(), `${configFileName}`),
-    toml.stringify(mergedConfig),
-  );
+  // Server-side only
+  if (typeof window === 'undefined') {
+    const currentConfig = loadConfig();
+    const mergedConfig = mergeConfigs(currentConfig, config);
+    fs.writeFileSync(
+      path.join(process.cwd(), `${configFileName}`),
+      toml.stringify(mergedConfig),
+    );
+  }
 };
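
Because loadConfig() now returns an empty object in the browser, client components have to fetch settings over HTTP instead of reading the TOML directly. A hypothetical sketch of such an accessor (the /api/config route name is an assumption for illustration, not part of this patch):

    // Hypothetical client-side accessor; assumes a server route that serves
    // the parsed config, since fs/path are unavailable in the browser.
    const fetchSettings = async (): Promise<Record<string, any>> => {
      const res = await fetch('/api/config');
      if (!res.ok) throw new Error(`Failed to load settings: ${res.status}`);
      return res.json();
    };
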
diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts
index 7ecde4b..e434b32 100644
--- a/src/lib/providers/anthropic.ts
+++ b/src/lib/providers/anthropic.ts
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/deepseek.ts b/src/lib/providers/deepseek.ts
index 88f02ec..b272801 100644
--- a/src/lib/providers/deepseek.ts
+++ b/src/lib/providers/deepseek.ts
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI'
+};
+
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
diff --git a/src/lib/providers/gemini.ts b/src/lib/providers/gemini.ts
index 2a88015..6af9fb2 100644
--- a/src/lib/providers/gemini.ts
+++ b/src/lib/providers/gemini.ts
@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
index 85c75f4..62481d4 100644
--- a/src/lib/providers/groq.ts
+++ b/src/lib/providers/groq.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index 0a4a6db..073bd61 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -1,19 +1,34 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels, PROVIDER_INFO as OpenAIInfo } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
-import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels, PROVIDER_INFO as OllamaInfo } from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import { loadAnthropicChatModels, PROVIDER_INFO as AnthropicInfo } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels, PROVIDER_INFO as GeminiInfo } from './gemini';
+import { loadTransformersEmbeddingsModels, PROVIDER_INFO as TransformersInfo } from './transformers';
+import { loadDeepseekChatModels, PROVIDER_INFO as DeepseekInfo } from './deepseek';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels, PROVIDER_INFO as LMStudioInfo } from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI'
+  }
+};
 
 export interface ChatModel {
   displayName: string;
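
The settings page above resolves display names through PROVIDER_METADATA and only falls back to naive capitalization for unknown keys. One way to factor that lookup out, shown here purely as an illustrative sketch:

    import { PROVIDER_METADATA } from '@/lib/providers';

    // Human-readable label for a provider key, mirroring the settings UI:
    // metadata displayName first, capitalized key as the fallback.
    const providerLabel = (provider: string): string =>
      (PROVIDER_METADATA as any)[provider]?.displayName ||
      provider.charAt(0).toUpperCase() + provider.slice(1);
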
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
index fd8eb75..f7be638 100644
--- a/src/lib/providers/lmstudio.ts
+++ b/src/lib/providers/lmstudio.ts
@@ -1,6 +1,11 @@
 import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
 import axios from 'axios';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lmstudio',
+  displayName: 'LM Studio'
+};
 import { ChatOpenAI } from '@langchain/openai';
 import { OpenAIEmbeddings } from '@langchain/openai';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
index 92e98e4..beab58f 100644
--- a/src/lib/providers/ollama.ts
+++ b/src/lib/providers/ollama.ts
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama'
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
 
diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts
index 01bacc6..36f7e29 100644
--- a/src/lib/providers/openai.ts
+++ b/src/lib/providers/openai.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI'
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts
index a06dd12..fd7cb9e 100644
--- a/src/lib/providers/transformers.ts
+++ b/src/lib/providers/transformers.ts
@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face'
+};
+
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {