diff --git a/sample.config.toml b/sample.config.toml
index 980e99d..1db2125 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[MODELS.DEEPSEEK]
API_KEY = ""
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
[API_ENDPOINTS]
-SEARXNG = "" # SearxNG API URL - http://localhost:32768
\ No newline at end of file
+SEARXNG = "" # SearxNG API URL - http://localhost:32768
diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts
index 39c1f84..c1e5bbd 100644
--- a/src/app/api/config/route.ts
+++ b/src/app/api/config/route.ts
@@ -8,6 +8,7 @@ import {
getOllamaApiEndpoint,
getOpenaiApiKey,
getDeepseekApiKey,
+ getLMStudioApiEndpoint,
updateConfig,
} from '@/lib/config';
import {
@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
+ config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
DEEPSEEK: {
API_KEY: config.deepseekApiKey,
},
+ LM_STUDIO: {
+ API_URL: config.lmStudioApiUrl,
+ },
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
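
For reference, a minimal sketch of how a client could persist the new field through this route. It assumes the handler above is served at /api/config (its file location) and that the POST body uses the same lmStudioApiUrl key read above; the helper name and the URL value (taken from the sample.config.toml comment) are illustrative only.

// Hypothetical client-side helper; not part of the diff.
const saveLMStudioUrl = async (url: string) => {
  const res = await fetch('/api/config');
  const current = await res.json();

  await fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...current, lmStudioApiUrl: url }),
  });
};

await saveLMStudioUrl('http://host.docker.internal:1234');
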
diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 8eee9a4..05338c3 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
import ThemeSwitcher from '@/components/theme/Switcher';
import { ImagesIcon, VideoIcon } from 'lucide-react';
import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
interface SettingsType {
chatModelProviders: {
@@ -20,6 +21,7 @@ interface SettingsType {
anthropicApiKey: string;
geminiApiKey: string;
ollamaApiUrl: string;
+ lmStudioApiUrl: string;
deepseekApiKey: string;
customOpenaiApiKey: string;
customOpenaiApiUrl: string;
@@ -548,8 +550,9 @@ const Page = () => {
(provider) => ({
value: provider,
label:
+ (PROVIDER_METADATA as any)[provider]?.displayName ||
provider.charAt(0).toUpperCase() +
- provider.slice(1),
+ provider.slice(1),
}),
)}
/>
@@ -690,8 +693,9 @@ const Page = () => {
(provider) => ({
value: provider,
label:
+ (PROVIDER_METADATA as any)[provider]?.displayName ||
provider.charAt(0).toUpperCase() +
- provider.slice(1),
+ provider.slice(1),
}),
)}
/>
@@ -858,6 +862,25 @@ const Page = () => {
onSave={(value) => saveConfig('deepseekApiKey', value)}
/>
+
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  LM Studio API URL
+                </p>
+                <Input
+                  type="text"
+                  placeholder="LM Studio API URL"
+                  value={config.lmStudioApiUrl}
+                  onChange={(e) => {
+                    setConfig((prev) => ({
+                      ...prev!,
+                      lmStudioApiUrl: e.target.value,
+                    }));
+                  }}
+                  onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+                />
+              </div>
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 2831214..78ad09c 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
import toml from '@iarna/toml';
+// Load Node.js modules only on the server to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+ // We're on the server
+ fs = require('fs');
+ path = require('path');
+}
+
const configFileName = 'config.toml';
interface Config {
@@ -28,6 +35,9 @@ interface Config {
DEEPSEEK: {
API_KEY: string;
};
+ LM_STUDIO: {
+ API_URL: string;
+ };
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
  [P in keyof T]?: RecursivePartial<T[P]>;
};
-const loadConfig = () =>
- toml.parse(
- fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
- ) as any as Config;
+const loadConfig = () => {
+ // Server-side only
+ if (typeof window === 'undefined') {
+ return toml.parse(
+ fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+ ) as any as Config;
+ }
+
+ // Client-side fallback - settings will be loaded via API
+ return {} as Config;
+};
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
+export const getLMStudioApiEndpoint = () =>
+ loadConfig().MODELS.LM_STUDIO.API_URL;
+
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
};
export const updateConfig = (config: RecursivePartial<Config>) => {
- const currentConfig = loadConfig();
- const mergedConfig = mergeConfigs(currentConfig, config);
- fs.writeFileSync(
- path.join(path.join(process.cwd(), `${configFileName}`)),
- toml.stringify(mergedConfig),
- );
+ // Server-side only
+ if (typeof window === 'undefined') {
+ const currentConfig = loadConfig();
+ const mergedConfig = mergeConfigs(currentConfig, config);
+ fs.writeFileSync(
+ path.join(path.join(process.cwd(), `${configFileName}`)),
+ toml.stringify(mergedConfig),
+ );
+ }
};
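
Since every fs/path access above is now guarded by typeof window === 'undefined', here is a short sketch of the intended behaviour (names are taken from the diff; the scenario is illustrative):

import { getLMStudioApiEndpoint, updateConfig } from '@/lib/config';

// Server side: config.toml is read from process.cwd() and the getter returns
// MODELS.LM_STUDIO.API_URL, e.g. "http://host.docker.internal:1234".
const endpoint = getLMStudioApiEndpoint();

// Writes merge the partial object into the current TOML and save it back.
updateConfig({ MODELS: { LM_STUDIO: { API_URL: endpoint } } });

// Client side: loadConfig() returns {} and updateConfig() is a no-op, so the
// getters are still meant to run only on the server; the browser should go
// through /api/config instead.
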
diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts
index 7ecde4b..2b0f2cc 100644
--- a/src/lib/providers/anthropic.ts
+++ b/src/lib/providers/anthropic.ts
@@ -1,6 +1,11 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+ key: 'anthropic',
+ displayName: 'Anthropic',
+};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/deepseek.ts b/src/lib/providers/deepseek.ts
index 88f02ec..46f2398 100644
--- a/src/lib/providers/deepseek.ts
+++ b/src/lib/providers/deepseek.ts
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+export const PROVIDER_INFO = {
+ key: 'deepseek',
+ displayName: 'Deepseek AI',
+};
+
const deepseekChatModels: Record<string, string>[] = [
{
displayName: 'Deepseek Chat (Deepseek V3)',
diff --git a/src/lib/providers/gemini.ts b/src/lib/providers/gemini.ts
index 2a88015..6cf2243 100644
--- a/src/lib/providers/gemini.ts
+++ b/src/lib/providers/gemini.ts
@@ -4,6 +4,11 @@ import {
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+ key: 'gemini',
+ displayName: 'Google Gemini',
+};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
index 85c75f4..4b0ca92 100644
--- a/src/lib/providers/groq.ts
+++ b/src/lib/providers/groq.ts
@@ -1,6 +1,11 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+ key: 'groq',
+ displayName: 'Groq',
+};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index eef212f..e536431 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -1,18 +1,60 @@
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+ loadOpenAIChatModels,
+ loadOpenAIEmbeddingModels,
+ PROVIDER_INFO as OpenAIInfo,
+ PROVIDER_INFO,
+} from './openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
+import {
+ loadOllamaChatModels,
+ loadOllamaEmbeddingModels,
+ PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+ loadAnthropicChatModels,
+ PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+ loadGeminiChatModels,
+ loadGeminiEmbeddingModels,
+ PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+ loadTransformersEmbeddingsModels,
+ PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+ loadDeepseekChatModels,
+ PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+ loadLMStudioChatModels,
+ loadLMStudioEmbeddingsModels,
+ PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+ openai: OpenAIInfo,
+ ollama: OllamaInfo,
+ groq: GroqInfo,
+ anthropic: AnthropicInfo,
+ gemini: GeminiInfo,
+ transformers: TransformersInfo,
+ deepseek: DeepseekInfo,
+ lmstudio: LMStudioInfo,
+ custom_openai: {
+ key: 'custom_openai',
+ displayName: 'Custom OpenAI',
+ },
+};
export interface ChatModel {
displayName: string;
@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
deepseek: loadDeepseekChatModels,
+ lmstudio: loadLMStudioChatModels,
};
export const embeddingModelProviders: Record<
@@ -44,6 +87,7 @@ export const embeddingModelProviders: Record<
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels,
+ lmstudio: loadLMStudioEmbeddingsModels,
};
export const getAvailableChatModelProviders = async () => {
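
The new PROVIDER_METADATA map is what the settings page uses to turn provider keys into labels; a small sketch of that lookup with its capitalization fallback:

import { PROVIDER_METADATA } from '@/lib/providers';

// Resolve a display name for a provider key, falling back to simple
// capitalization for keys without metadata (mirrors src/app/settings/page.tsx).
const providerLabel = (provider: string): string =>
  (PROVIDER_METADATA as any)[provider]?.displayName ||
  provider.charAt(0).toUpperCase() + provider.slice(1);

providerLabel('lmstudio'); // "LM Studio"
providerLabel('custom_openai'); // "Custom OpenAI"
providerLabel('somethingelse'); // "Somethingelse" (fallback path)
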
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
new file mode 100644
index 0000000..811208f
--- /dev/null
+++ b/src/lib/providers/lmstudio.ts
@@ -0,0 +1,100 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+ key: 'lmstudio',
+ displayName: 'LM Studio',
+};
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+ id: string;
+ name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+ endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+ try {
+ await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+ headers: { 'Content-Type': 'application/json' },
+ });
+ return true;
+ } catch {
+ return false;
+ }
+};
+
+export const loadLMStudioChatModels = async () => {
+ const endpoint = getLMStudioApiEndpoint();
+
+ if (!endpoint) return {};
+ if (!(await checkServerAvailability(endpoint))) return {};
+
+ try {
+ const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+ headers: { 'Content-Type': 'application/json' },
+ });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+ response.data.data.forEach((model: LMStudioModel) => {
+ chatModels[model.id] = {
+ displayName: model.name || model.id,
+ model: new ChatOpenAI({
+ openAIApiKey: 'lm-studio',
+ configuration: {
+ baseURL: ensureV1Endpoint(endpoint),
+ },
+ modelName: model.id,
+ temperature: 0.7,
+ streaming: true,
+ maxRetries: 3,
+ }) as unknown as BaseChatModel,
+ };
+ });
+
+ return chatModels;
+ } catch (err) {
+ console.error(`Error loading LM Studio models: ${err}`);
+ return {};
+ }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+ const endpoint = getLMStudioApiEndpoint();
+
+ if (!endpoint) return {};
+ if (!(await checkServerAvailability(endpoint))) return {};
+
+ try {
+ const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+ headers: { 'Content-Type': 'application/json' },
+ });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+ response.data.data.forEach((model: LMStudioModel) => {
+ embeddingsModels[model.id] = {
+ displayName: model.name || model.id,
+ model: new OpenAIEmbeddings({
+ openAIApiKey: 'lm-studio',
+ configuration: {
+ baseURL: ensureV1Endpoint(endpoint),
+ },
+ modelName: model.id,
+ }) as unknown as Embeddings,
+ };
+ });
+
+ return embeddingsModels;
+ } catch (err) {
+ console.error(`Error loading LM Studio embeddings model: ${err}`);
+ return {};
+ }
+};
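
A minimal usage sketch for the new loader, assuming an LM Studio instance is reachable at the configured MODELS.LM_STUDIO.API_URL; the prompt and the logging are illustrative only:

import { loadLMStudioChatModels } from '@/lib/providers/lmstudio';

const chatModels = await loadLMStudioChatModels();
// Keys are the model ids reported by LM Studio's OpenAI-compatible /v1/models
// endpoint; an empty object means the URL is unset or the server is unreachable.
const entry = Object.values(chatModels)[0];

if (entry) {
  // entry.model is a ChatOpenAI client pointed at the local /v1 base URL.
  const reply = await entry.model.invoke('Say hello in one short sentence.');
  console.log(entry.displayName, reply.content);
}
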
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
index 92e98e4..cca2142 100644
--- a/src/lib/providers/ollama.ts
+++ b/src/lib/providers/ollama.ts
@@ -1,6 +1,11 @@
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+ key: 'ollama',
+ displayName: 'Ollama',
+};
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts
index 01bacc6..61621c3 100644
--- a/src/lib/providers/openai.ts
+++ b/src/lib/providers/openai.ts
@@ -1,6 +1,11 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+ key: 'openai',
+ displayName: 'OpenAI',
+};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts
index a06dd12..3098d9f 100644
--- a/src/lib/providers/transformers.ts
+++ b/src/lib/providers/transformers.ts
@@ -1,5 +1,10 @@
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
+export const PROVIDER_INFO = {
+ key: 'transformers',
+ displayName: 'Hugging Face',
+};
+
export const loadTransformersEmbeddingsModels = async () => {
try {
const embeddingModels = {