From 5220abae05e26be1b8dc7e8606c0285e5e96cee6 Mon Sep 17 00:00:00 2001
From: haddadrm <121486289+haddadrm@users.noreply.github.com>
Date: Sun, 16 Feb 2025 02:16:37 +0400
Subject: [PATCH] LM Studio Integration:
- Added LM Studio provider with OpenAI-compatible API support
- Dynamic model discovery via /v1/models endpoint
- Support for both chat and embeddings models
- Docker-compatible networking configuration
- Thinking Model Panel:
  - Added collapsible UI panel for model's chain of thought
  - Parses responses with <think> tags to separate reasoning
  - Maintains backward compatibility with regular responses
  - Styled consistently with app theme for light/dark modes
  - Preserves all existing message functionality (sources, markdown, etc.)
These improvements enhance the app's compatibility with local LLMs and provide better visibility into model reasoning processes while maintaining existing functionality.
---
sample.config.toml | 12 ++-
src/config.ts | 19 +++--
src/routes/config.ts | 8 +-
ui/components/MessageBox.tsx | 159 ++++++++++++++++++++---------------
ui/tsconfig.json | 1 +
5 files changed, 117 insertions(+), 82 deletions(-)
diff --git a/sample.config.toml b/sample.config.toml
index 7b09d67..01419d3 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -15,12 +15,16 @@ API_KEY = ""
[MODELS.GEMINI]
API_KEY = ""
-[MODELS.CUSTOM_OPENAI]
-API_KEY = ""
-API_URL = ""
-
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
+[MODELS.LMSTUDIO]
+API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234/v1
+
+[MODELS.CUSTOM_OPENAI]
+API_KEY = ""
+API_URL = ""
+MODEL_NAME = ""
+
[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL
\ No newline at end of file
diff --git a/src/config.ts b/src/config.ts
index ebdd0e1..f866340 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -26,15 +26,16 @@ interface Config {
OLLAMA: {
API_URL: string;
};
+ LMSTUDIO: {
+ API_URL: string;
+ };
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
MODEL_NAME: string;
};
};
   API_ENDPOINTS: {
-    OLLAMA: string;
-    LMSTUDIO: string;
SEARXNG: string;
};
}
@@ -55,20 +56,20 @@ export const getSimilarityMeasure = () =>
 export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
 export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
 export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;
 export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
 export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
-export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
+export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
-export const getLMStudioApiEndpoint = () => loadConfig().API_ENDPOINTS.LMSTUDIO;
+export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LMSTUDIO.API_URL;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@@ -80,10 +85,6 @@ export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
const mergeConfigs = (current: any, update: any): any => {
- if (update === null || update === undefined) {
- return current;
- }
-
if (typeof current !== 'object' || current === null) {
return update;
}
diff --git a/src/routes/config.ts b/src/routes/config.ts
index c56123a..454693e 100644
--- a/src/routes/config.ts
+++ b/src/routes/config.ts
@@ -14,6 +14,7 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiApiKey,
   getCustomOpenaiModelName,
+  getLMStudioApiEndpoint,
 } from '../config';
import logger from '../utils/logger';
@@ -61,7 +64,7 @@ router.get('/', async (_, res) => {
config['geminiApiKey'] = getGeminiApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
res.status(200).json(config);
} catch (err: any) {
@@ -90,6 +93,9 @@ router.post('/', async (req, res) => {
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
+ LMSTUDIO: {
+ API_URL: config.lmStudioApiUrl,
+ },
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
diff --git a/ui/components/MessageBox.tsx b/ui/components/MessageBox.tsx
index e32ee0e..c31ac65 100644
--- a/ui/components/MessageBox.tsx
+++ b/ui/components/MessageBox.tsx
@@ -49,27 +49,37 @@ const MessageBox = ({
useEffect(() => {
const regex = /\[(\d+)\]/g;
+      // NOTE(review): the remainder of this hunk was truncated in the patch file;
+      // the regex below is reconstructed from the commit message ("Parses responses
+      // with <think> tags") — verify against the original commit before applying.
+      const thinkRegex = /<think>(.*?)<\/think>/;