From d8486e90bb81604c4d37ff7d0496e301f2b5a9d5 Mon Sep 17 00:00:00 2001 From: skoved Date: Wed, 27 Aug 2025 09:43:09 -0400 Subject: [PATCH 1/7] make file icon in attachment modal in chat page fit light theme better give the file icon in the attachment modal for the chat page an off-white background so that it matches the light theme better and looks the same as the attachment modal on the home page --- src/components/MessageInputActions/AttachSmall.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/components/MessageInputActions/AttachSmall.tsx b/src/components/MessageInputActions/AttachSmall.tsx index 834b3f4..fd7e35c 100644 --- a/src/components/MessageInputActions/AttachSmall.tsx +++ b/src/components/MessageInputActions/AttachSmall.tsx @@ -107,8 +107,8 @@ const AttachSmall = () => { key={i} className="flex flex-row items-center justify-start w-full space-x-3 p-3" > -
- +
+

{file.fileName.length > 25 From 328b12ffbe1077d5ec7e5d5bbd85bc034926470d Mon Sep 17 00:00:00 2001 From: akubesti Date: Thu, 11 Sep 2025 16:38:01 +0700 Subject: [PATCH 2/7] feat: add new OpenAI models with proper temperature parameter handling - Add GPT 4.1 series and o1/o3/o4 models with temperature compatibility fixes - Remove gpt-5/gpt-5-mini models due to organization verification restrictions - Fix 400 errors for models that only support default temperature values --- src/lib/providers/index.ts | 6 +++++- src/lib/providers/openai.ts | 40 ++++++++++++++++++++++++++++--------- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts index 1b6bb2f..3b3815f 100644 --- a/src/lib/providers/index.ts +++ b/src/lib/providers/index.ts @@ -120,7 +120,11 @@ export const getAvailableChatModelProviders = async () => { model: new ChatOpenAI({ apiKey: customOpenAiApiKey, modelName: customOpenAiModelName, - temperature: 0.7, + ...((() => { + const temperatureRestrictedModels = ['gpt-5-nano', 'o1', 'o3-mini', 'o4-mini']; + const isTemperatureRestricted = temperatureRestrictedModels.some(restrictedModel => customOpenAiModelName.includes(restrictedModel)); + return isTemperatureRestricted ? {} : { temperature: 0.7 }; + })()), configuration: { baseURL: customOpenAiApiUrl, }, diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts index 7e26763..c15b35b 100644 --- a/src/lib/providers/openai.ts +++ b/src/lib/providers/openai.ts @@ -26,6 +26,10 @@ const openaiChatModels: Record[] = [ displayName: 'GPT-4 omni', key: 'gpt-4o', }, + { + displayName: 'GPT-4o (2024-05-13)', + key: 'gpt-4o-2024-05-13', + }, { displayName: 'GPT-4 omni mini', key: 'gpt-4o-mini', @@ -47,12 +51,20 @@ const openaiChatModels: Record[] = [ key: 'gpt-5-nano', }, { - displayName: 'GPT 5 mini', - key: 'gpt-5-mini', + displayName: 'GPT 5 Chat Latest', + key: 'gpt-5-chat-latest', }, { - displayName: 'GPT 5', - key: 'gpt-5', + displayName: 'o1', + key: 'o1', + }, + { + displayName: 'o3 Mini', + key: 'o3-mini', + }, + { + displayName: 'o4 Mini', + key: 'o4-mini', }, ]; @@ -76,13 +88,23 @@ export const loadOpenAIChatModels = async () => { const chatModels: Record = {}; openaiChatModels.forEach((model) => { + // Models that only support temperature = 1 + const temperatureRestrictedModels = ['gpt-5-nano', 'o1', 'o3-mini', 'o4-mini']; + const isTemperatureRestricted = temperatureRestrictedModels.some(restrictedModel => model.key.includes(restrictedModel)); + + const modelConfig: any = { + apiKey: openaiApiKey, + modelName: model.key, + }; + + // Only add temperature if the model supports it + if (!isTemperatureRestricted) { + modelConfig.temperature = 0.7; + } + chatModels[model.key] = { displayName: model.displayName, - model: new ChatOpenAI({ - apiKey: openaiApiKey, - modelName: model.key, - temperature: model.key.includes('gpt-5') ? 
1 : 0.7, - }) as unknown as BaseChatModel, + model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel, }; }); From 6f367c34a89fd7523b568ef9b994ae3fbc6ccc26 Mon Sep 17 00:00:00 2001 From: akubesti Date: Fri, 12 Sep 2025 22:22:16 +0700 Subject: [PATCH 3/7] feat: add gpt-5, gpt-5-mini, o3 models and remove gpt-5-chat-latest - Add new OpenAI models: gpt-5, gpt-5-mini, and o3 series - Fix temperature parameter handling for o3 models - Update models list to ensure compatibility --- src/lib/providers/index.ts | 2 +- src/lib/providers/openai.ts | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts index 3b3815f..00ba60f 100644 --- a/src/lib/providers/index.ts +++ b/src/lib/providers/index.ts @@ -121,7 +121,7 @@ export const getAvailableChatModelProviders = async () => { apiKey: customOpenAiApiKey, modelName: customOpenAiModelName, ...((() => { - const temperatureRestrictedModels = ['gpt-5-nano', 'o1', 'o3-mini', 'o4-mini']; + const temperatureRestrictedModels = ['gpt-5-nano','gpt-5','gpt-5-mini','o1', 'o3', 'o3-mini', 'o4-mini']; const isTemperatureRestricted = temperatureRestrictedModels.some(restrictedModel => customOpenAiModelName.includes(restrictedModel)); return isTemperatureRestricted ? {} : { temperature: 0.7 }; })()), diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts index c15b35b..828f0d8 100644 --- a/src/lib/providers/openai.ts +++ b/src/lib/providers/openai.ts @@ -51,13 +51,21 @@ const openaiChatModels: Record[] = [ key: 'gpt-5-nano', }, { - displayName: 'GPT 5 Chat Latest', - key: 'gpt-5-chat-latest', + displayName: 'GPT 5', + key: 'gpt-5', + }, + { + displayName: 'GPT 5 Mini', + key: 'gpt-5-mini', }, { displayName: 'o1', key: 'o1', }, + { + displayName: 'o3', + key: 'o3', + }, { displayName: 'o3 Mini', key: 'o3-mini', @@ -89,7 +97,7 @@ export const loadOpenAIChatModels = async () => { openaiChatModels.forEach((model) => { // Models that only support temperature = 1 - const temperatureRestrictedModels = ['gpt-5-nano', 'o1', 'o3-mini', 'o4-mini']; + const temperatureRestrictedModels = ['gpt-5-nano','gpt-5','gpt-5-mini','o1', 'o3', 'o3-mini', 'o4-mini']; const isTemperatureRestricted = temperatureRestrictedModels.some(restrictedModel => model.key.includes(restrictedModel)); const modelConfig: any = { From e705952503141def16ab8f58a393381c9f07e100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Valentinas=20=C4=8Cirba?= Date: Wed, 17 Sep 2025 18:16:35 +0300 Subject: [PATCH 4/7] Add deployment badge for Hostinger --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index fa63ff1..63ee445 100644 --- a/README.md +++ b/README.md @@ -174,6 +174,8 @@ Perplexica runs on Next.js and handles all API requests. 
It works right away on [![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica) [![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267) [![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica) +[![Deploy on Hostinger](https://assets.hostinger.com/vps/deploy.svg)](https://www.hostinger.com/vps/docker-hosting?compose_url=https://raw.githubusercontent.com/ItzCrazyKns/Perplexica/refs/heads/master/docker-compose.yaml) + ## Upcoming Features From 5b5e83a3a00e01936a025b8df40af0b35975db7d Mon Sep 17 00:00:00 2001 From: Ramakrishnan Sivakumar Date: Wed, 17 Sep 2025 12:28:02 -0700 Subject: [PATCH 5/7] Add lemonade integration --- README.md | 23 ++++++++- sample.config.toml | 4 ++ src/app/api/config/route.ts | 8 +++ src/app/settings/page.tsx | 44 ++++++++++++++++ src/lib/config.ts | 9 ++++ src/lib/providers/index.ts | 8 +++ src/lib/providers/lemonade.ts | 94 +++++++++++++++++++++++++++++++++++ 7 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 src/lib/providers/lemonade.ts diff --git a/README.md b/README.md index fa63ff1..3a8b4c6 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,7 @@ - [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended) - [Non-Docker Installation](#non-docker-installation) - [Ollama Connection Errors](#ollama-connection-errors) + - [Lemonade Connection Errors](#lemonade-connection-errors) - [Using as a Search Engine](#using-as-a-search-engine) - [Using Perplexica's API](#using-perplexicas-api) - [Expose Perplexica to a network](#expose-perplexica-to-network) @@ -53,7 +54,7 @@ Want to know more about its architecture and how it works? You can read it [here ## Features -- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral. +- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, Mistral, and Lemonade. - **Two Main Modes:** - **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page. - **Normal Mode:** Processes your query and performs a web search. @@ -89,6 +90,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker. - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**. - `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. 
If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"` **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**. - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**. + - `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**. - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**. - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**. - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**. @@ -150,6 +152,25 @@ If you're encountering an Ollama connection error, it is likely due to the backe - Ensure that the port (default is 11434) is not blocked by your firewall. +#### Lemonade Connection Errors + +If you're encountering a Lemonade connection error, it is likely due to the backend being unable to connect to Lemonade's API. To fix this issue you can: + +1. **Check your Lemonade API URL:** Ensure that the API URL is correctly set in the settings menu. +2. **Update API URL Based on OS:** + + - **Windows:** Use `http://host.docker.internal:8000` + - **Mac:** Use `http://host.docker.internal:8000` + - **Linux:** Use `http://:8000` + + Adjust the port number if you're using a different one. + +3. **Ensure Lemonade Server is Running:** + + - Make sure your Lemonade server is running and accessible on the configured port (default is 8000). + - Verify that Lemonade is configured to accept connections from all interfaces (`0.0.0.0`), not just localhost (`127.0.0.1`). + - Ensure that the port (default is 8000) is not blocked by your firewall. 
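If the URL and port look right but Perplexica still lists no Lemonade models, you can query the same OpenAI-compatible endpoint that Perplexica's Lemonade provider calls internally (`/api/v1/models`). The snippet below is a minimal connectivity-check sketch, not part of Perplexica itself; the endpoint value and the optional `LEMONADE_API_KEY` environment variable are placeholder assumptions you should adapt to your setup.

```ts
import axios from 'axios';

// Point this at your Lemonade server; from inside Docker use http://host.docker.internal:8000
const endpoint = 'http://localhost:8000';
const apiKey = process.env.LEMONADE_API_KEY; // optional, only if your server requires one

const checkLemonade = async () => {
  const res = await axios.get(`${endpoint}/api/v1/models`, {
    headers: {
      'Content-Type': 'application/json',
      ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
    },
  });
  // Perplexica expects an OpenAI-style payload: { data: [{ id: '...' }, ...] }
  console.log(res.data.data.map((m: { id: string }) => m.id));
};

checkLemonade().catch((err) => console.error('Lemonade is not reachable:', err.message));
```

If this prints a list of model IDs, the server side is fine and the problem is most likely the URL configured in Perplexica's settings.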
+ ## Using as a Search Engine If you wish to use Perplexica as an alternative to traditional search engines like Google or Bing, or if you want to add a shortcut for quick access from your browser's search bar, follow these steps: diff --git a/sample.config.toml b/sample.config.toml index ba3e98e..be0573e 100644 --- a/sample.config.toml +++ b/sample.config.toml @@ -31,5 +31,9 @@ API_KEY = "" # Required to use AI/ML API chat and embedding models [MODELS.LM_STUDIO] API_URL = "" # LM Studio API URL - http://host.docker.internal:1234 +[MODELS.LEMONADE] +API_URL = "" # Lemonade API URL - http://localhost:8000 +API_KEY = "" # Optional API key for Lemonade + [API_ENDPOINTS] SEARXNG = "" # SearxNG API URL - http://localhost:32768 diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts index f117cce..5f66fdf 100644 --- a/src/app/api/config/route.ts +++ b/src/app/api/config/route.ts @@ -10,6 +10,8 @@ import { getDeepseekApiKey, getAimlApiKey, getLMStudioApiEndpoint, + getLemonadeApiEndpoint, + getLemonadeApiKey, updateConfig, getOllamaApiKey, } from '@/lib/config'; @@ -56,6 +58,8 @@ export const GET = async (req: Request) => { config['ollamaApiUrl'] = getOllamaApiEndpoint(); config['ollamaApiKey'] = getOllamaApiKey(); config['lmStudioApiUrl'] = getLMStudioApiEndpoint(); + config['lemonadeApiUrl'] = getLemonadeApiEndpoint(); + config['lemonadeApiKey'] = getLemonadeApiKey(); config['anthropicApiKey'] = getAnthropicApiKey(); config['groqApiKey'] = getGroqApiKey(); config['geminiApiKey'] = getGeminiApiKey(); @@ -106,6 +110,10 @@ export const POST = async (req: Request) => { LM_STUDIO: { API_URL: config.lmStudioApiUrl, }, + LEMONADE: { + API_URL: config.lemonadeApiUrl, + API_KEY: config.lemonadeApiKey, + }, CUSTOM_OPENAI: { API_URL: config.customOpenaiApiUrl, API_KEY: config.customOpenaiApiKey, diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx index 6fb8255..1af53f9 100644 --- a/src/app/settings/page.tsx +++ b/src/app/settings/page.tsx @@ -23,6 +23,8 @@ interface SettingsType { ollamaApiUrl: string; ollamaApiKey: string; lmStudioApiUrl: string; + lemonadeApiUrl: string; + lemonadeApiKey: string; deepseekApiKey: string; aimlApiKey: string; customOpenaiApiKey: string; @@ -953,6 +955,48 @@ const Page = () => {

+ + +
+
+

+ Lemonade API URL +

+ { + setConfig((prev) => ({ + ...prev!, + lemonadeApiUrl: e.target.value, + })); + }} + onSave={(value) => saveConfig('lemonadeApiUrl', value)} + /> +
+ +
+

+ Lemonade API Key (Optional) +

+ { + setConfig((prev) => ({ + ...prev!, + lemonadeApiKey: e.target.value, + })); + }} + onSave={(value) => saveConfig('lemonadeApiKey', value)} + /> +
+
+
) )} diff --git a/src/lib/config.ts b/src/lib/config.ts index 79d69dc..b79ec94 100644 --- a/src/lib/config.ts +++ b/src/lib/config.ts @@ -42,6 +42,10 @@ interface Config { LM_STUDIO: { API_URL: string; }; + LEMONADE: { + API_URL: string; + API_KEY: string; + }; CUSTOM_OPENAI: { API_URL: string; API_KEY: string; @@ -105,6 +109,11 @@ export const getCustomOpenaiModelName = () => export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LM_STUDIO.API_URL; +export const getLemonadeApiEndpoint = () => + loadConfig().MODELS.LEMONADE.API_URL; + +export const getLemonadeApiKey = () => loadConfig().MODELS.LEMONADE.API_KEY; + const mergeConfigs = (current: any, update: any): any => { if (update === null || update === undefined) { return current; diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts index 00ba60f..4cb3fe7 100644 --- a/src/lib/providers/index.ts +++ b/src/lib/providers/index.ts @@ -45,6 +45,11 @@ import { loadLMStudioEmbeddingsModels, PROVIDER_INFO as LMStudioInfo, } from './lmstudio'; +import { + loadLemonadeChatModels, + loadLemonadeEmbeddingModels, + PROVIDER_INFO as LemonadeInfo, +} from './lemonade'; export const PROVIDER_METADATA = { openai: OpenAIInfo, @@ -56,6 +61,7 @@ export const PROVIDER_METADATA = { deepseek: DeepseekInfo, aimlapi: AimlApiInfo, lmstudio: LMStudioInfo, + lemonade: LemonadeInfo, custom_openai: { key: 'custom_openai', displayName: 'Custom OpenAI', @@ -84,6 +90,7 @@ export const chatModelProviders: Record< deepseek: loadDeepseekChatModels, aimlapi: loadAimlApiChatModels, lmstudio: loadLMStudioChatModels, + lemonade: loadLemonadeChatModels, }; export const embeddingModelProviders: Record< @@ -96,6 +103,7 @@ export const embeddingModelProviders: Record< transformers: loadTransformersEmbeddingsModels, aimlapi: loadAimlApiEmbeddingModels, lmstudio: loadLMStudioEmbeddingsModels, + lemonade: loadLemonadeEmbeddingModels, }; export const getAvailableChatModelProviders = async () => { diff --git a/src/lib/providers/lemonade.ts b/src/lib/providers/lemonade.ts new file mode 100644 index 0000000..7d34552 --- /dev/null +++ b/src/lib/providers/lemonade.ts @@ -0,0 +1,94 @@ +import axios from 'axios'; +import { getLemonadeApiEndpoint, getLemonadeApiKey } from '../config'; +import { ChatModel, EmbeddingModel } from '.'; + +export const PROVIDER_INFO = { + key: 'lemonade', + displayName: 'Lemonade', +}; + +import { ChatOpenAI } from '@langchain/openai'; +import { OpenAIEmbeddings } from '@langchain/openai'; + +export const loadLemonadeChatModels = async () => { + const lemonadeApiEndpoint = getLemonadeApiEndpoint(); + const lemonadeApiKey = getLemonadeApiKey(); + + if (!lemonadeApiEndpoint) return {}; + + try { + const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, { + headers: { + 'Content-Type': 'application/json', + ...(lemonadeApiKey + ? 
{ Authorization: `Bearer ${lemonadeApiKey}` } + : {}), + }, + }); + + const { data: models } = res.data; + + const chatModels: Record = {}; + + models.forEach((model: any) => { + chatModels[model.id] = { + displayName: model.id, + model: new ChatOpenAI({ + apiKey: lemonadeApiKey || 'lemonade-key', + modelName: model.id, + temperature: 0.7, + configuration: { + baseURL: `${lemonadeApiEndpoint}/api/v1`, + }, + }), + }; + }); + + return chatModels; + } catch (err) { + console.error(`Error loading Lemonade models: ${err}`); + return {}; + } +}; + +export const loadLemonadeEmbeddingModels = async () => { + const lemonadeApiEndpoint = getLemonadeApiEndpoint(); + const lemonadeApiKey = getLemonadeApiKey(); + + if (!lemonadeApiEndpoint) return {}; + + try { + const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, { + headers: { + 'Content-Type': 'application/json', + ...(lemonadeApiKey + ? { Authorization: `Bearer ${lemonadeApiKey}` } + : {}), + }, + }); + + const { data: models } = res.data; + + const embeddingModels: Record = {}; + + // Filter models that support embeddings (if Lemonade provides this info) + // For now, we'll assume all models can be used for embeddings + models.forEach((model: any) => { + embeddingModels[model.id] = { + displayName: model.id, + model: new OpenAIEmbeddings({ + apiKey: lemonadeApiKey || 'lemonade-key', + modelName: model.id, + configuration: { + baseURL: `${lemonadeApiEndpoint}/api/v1`, + }, + }), + }; + }); + + return embeddingModels; + } catch (err) { + console.error(`Error loading Lemonade embedding models: ${err}`); + return {}; + } +}; \ No newline at end of file From 5b12e9933570e4d1c1bd5aa7169fc28aafaf30f9 Mon Sep 17 00:00:00 2001 From: Ramakrishnan Sivakumar Date: Wed, 17 Sep 2025 12:53:11 -0700 Subject: [PATCH 6/7] update docs --- README.md | 3 +-- sample.config.toml | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3a8b4c6..bde16cd 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,6 @@ Want to know more about its architecture and how it works? You can read it [here ## Features -- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, Mistral, and Lemonade. - **Two Main Modes:** - **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page. - **Normal Mode:** Processes your query and performs a web search. @@ -91,7 +90,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker. - `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"` **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**. - `OLLAMA`: Your Ollama API URL. 
You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**. - `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**. - - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**. + - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.` - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**. - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**. - `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.** diff --git a/sample.config.toml b/sample.config.toml index be0573e..90c69e7 100644 --- a/sample.config.toml +++ b/sample.config.toml @@ -32,7 +32,7 @@ API_KEY = "" # Required to use AI/ML API chat and embedding models API_URL = "" # LM Studio API URL - http://host.docker.internal:1234 [MODELS.LEMONADE] -API_URL = "" # Lemonade API URL - http://localhost:8000 +API_URL = "" # Lemonade API URL - http://host.docker.internal:8000 API_KEY = "" # Optional API key for Lemonade [API_ENDPOINTS] From 8afcdd044c1cf131d6b462d55bba66e7c505c52e Mon Sep 17 00:00:00 2001 From: Ramakrishnan Sivakumar Date: Wed, 17 Sep 2025 12:54:59 -0700 Subject: [PATCH 7/7] Update readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index bde16cd..8ad9e37 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ Want to know more about its architecture and how it works? You can read it [here ## Features +- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral. - **Two Main Modes:** - **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page. - **Normal Mode:** Processes your query and performs a web search.
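Taken together, patches 2 and 3 converge on a single pattern for the temperature fix: keep a list of model names that reject a custom `temperature`, and only spread `temperature: 0.7` into the `ChatOpenAI` options when the model is not in that list. The following standalone sketch restates that pattern outside the diffs; the helper name `buildOpenAIModelConfig` and the environment-variable usage are illustrative assumptions, not code from the patches.

```ts
import { ChatOpenAI } from '@langchain/openai';

// Models that only accept the default temperature (sending another value causes a 400 error).
const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];

// Hypothetical helper, for illustration: builds ChatOpenAI options with or without temperature.
const buildOpenAIModelConfig = (apiKey: string, modelName: string) => {
  const isTemperatureRestricted = temperatureRestrictedModels.some((restricted) =>
    modelName.includes(restricted),
  );

  return {
    apiKey,
    modelName,
    // Omit `temperature` entirely for restricted models instead of forcing a value.
    ...(isTemperatureRestricted ? {} : { temperature: 0.7 }),
  };
};

// Usage: only the gpt-4o instance ends up with an explicit temperature.
const o3Mini = new ChatOpenAI(buildOpenAIModelConfig(process.env.OPENAI_API_KEY!, 'o3-mini'));
const gpt4o = new ChatOpenAI(buildOpenAIModelConfig(process.env.OPENAI_API_KEY!, 'gpt-4o'));
```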