From 5b5e83a3a00e01936a025b8df40af0b35975db7d Mon Sep 17 00:00:00 2001
From: Ramakrishnan Sivakumar
Date: Wed, 17 Sep 2025 12:28:02 -0700
Subject: [PATCH] Add Lemonade integration

---
 README.md                     | 23 ++++++++-
 sample.config.toml            |  4 ++
 src/app/api/config/route.ts   |  8 +++
 src/app/settings/page.tsx     | 44 ++++++++++++++++
 src/lib/config.ts             |  9 ++++
 src/lib/providers/index.ts    |  8 +++
 src/lib/providers/lemonade.ts | 94 +++++++++++++++++++++++++++++++++++
 7 files changed, 189 insertions(+), 1 deletion(-)
 create mode 100644 src/lib/providers/lemonade.ts

diff --git a/README.md b/README.md
index fa63ff1..3a8b4c6 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,7 @@
   - [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
   - [Non-Docker Installation](#non-docker-installation)
   - [Ollama Connection Errors](#ollama-connection-errors)
+  - [Lemonade Connection Errors](#lemonade-connection-errors)
 - [Using as a Search Engine](#using-as-a-search-engine)
 - [Using Perplexica's API](#using-perplexicas-api)
 - [Expose Perplexica to a network](#expose-perplexica-to-network)
@@ -53,7 +54,7 @@ Want to know more about its architecture and how it works? You can read it [here
 
 ## Features
 
-- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral.
+- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral through local servers such as Ollama and Lemonade.
 - **Two Main Modes:**
   - **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page.
   - **Normal Mode:** Processes your query and performs a web search.
@@ -89,6 +90,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
   - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
   - `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"`. **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**.
   - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
+  - `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**.
   - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
  - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
  - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
@@ -150,6 +152,25 @@ If you're encountering an Ollama connection error, it is likely due to the backend being unable to connect to Ollama's API. To fix this issue you can:
 
 - Ensure that the port (default is 11434) is not blocked by your firewall.
 
+#### Lemonade Connection Errors
+
+If you're encountering a Lemonade connection error, it is likely due to the backend being unable to connect to Lemonade's API. To fix this issue you can:
+
+1. **Check your Lemonade API URL:** Ensure that the API URL is correctly set in the settings menu.
+2. **Update API URL Based on OS:**
+
+   - **Windows:** Use `http://host.docker.internal:8000`
+   - **Mac:** Use `http://host.docker.internal:8000`
+   - **Linux:** Use `http://<private_ip_of_host>:8000`
+
+   Adjust the port number if you're using a different one.
+
+3. **Ensure Lemonade Server is Running:**
+
+   - Make sure your Lemonade server is running and accessible on the configured port (default is 8000).
+   - Verify that Lemonade is configured to accept connections on all interfaces (`0.0.0.0`), not just localhost (`127.0.0.1`).
+   - Ensure that the port (default is 8000) is not blocked by your firewall.
+
 ## Using as a Search Engine
 
 If you wish to use Perplexica as an alternative to traditional search engines like Google or Bing, or if you want to add a shortcut for quick access from your browser's search bar, follow these steps:

diff --git a/sample.config.toml b/sample.config.toml
index ba3e98e..be0573e 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -31,5 +31,9 @@ API_KEY = "" # Required to use AI/ML API chat and embedding models
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
 
+[MODELS.LEMONADE]
+API_URL = "" # Lemonade API URL - http://localhost:8000
+API_KEY = "" # Optional API key for Lemonade
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
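A filled-in version of the new `[MODELS.LEMONADE]` block, for reference (a sketch; the values are the defaults mentioned elsewhere in this patch, so adjust host and port to your setup):

    [MODELS.LEMONADE]
    API_URL = "http://host.docker.internal:8000" # use http://localhost:8000 outside Docker
    API_KEY = "" # optional; leave empty if your Lemonade server has no key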
diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts
index f117cce..5f66fdf 100644
--- a/src/app/api/config/route.ts
+++ b/src/app/api/config/route.ts
@@ -10,6 +10,8 @@ import {
   getDeepseekApiKey,
   getAimlApiKey,
   getLMStudioApiEndpoint,
+  getLemonadeApiEndpoint,
+  getLemonadeApiKey,
   updateConfig,
   getOllamaApiKey,
 } from '@/lib/config';
@@ -56,6 +58,8 @@ export const GET = async (req: Request) => {
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
     config['ollamaApiKey'] = getOllamaApiKey();
     config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
+    config['lemonadeApiUrl'] = getLemonadeApiEndpoint();
+    config['lemonadeApiKey'] = getLemonadeApiKey();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
@@ -106,6 +110,10 @@ export const POST = async (req: Request) => {
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },
+      LEMONADE: {
+        API_URL: config.lemonadeApiUrl,
+        API_KEY: config.lemonadeApiKey,
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,

diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 6fb8255..1af53f9 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -23,6 +23,8 @@ interface SettingsType {
   ollamaApiUrl: string;
   ollamaApiKey: string;
   lmStudioApiUrl: string;
+  lemonadeApiUrl: string;
+  lemonadeApiKey: string;
   deepseekApiKey: string;
   aimlApiKey: string;
   customOpenaiApiKey: string;
@@ -953,6 +955,48 @@ const Page = () => {
+
+          <div className="flex flex-col space-y-4 lg:space-y-4 lg:flex-row lg:space-x-4">
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                Lemonade API URL
+              </p>
+              <Input
+                type="text"
+                placeholder="Lemonade API URL"
+                value={config.lemonadeApiUrl}
+                isSaving={savingStates['lemonadeApiUrl']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lemonadeApiUrl: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('lemonadeApiUrl', value)}
+              />
+            </div>
+
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                Lemonade API Key (Optional)
+              </p>
+              <Input
+                type="text"
+                placeholder="Lemonade API Key"
+                value={config.lemonadeApiKey}
+                isSaving={savingStates['lemonadeApiKey']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lemonadeApiKey: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('lemonadeApiKey', value)}
+              />
+            </div>
+          </div>
           )
         )}

diff --git a/src/lib/config.ts b/src/lib/config.ts
index 79d69dc..b79ec94 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -42,6 +42,10 @@ interface Config {
   LM_STUDIO: {
     API_URL: string;
   };
+  LEMONADE: {
+    API_URL: string;
+    API_KEY: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -105,6 +109,11 @@ export const getCustomOpenaiModelName = () =>
 export const getLMStudioApiEndpoint = () =>
   loadConfig().MODELS.LM_STUDIO.API_URL;
 
+export const getLemonadeApiEndpoint = () =>
+  loadConfig().MODELS.LEMONADE.API_URL;
+
+export const getLemonadeApiKey = () => loadConfig().MODELS.LEMONADE.API_KEY;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;

diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index 00ba60f..4cb3fe7 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -45,6 +45,11 @@ import {
   loadLMStudioEmbeddingsModels,
   PROVIDER_INFO as LMStudioInfo,
 } from './lmstudio';
+import {
+  loadLemonadeChatModels,
+  loadLemonadeEmbeddingModels,
+  PROVIDER_INFO as LemonadeInfo,
+} from './lemonade';
 
 export const PROVIDER_METADATA = {
   openai: OpenAIInfo,
@@ -56,6 +61,7 @@ export const PROVIDER_METADATA = {
   deepseek: DeepseekInfo,
   aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
+  lemonade: LemonadeInfo,
   custom_openai: {
     key: 'custom_openai',
     displayName: 'Custom OpenAI',
@@ -84,6 +90,7 @@ export const chatModelProviders: Record<
   deepseek: loadDeepseekChatModels,
   aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
+  lemonade: loadLemonadeChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -96,6 +103,7 @@ export const embeddingModelProviders: Record<
   transformers: loadTransformersEmbeddingsModels,
   aimlapi: loadAimlApiEmbeddingModels,
   lmstudio: loadLMStudioEmbeddingsModels,
+  lemonade: loadLemonadeEmbeddingModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
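The provider added below discovers models through Lemonade's OpenAI-compatible `GET /api/v1/models` endpoint. Here is a minimal standalone sketch of that same request, handy for verifying the URL and key before wiring them into Perplexica (the endpoint value and the `LEMONADE_API_KEY` variable are assumptions for illustration):

    import axios from 'axios';

    const endpoint = 'http://localhost:8000'; // your Lemonade API URL
    const apiKey = process.env.LEMONADE_API_KEY; // optional

    // Same request shape as loadLemonadeChatModels below: list the
    // available models, attaching the bearer token only if a key is set.
    axios
      .get(`${endpoint}/api/v1/models`, {
        headers: {
          'Content-Type': 'application/json',
          ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
        },
      })
      .then((res) => console.log(res.data.data.map((m: any) => m.id)))
      .catch((err) => console.error('Lemonade unreachable:', err.message));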
diff --git a/src/lib/providers/lemonade.ts b/src/lib/providers/lemonade.ts
new file mode 100644
index 0000000..7d34552
--- /dev/null
+++ b/src/lib/providers/lemonade.ts
@@ -0,0 +1,94 @@
+import axios from 'axios';
+import { getLemonadeApiEndpoint, getLemonadeApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lemonade',
+  displayName: 'Lemonade',
+};
+
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+
+export const loadLemonadeChatModels = async () => {
+  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
+  const lemonadeApiKey = getLemonadeApiKey();
+
+  if (!lemonadeApiEndpoint) return {};
+
+  try {
+    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        ...(lemonadeApiKey
+          ? { Authorization: `Bearer ${lemonadeApiKey}` }
+          : {}),
+      },
+    });
+
+    const { data: models } = res.data;
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    models.forEach((model: any) => {
+      chatModels[model.id] = {
+        displayName: model.id,
+        model: new ChatOpenAI({
+          apiKey: lemonadeApiKey || 'lemonade-key',
+          modelName: model.id,
+          temperature: 0.7,
+          configuration: {
+            baseURL: `${lemonadeApiEndpoint}/api/v1`,
+          },
+        }),
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Lemonade models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLemonadeEmbeddingModels = async () => {
+  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
+  const lemonadeApiKey = getLemonadeApiKey();
+
+  if (!lemonadeApiEndpoint) return {};
+
+  try {
+    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        ...(lemonadeApiKey
+          ? { Authorization: `Bearer ${lemonadeApiKey}` }
+          : {}),
+      },
+    });
+
+    const { data: models } = res.data;
+
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    // Filter models that support embeddings (if Lemonade provides this info)
+    // For now, we'll assume all models can be used for embeddings
+    models.forEach((model: any) => {
+      embeddingModels[model.id] = {
+        displayName: model.id,
+        model: new OpenAIEmbeddings({
+          apiKey: lemonadeApiKey || 'lemonade-key',
+          modelName: model.id,
+          configuration: {
+            baseURL: `${lemonadeApiEndpoint}/api/v1`,
+          },
+        }),
+      };
+    });
+
+    return embeddingModels;
+  } catch (err) {
+    console.error(`Error loading Lemonade embedding models: ${err}`);
+    return {};
+  }
+};
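For context on how these loaders are consumed: each returned entry pairs a display name with a LangChain `ChatOpenAI` instance whose `baseURL` points at Lemonade's `/api/v1`, so it can be invoked like any other provider's model. A minimal usage sketch (assumes a running Lemonade server; the prompt and logging are illustrative):

    import { loadLemonadeChatModels } from './src/lib/providers/lemonade';

    const main = async () => {
      const models = await loadLemonadeChatModels();
      const entry = Object.values(models)[0];
      if (!entry) {
        console.log('No Lemonade models found; is the server running?');
        return;
      }
      // entry.model is a ChatOpenAI instance configured with Lemonade's
      // OpenAI-compatible base URL, so invoke() works as usual.
      const reply = await entry.model.invoke('Reply with one word: pong');
      console.log(entry.displayName, '->', reply.content);
    };

    main();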