diff --git a/README.md b/README.md
index e01f109..e783b5b 100644
--- a/README.md
+++ b/README.md
@@ -89,6 +89,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
   - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
   - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
   - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
+  - `OPENROUTER`: Your OpenRouter API key. **You only need to fill this if you wish to use models via OpenRouter**.
   - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
 
 **Note**: You can change these after starting Perplexica from the settings dialog.
diff --git a/sample.config.toml b/sample.config.toml
index 691b964..73a4c09 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -1,6 +1,6 @@
 [GENERAL]
-SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
-KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
+SIMILARITY_MEASURE = "cosine"
+KEEP_ALIVE = "5m"
 
 [MODELS.OPENAI]
 API_KEY = ""
@@ -8,6 +8,9 @@ API_KEY = ""
 [MODELS.GROQ]
 API_KEY = ""
 
+[MODELS.OPENROUTER]
+API_KEY = ""
+
 [MODELS.ANTHROPIC]
 API_KEY = ""
 
@@ -20,7 +23,7 @@ API_URL = ""
 MODEL_NAME = ""
 
 [MODELS.OLLAMA]
-API_URL = "" # Ollama API URL - http://host.docker.internal:11434
+API_URL = ""
 
 [API_ENDPOINTS]
-SEARXNG = "" # SearxNG API URL - http://localhost:32768
\ No newline at end of file
+SEARXNG = ""
diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts
index 871bb21..ca490df 100644
--- a/src/app/api/config/route.ts
+++ b/src/app/api/config/route.ts
@@ -5,6 +5,7 @@ import {
   getCustomOpenaiModelName,
   getGeminiApiKey,
   getGroqApiKey,
+  getOpenrouterApiKey,
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   updateConfig,
@@ -52,6 +53,7 @@ export const GET = async (req: Request) => {
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
+    config['openrouterApiKey'] = getOpenrouterApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
@@ -79,6 +81,9 @@ export const POST = async (req: Request) => {
       GROQ: {
         API_KEY: config.groqApiKey,
       },
+      OPENROUTER: {
+        API_KEY: config.openrouterApiKey,
+      },
       ANTHROPIC: {
         API_KEY: config.anthropicApiKey,
       },
diff --git a/src/app/api/search/route.ts b/src/app/api/search/route.ts
index 59d84ff..563cebe 100644
--- a/src/app/api/search/route.ts
+++ b/src/app/api/search/route.ts
@@ -125,7 +125,7 @@ export const POST = async (req: Request) => {
       embeddings,
       body.optimizationMode,
       [],
-      "",
+      '',
     );
 
     if (!body.stream) {
diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 8e1c45a..c5a6acd 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -17,6 +17,7 @@ interface SettingsType {
   };
   openaiApiKey: string;
   groqApiKey: string;
+  openrouterApiKey: string;
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
@@ -801,6 +802,25 @@ const Page = () => {
               />
             </div>
 
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                OpenRouter API Key
+              </p>
+              <Input
+                type="text"
+                placeholder="OpenRouter API Key"
+                value={config.openrouterApiKey}
+                isSaving={savingStates['openrouterApiKey']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    openrouterApiKey: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('openrouterApiKey', value)}
+              />
+            </div>
+
             <div className="flex flex-col space-y-1">
               <p className="text-black/70 dark:text-white/70 text-sm">
                 Anthropic API Key
diff --git a/src/lib/config.ts b/src/lib/config.ts
index ef99eed..d0b3f99 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -25,6 +25,9 @@ interface Config {
   OLLAMA: {
     API_URL: string;
   };
+  OPENROUTER: {
+    API_KEY: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -54,6 +57,8 @@ export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
 
 export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;
 
+export const getOpenrouterApiKey = () => loadConfig().MODELS.OPENROUTER.API_KEY;
+
 export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
 
 export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index c32d0fa..b01be75 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -12,6 +12,7 @@ import { loadGroqChatModels } from './groq';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
 import { loadTransformersEmbeddingsModels } from './transformers';
+import { loadOpenrouterChatModels } from '@/lib/providers/openrouter';
 
 export interface ChatModel {
   displayName: string;
@@ -32,6 +33,7 @@
   groq: loadGroqChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  openrouter: loadOpenrouterChatModels,
 };
 
 export const embeddingModelProviders: Record<
diff --git a/src/lib/providers/openrouter.ts b/src/lib/providers/openrouter.ts
new file mode 100644
index 0000000..aec551e
--- /dev/null
+++ b/src/lib/providers/openrouter.ts
@@ -0,0 +1,61 @@
+import { ChatOpenAI } from '@langchain/openai';
+import { getOpenrouterApiKey } from '../config';
+import { ChatModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+let openrouterChatModels: Record<string, any>[] = [];
+
+async function fetchModelList(): Promise<void> {
+  try {
+    const response = await fetch('https://openrouter.ai/api/v1/models', {
+      method: 'GET',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    });
+
+    if (!response.ok) {
+      throw new Error(`API request failed with status: ${response.status}`);
+    }
+
+    const data = await response.json();
+
+    openrouterChatModels = data.data.map((model: any) => ({
+      displayName: model.name,
+      key: model.id,
+    }));
+  } catch (error) {
+    console.error('Error fetching models:', error);
+  }
+}
+
+export const loadOpenrouterChatModels = async () => {
+  await fetchModelList();
+
+  const openrouterApikey = getOpenrouterApiKey();
+
+  if (!openrouterApikey) return {};
+
+  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    openrouterChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: openrouterApikey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://openrouter.ai/api/v1',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading OpenRouter models: ${err}`);
+    return {};
+  }
+};
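
For reference, a minimal sketch (not part of the patch) of how the new loader can be exercised, assuming `OPENROUTER.API_KEY` is set in `config.toml`. The model id below is hypothetical; substitute any id returned by `https://openrouter.ai/api/v1/models`:

```ts
import { loadOpenrouterChatModels } from '@/lib/providers/openrouter';
import { ChatModel } from '@/lib/providers';

const main = async () => {
  // Fetches OpenRouter's public model list and builds one ChatOpenAI
  // instance per model id, keyed by that id.
  const models = (await loadOpenrouterChatModels()) as Record<string, ChatModel>;

  // Hypothetical id; substitute any key present in `models`.
  const entry = models['openai/gpt-4o-mini'];
  if (!entry) throw new Error('Model not listed by OpenRouter');

  // `entry.model` is a LangChain BaseChatModel, so invoke() accepts a prompt string.
  const reply = await entry.model.invoke('Reply with one word: ping');
  console.log(reply.content);
};

main().catch(console.error);
```

Because OpenRouter exposes an OpenAI-compatible API, the loader simply points `ChatOpenAI` at `https://openrouter.ai/api/v1` rather than pulling in a separate LangChain integration.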