Compare commits


3 Commits

Author        SHA1        Message                                     Date
Naman Bansal  bdba92f452  Merge 7288c97326 into 09661ae11d            2025-04-02 17:06:29 +05:30
namanb        7288c97326  feat(providers): changed readme as well     2025-04-02 12:26:38 +05:30
namanb        3545137bc0  feat(providers): added openrouter support   2025-04-02 12:24:27 +05:30
20 changed files with 126 additions and 394 deletions

View File

@@ -89,6 +89,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
 - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
 - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
+- `OPENROUTER`: Your OpenRouter API key. **You only need to fill this if you wish to use models via OpenRouter**.
 - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.

 **Note**: You can change these after starting Perplexica from the settings dialog.
@@ -159,7 +160,6 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
 [![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
-[![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)

 ## Upcoming Features

View File

@@ -33,7 +33,6 @@ The API accepts a JSON object in the request body, where you define the focus mo
["human", "Hi, how are you?"], ["human", "Hi, how are you?"],
["assistant", "I am doing well, how can I help you today?"] ["assistant", "I am doing well, how can I help you today?"]
], ],
"systemInstructions": "Focus on providing technical details about Perplexica's architecture.",
"stream": false "stream": false
} }
``` ```
@@ -64,8 +63,6 @@ The API accepts a JSON object in the request body, where you define the focus mo
 - **`query`** (string, required): The search query or question.
-- **`systemInstructions`** (string, optional): Custom instructions provided by the user to guide the AI's response. These instructions are treated as user preferences and have lower priority than the system's core instructions. For example, you can specify a particular writing style, format, or focus area.
 - **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:

   ```json

View File

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.10.2",
+  "version": "1.10.1",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {

View File

@@ -11,6 +11,9 @@ API_KEY = ""
 [MODELS.ANTHROPIC]
 API_KEY = ""

+[MODELS.OPENROUTER]
+API_KEY = ""
+
 [MODELS.GEMINI]
 API_KEY = ""
@@ -22,11 +25,5 @@ MODEL_NAME = ""
 [MODELS.OLLAMA]
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434

-[MODELS.DEEPSEEK]
-API_KEY = ""
-
-[MODELS.LM_STUDIO]
-API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
-
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
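Reviewer note: the sample config gains a `[MODELS.OPENROUTER]` block mirroring the other providers. A minimal sketch of how this section gets consumed, assuming the repo's `@iarna/toml` parser and a `config.toml` at the project root (see the config module later in this compare):

```ts
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';

// Parse config.toml and read the new OpenRouter key, mirroring what
// getOpenrouterApiKey() does in the config module further down this compare.
const config = toml.parse(
  fs.readFileSync(path.join(process.cwd(), 'config.toml'), 'utf-8'),
) as any;

const openrouterKey: string = config.MODELS?.OPENROUTER?.API_KEY ?? '';
console.log(openrouterKey ? 'OpenRouter key configured' : 'no OpenRouter key');
```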

View File

@@ -5,10 +5,9 @@ import {
   getCustomOpenaiModelName,
   getGeminiApiKey,
   getGroqApiKey,
+  getOpenrouterApiKey,
   getOllamaApiEndpoint,
   getOpenaiApiKey,
-  getDeepseekApiKey,
-  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -52,11 +51,10 @@ export const GET = async (req: Request) => {
   config['openaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
-  config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
   config['anthropicApiKey'] = getAnthropicApiKey();
   config['groqApiKey'] = getGroqApiKey();
+  config['openrouterApiKey'] = getOpenrouterApiKey();
   config['geminiApiKey'] = getGeminiApiKey();
-  config['deepseekApiKey'] = getDeepseekApiKey();
   config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
   config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
   config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -83,6 +81,9 @@ export const POST = async (req: Request) => {
       GROQ: {
         API_KEY: config.groqApiKey,
       },
+      OPENROUTER: {
+        API_KEY: config.openrouterApiKey,
+      },
       ANTHROPIC: {
         API_KEY: config.anthropicApiKey,
       },
@@ -92,12 +93,6 @@ export const POST = async (req: Request) => {
       OLLAMA: {
         API_URL: config.ollamaApiUrl,
       },
-      DEEPSEEK: {
-        API_KEY: config.deepseekApiKey,
-      },
-      LM_STUDIO: {
-        API_URL: config.lmStudioApiUrl,
-      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
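Reviewer note: with these changes the settings route round-trips `openrouterApiKey` the same way as the other keys. A hedged sketch of exercising it from the client, assuming the route is mounted at `/api/config` (the file path is not shown in this compare view):

```ts
// Hypothetical smoke test for the new field; '/api/config' and the key
// value are assumptions, not taken from this diff.
const res = await fetch('/api/config');
const settings = await res.json();

await fetch('/api/config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ ...settings, openrouterApiKey: 'sk-or-...' }),
});
```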

View File

@@ -34,7 +34,6 @@ interface ChatRequestBody {
   query: string;
   history: Array<[string, string]>;
   stream?: boolean;
-  systemInstructions?: string;
 }

 export const POST = async (req: Request) => {
@@ -126,7 +125,7 @@ export const POST = async (req: Request) => {
       embeddings,
       body.optimizationMode,
       [],
-      body.systemInstructions || '',
+      '',
     );

     if (!body.stream) {

View File

@@ -7,7 +7,6 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
-import { PROVIDER_METADATA } from '@/lib/providers';

 interface SettingsType {
   chatModelProviders: {
@@ -18,11 +17,10 @@ interface SettingsType {
   };
   openaiApiKey: string;
   groqApiKey: string;
+  openrouterApiKey: string;
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
-  lmStudioApiUrl: string;
-  deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
@@ -550,7 +548,6 @@ const Page = () => {
       (provider) => ({
         value: provider,
         label:
-          (PROVIDER_METADATA as any)[provider]?.displayName ||
           provider.charAt(0).toUpperCase() +
           provider.slice(1),
       }),
@@ -693,7 +690,6 @@ const Page = () => {
       (provider) => ({
         value: provider,
         label:
-          (PROVIDER_METADATA as any)[provider]?.displayName ||
           provider.charAt(0).toUpperCase() +
           provider.slice(1),
       }),
@@ -806,6 +802,25 @@ const Page = () => {
             />
           </div>
+          <div className="flex flex-col space-y-1">
+            <p className="text-black/70 dark:text-white/70 text-sm">
+              OpenRouter API Key
+            </p>
+            <Input
+              type="text"
+              placeholder="OpenRouter API Key"
+              value={config.openrouterApiKey}
+              isSaving={savingStates['openrouterApiKey']}
+              onChange={(e) => {
+                setConfig((prev) => ({
+                  ...prev!,
+                  openrouterApiKey: e.target.value,
+                }));
+              }}
+              onSave={(value) => saveConfig('openrouterApiKey', value)}
+            />
+          </div>
           <div className="flex flex-col space-y-1">
             <p className="text-black/70 dark:text-white/70 text-sm">
               Anthropic API Key
@@ -843,44 +858,6 @@ const Page = () => {
               onSave={(value) => saveConfig('geminiApiKey', value)}
             />
           </div>
-          <div className="flex flex-col space-y-1">
-            <p className="text-black/70 dark:text-white/70 text-sm">
-              Deepseek API Key
-            </p>
-            <Input
-              type="text"
-              placeholder="Deepseek API Key"
-              value={config.deepseekApiKey}
-              isSaving={savingStates['deepseekApiKey']}
-              onChange={(e) => {
-                setConfig((prev) => ({
-                  ...prev!,
-                  deepseekApiKey: e.target.value,
-                }));
-              }}
-              onSave={(value) => saveConfig('deepseekApiKey', value)}
-            />
-          </div>
-          <div className="flex flex-col space-y-1">
-            <p className="text-black/70 dark:text-white/70 text-sm">
-              LM Studio API URL
-            </p>
-            <Input
-              type="text"
-              placeholder="LM Studio API URL"
-              value={config.lmStudioApiUrl}
-              isSaving={savingStates['lmStudioApiUrl']}
-              onChange={(e) => {
-                setConfig((prev) => ({
-                  ...prev!,
-                  lmStudioApiUrl: e.target.value,
-                }));
-              }}
-              onSave={(value) => saveConfig('lmStudioApiUrl', value)}
-            />
-          </div>
         </div>
       </SettingsSection>
     </div>

View File

@@ -48,7 +48,6 @@ const MessageBox = ({
   const [speechMessage, setSpeechMessage] = useState(message.content);

   useEffect(() => {
-    const citationRegex = /\[([^\]]+)\]/g;
     const regex = /\[(\d+)\]/g;
     let processedMessage = message.content;
@@ -68,36 +67,13 @@ const MessageBox = ({
     ) {
       setParsedMessage(
         processedMessage.replace(
-          citationRegex,
-          (_, capturedContent: string) => {
-            const numbers = capturedContent
-              .split(',')
-              .map((numStr) => numStr.trim());
-
-            const linksHtml = numbers
-              .map((numStr) => {
-                const number = parseInt(numStr);
-
-                if (isNaN(number) || number <= 0) {
-                  return `[${numStr}]`;
-                }
-
-                const source = message.sources?.[number - 1];
-                const url = source?.metadata?.url;
-
-                if (url) {
-                  return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
-                } else {
-                  return `[${numStr}]`;
-                }
-              })
-              .join('');
-
-            return linksHtml;
-          },
+          regex,
+          (_, number) =>
+            `<a href="${
+              message.sources?.[number - 1]?.metadata?.url
+            }" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
         ),
       );
-      setSpeechMessage(message.content.replace(regex, ''));
       return;
     }
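Reviewer note: because this PR branch predates the grouped-citation parser on the base branch, merging as-is would revert it, and citations like `[1, 2]` would render unlinked again. A quick sketch of the behavioral difference between the two regexes:

```ts
// How the two patterns treat a grouped citation such as "[1, 2]".
const grouped = /\[([^\]]+)\]/g; // base branch: "[1, 2]" matches as one group
const single = /\[(\d+)\]/g; // PR branch: only single numbers like "[3]" match

const text = 'Cited in [1, 2] and [3].';
console.log(text.match(grouped)); // [ '[1, 2]', '[3]' ]
console.log(text.match(single)); // [ '[3]' ]; "[1, 2]" is left as plain text
```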

View File

@@ -1,14 +1,7 @@
+import fs from 'fs';
+import path from 'path';
 import toml from '@iarna/toml';

-// Use dynamic imports for Node.js modules to prevent client-side errors
-let fs: any;
-let path: any;
-
-if (typeof window === 'undefined') {
-  // We're on the server
-  fs = require('fs');
-  path = require('path');
-}

 const configFileName = 'config.toml';

 interface Config {
@@ -32,12 +25,9 @@ interface Config {
   OLLAMA: {
     API_URL: string;
   };
-  DEEPSEEK: {
+  OPENROUTER: {
     API_KEY: string;
   };
-  LM_STUDIO: {
-    API_URL: string;
-  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -53,17 +43,10 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };

-const loadConfig = () => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    return toml.parse(
-      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-    ) as any as Config;
-  }
-
-  // Client-side fallback - settings will be loaded via API
-  return {} as Config;
-};
+const loadConfig = () =>
+  toml.parse(
+    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+  ) as any as Config;

 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -74,6 +57,8 @@ export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
 export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;

+export const getOpenrouterApiKey = () => loadConfig().MODELS.OPENROUTER.API_KEY;
+
 export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;

 export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
@@ -83,8 +68,6 @@ export const getSearxngApiEndpoint = () =>
 export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;

-export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
-
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@@ -94,9 +77,6 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;

-export const getLMStudioApiEndpoint = () =>
-  loadConfig().MODELS.LM_STUDIO.API_URL;
-
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -129,13 +109,10 @@ const mergeConfigs = (current: any, update: any): any => {
 };

 export const updateConfig = (config: RecursivePartial<Config>) => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    const currentConfig = loadConfig();
-    const mergedConfig = mergeConfigs(currentConfig, config);
-    fs.writeFileSync(
-      path.join(path.join(process.cwd(), `${configFileName}`)),
-      toml.stringify(mergedConfig),
-    );
-  }
+  const currentConfig = loadConfig();
+  const mergedConfig = mergeConfigs(currentConfig, config);
+  fs.writeFileSync(
+    path.join(path.join(process.cwd(), `${configFileName}`)),
+    toml.stringify(mergedConfig),
+  );
 };
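Reviewer note: the branch also reverts the `typeof window` guards, so `loadConfig` and `updateConfig` again touch `fs` unconditionally and become server-only. Usage stays a deep merge of a partial config; a minimal sketch, with the key value as a placeholder:

```ts
import { updateConfig } from '@/lib/config';

// updateConfig deep-merges a RecursivePartial<Config> into the parsed TOML
// and writes it back, so this only touches MODELS.OPENROUTER.API_KEY.
updateConfig({
  MODELS: {
    OPENROUTER: {
      API_KEY: 'sk-or-placeholder', // hypothetical value
    },
  },
});
```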

View File

@@ -1,11 +1,6 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
-
-export const PROVIDER_INFO = {
-  key: 'anthropic',
-  displayName: 'Anthropic',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const anthropicChatModels: Record<string, string>[] = [

View File

@@ -1,49 +0,0 @@
-import { ChatOpenAI } from '@langchain/openai';
-import { getDeepseekApiKey } from '../config';
-import { ChatModel } from '.';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-export const PROVIDER_INFO = {
-  key: 'deepseek',
-  displayName: 'Deepseek AI',
-};
-
-const deepseekChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Deepseek Chat (Deepseek V3)',
-    key: 'deepseek-chat',
-  },
-  {
-    displayName: 'Deepseek Reasoner (Deepseek R1)',
-    key: 'deepseek-reasoner',
-  },
-];
-
-export const loadDeepseekChatModels = async () => {
-  const deepseekApiKey = getDeepseekApiKey();
-
-  if (!deepseekApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    deepseekChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatOpenAI({
-          openAIApiKey: deepseekApiKey,
-          modelName: model.key,
-          temperature: 0.7,
-          configuration: {
-            baseURL: 'https://api.deepseek.com',
-          },
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Deepseek models: ${err}`);
-    return {};
-  }
-};

View File

@@ -4,11 +4,6 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'gemini',
-  displayName: 'Google Gemini',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
@@ -45,12 +40,8 @@ const geminiChatModels: Record<string, string>[] = [
 const geminiEmbeddingModels: Record<string, string>[] = [
   {
-    displayName: 'Text Embedding 004',
-    key: 'models/text-embedding-004',
-  },
-  {
-    displayName: 'Embedding 001',
-    key: 'models/embedding-001',
+    displayName: 'Gemini Embedding',
+    key: 'gemini-embedding-exp',
   },
 ];

View File

@@ -1,11 +1,6 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'groq',
-  displayName: 'Groq',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const groqChatModels: Record<string, string>[] = [
@@ -77,14 +72,6 @@ const groqChatModels: Record<string, string>[] = [
     displayName: 'Llama 3.2 90B Vision Preview (Preview)',
     key: 'llama-3.2-90b-vision-preview',
   },
-  /* {
-    displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
-    key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
-  }, */
-  {
-    displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
-    key: 'meta-llama/llama-4-scout-17b-16e-instruct',
-  },
 ];

 export const loadGroqChatModels = async () => {

View File

@@ -1,60 +1,18 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import {
-  loadOpenAIChatModels,
-  loadOpenAIEmbeddingModels,
-  PROVIDER_INFO as OpenAIInfo,
-  PROVIDER_INFO,
-} from './openai';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import {
-  loadOllamaChatModels,
-  loadOllamaEmbeddingModels,
-  PROVIDER_INFO as OllamaInfo,
-} from './ollama';
-import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
-import {
-  loadAnthropicChatModels,
-  PROVIDER_INFO as AnthropicInfo,
-} from './anthropic';
-import {
-  loadGeminiChatModels,
-  loadGeminiEmbeddingModels,
-  PROVIDER_INFO as GeminiInfo,
-} from './gemini';
-import {
-  loadTransformersEmbeddingsModels,
-  PROVIDER_INFO as TransformersInfo,
-} from './transformers';
-import {
-  loadDeepseekChatModels,
-  PROVIDER_INFO as DeepseekInfo,
-} from './deepseek';
-import {
-  loadLMStudioChatModels,
-  loadLMStudioEmbeddingsModels,
-  PROVIDER_INFO as LMStudioInfo,
-} from './lmstudio';
-
-export const PROVIDER_METADATA = {
-  openai: OpenAIInfo,
-  ollama: OllamaInfo,
-  groq: GroqInfo,
-  anthropic: AnthropicInfo,
-  gemini: GeminiInfo,
-  transformers: TransformersInfo,
-  deepseek: DeepseekInfo,
-  lmstudio: LMStudioInfo,
-  custom_openai: {
-    key: 'custom_openai',
-    displayName: 'Custom OpenAI',
-  },
-};
+import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
+import { loadGroqChatModels } from './groq';
+import { loadAnthropicChatModels } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
+import { loadTransformersEmbeddingsModels } from './transformers';
+import { loadOpenrouterChatModels } from '@/lib/providers/openrouter';

 export interface ChatModel {
   displayName: string;
@@ -75,8 +33,7 @@ export const chatModelProviders: Record<
   groq: loadGroqChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
-  deepseek: loadDeepseekChatModels,
-  lmstudio: loadLMStudioChatModels,
+  openrouter: loadOpenrouterChatModels,
 };

 export const embeddingModelProviders: Record<
@@ -87,7 +44,6 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
-  lmstudio: loadLMStudioEmbeddingsModels,
 };

 export const getAvailableChatModelProviders = async () => {
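Reviewer note: the provider registries stay plain records mapping a provider key to an async loader, so OpenRouter plugs in with one entry per map. A usage sketch against the exports above:

```ts
import { chatModelProviders } from '@/lib/providers';

// Each loader resolves to Record<string, ChatModel>; an empty object
// means the provider is not configured.
const openrouterModels = await chatModelProviders['openrouter']();

for (const [key, { displayName }] of Object.entries(openrouterModels)) {
  console.log(key, displayName);
}
```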

View File

@@ -1,100 +0,0 @@
-import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
-import axios from 'axios';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'lmstudio',
-  displayName: 'LM Studio',
-};
-
-import { ChatOpenAI } from '@langchain/openai';
-import { OpenAIEmbeddings } from '@langchain/openai';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-interface LMStudioModel {
-  id: string;
-  name?: string;
-}
-
-const ensureV1Endpoint = (endpoint: string): string =>
-  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
-
-const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
-  try {
-    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-    return true;
-  } catch {
-    return false;
-  }
-};
-
-export const loadLMStudioChatModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      chatModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new ChatOpenAI({
-          openAIApiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-          temperature: 0.7,
-          streaming: true,
-          maxRetries: 3,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio models: ${err}`);
-    return {};
-  }
-};
-
-export const loadLMStudioEmbeddingsModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const embeddingsModels: Record<string, EmbeddingModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      embeddingsModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new OpenAIEmbeddings({
-          openAIApiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingsModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio embeddings model: ${err}`);
-    return {};
-  }
-};

View File

@@ -1,11 +1,6 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'ollama',
-  displayName: 'Ollama',
-};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';

View File

@@ -1,11 +1,6 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'openai',
-  displayName: 'OpenAI',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
@@ -30,18 +25,6 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
-  {
-    displayName: 'GPT 4.1 nano',
-    key: 'gpt-4.1-nano',
-  },
-  {
-    displayName: 'GPT 4.1 mini',
-    key: 'gpt-4.1-mini',
-  },
-  {
-    displayName: 'GPT 4.1',
-    key: 'gpt-4.1',
-  },
 ];

 const openaiEmbeddingModels: Record<string, string>[] = [

View File

@@ -0,0 +1,61 @@
+import { ChatOpenAI } from '@langchain/openai';
+import { getOpenrouterApiKey } from '../config';
+import { ChatModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+let openrouterChatModels: Record<string, string>[] = [];
+
+async function fetchModelList(): Promise<void> {
+  try {
+    const response = await fetch('https://openrouter.ai/api/v1/models', {
+      method: 'GET',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    });
+
+    if (!response.ok) {
+      throw new Error(`API request failed with status: ${response.status}`);
+    }
+
+    const data = await response.json();
+
+    openrouterChatModels = data.data.map((model: any) => ({
+      displayName: model.name,
+      key: model.id,
+    }));
+  } catch (error) {
+    console.error('Error fetching models:', error);
+  }
+}
+
+export const loadOpenrouterChatModels = async () => {
+  await fetchModelList();
+
+  const openrouterApikey = getOpenrouterApiKey();
+
+  if (!openrouterApikey) return {};
+
+  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    openrouterChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: openrouterApikey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://openrouter.ai/api/v1',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Openrouter models: ${err}`);
+    return {};
+  }
+};
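Reviewer note: `loadOpenrouterChatModels` awaits `fetchModelList()` before checking for an API key, so the public models request fires even when no key is configured; callers just see `{}`. A usage sketch (the model id is illustrative, not from this diff):

```ts
import { loadOpenrouterChatModels } from '@/lib/providers/openrouter';

const models = await loadOpenrouterChatModels();

// 'openai/gpt-4o-mini' is a hypothetical OpenRouter model id.
const chat = models['openai/gpt-4o-mini']?.model;
if (chat) {
  const reply = await chat.invoke('Say hello in one word.');
  console.log(reply.content);
}
```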

View File

@@ -1,10 +1,5 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
-
-export const PROVIDER_INFO = {
-  key: 'transformers',
-  displayName: 'Hugging Face',
-};

 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {

View File

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
   const splittedText = await splitter.splitText(parsedText);

   const title = res.data
     .toString('utf8')
-    .match(/<title.*>(.*?)<\/title>/)?.[1];
+    .match(/<title>(.*?)<\/title>/)?.[1];

   const linkDocs = splittedText.map((text) => {
     return new Document({
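Reviewer note: this hunk reverts the title regex to an exact `<title>` match, so pages whose title tag carries attributes (e.g. `<title data-rh="true">`) would fall back to an undefined title. A quick check of both patterns:

```ts
// The base branch's lenient pattern vs the PR branch's exact one.
const html = '<title data-rh="true">Example Page</title>';

console.log(html.match(/<title.*>(.*?)<\/title>/)?.[1]); // 'Example Page'
console.log(html.match(/<title>(.*?)<\/title>/)?.[1]); // undefined
```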