Compare commits


23 Commits

Author SHA1 Message Date

ItzCrazyKns 4154d5e4b1 Merge branch 'pr/629' 2025-04-23 20:35:52 +05:30
ItzCrazyKns 1862491496 feat(settings): add LM Studio API URL 2025-04-12 11:59:05 +05:30
ItzCrazyKns 073b5e897c feat(app): lint & beautify 2025-04-12 11:58:52 +05:30
Rami 9a332e79e4 Merge branch 'ItzCrazyKns:master' into feature/lm-studio-provider 2025-04-11 20:07:58 +04:00
ItzCrazyKns 72450b9217 Merge pull request #731 from ClawCloud-Ron/master (docs: add ClawCloud Run button) 2025-04-11 21:20:44 +05:30
haddadrm 7e1dc33a08 Implement provider formatting improvements and fix client-side compatibility 2025-04-11 19:18:19 +04:00
- Add PROVIDER_INFO metadata to each provider file with proper display names
- Create centralized PROVIDER_METADATA in index.ts for consistent reference
- Update settings UI to use provider metadata for display names
- Fix client/server compatibility for Node.js modules in config.ts
haddadrm aa240009ab Feature: Add LM Studio provider integration - Added LM Studio provider to support OpenAI compatible API - Implemented chat and embeddings model loading - Updated config to include LM Studio API endpoint 2025-04-11 19:18:19 +04:00
sjiampojamarn 41b258e4d8 Set speech message before return 2025-04-08 23:17:52 -07:00
ItzCrazyKns da1123d84b feat(groq): update model name 2025-04-07 23:30:51 +05:30
ItzCrazyKns 627775c430 feat(groq): remove maverick (not being run yet) 2025-04-07 23:29:51 +05:30
ItzCrazyKns 245573efca feat(groq): update model list 2025-04-07 23:23:18 +05:30
ClawCloud-Ron 28b9cca413 docs: add ClawCloud Run button 2025-04-07 16:49:59 +08:00
ItzCrazyKns a85f762c58 feat(package): bump version 2025-04-07 10:27:04 +05:30
ItzCrazyKns 3ddcceda0a feat(gemini-provider): update embedding models 2025-04-07 10:26:29 +05:30
ItzCrazyKns e226645bc7 feat(app): lint & beautify 2025-04-06 13:48:58 +05:30
ItzCrazyKns 5447530ece Merge branch 'feat/deepseek-provider' 2025-04-06 13:48:10 +05:30
ItzCrazyKns ed6d46a440 Merge branch 'pr/719' 2025-04-06 13:47:57 +05:30
ItzCrazyKns 588e68e93e feat(providers): add deepseek provider 2025-04-06 13:37:43 +05:30
ItzCrazyKns c4440327db Merge pull request #720 from OmarElKadri/master (feat(search): add optional systemInstructions to API request body) 2025-04-06 10:34:29 +05:30
OTYAK 64e2d457cc feat(search): add optional systemInstructions to API request body 2025-04-05 19:06:18 +01:00
ItzCrazyKns bf705afc21 feat(message-box): change styles, lint & beautify 2025-04-05 22:32:56 +05:30
singleparadox 2e4433a6b3 feat(message-box): support [1,2,3,4] citation format instead of just [1][2][3] 2025-04-05 15:24:45 +00:00
wellCh4n 8aaee2c40c feat(app): support complex title 2025-02-15 16:48:21 +08:00
21 changed files with 651 additions and 346 deletions

View File

@@ -159,6 +159,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
[![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
[![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
## Upcoming Features

data/.gitignore vendored
View File

@@ -1,3 +1,2 @@
*
!models.json
!.gitignore

View File

@@ -1,157 +0,0 @@
{
"_comment": "Ollama models are fetched from the Ollama API, so they are not included here.",
"chatModels": {
"openai": [
{
"displayName": "GPT-3.5 Turbo",
"key": "gpt-3.5-turbo"
},
{
"displayName": "GPT-4",
"key": "gpt-4"
},
{
"displayName": "GPT-4 Turbo",
"key": "gpt-4-turbo"
},
{
"displayName": "GPT-4 Omni",
"key": "gpt-4o"
},
{
"displayName": "GPT-4 Omni Mini",
"key": "gpt-4o-mini"
}
],
"groq": [
{
"displayName": "Gemma2 9B IT",
"key": "gemma2-9b-it"
},
{
"displayName": "Llama 3.3 70B Versatile",
"key": "llama-3.3-70b-versatile"
},
{
"displayName": "Llama 3.1 8B Instant",
"key": "llama-3.1-8b-instant"
},
{
"displayName": "Llama3 70B 8192",
"key": "llama3-70b-8192"
},
{
"displayName": "Llama3 8B 8192",
"key": "llama3-8b-8192"
},
{
"displayName": "Mixtral 8x7B 32768",
"key": "mixtral-8x7b-32768"
},
{
"displayName": "Qwen QWQ 32B (Preview)",
"key": "qwen-qwq-32b"
},
{
"displayName": "Mistral Saba 24B (Preview)",
"key": "mistral-saba-24b"
},
{
"displayName": "DeepSeek R1 Distill Llama 70B (Preview)",
"key": "deepseek-r1-distill-llama-70b"
}
],
"gemini": [
{
"displayName": "Gemini 2.5 Pro Experimental",
"key": "gemini-2.5-pro-exp-03-25"
},
{
"displayName": "Gemini 2.0 Flash",
"key": "gemini-2.0-flash"
},
{
"displayName": "Gemini 2.0 Flash-Lite",
"key": "gemini-2.0-flash-lite"
},
{
"displayName": "Gemini 2.0 Flash Thinking Experimental",
"key": "gemini-2.0-flash-thinking-exp-01-21"
},
{
"displayName": "Gemini 1.5 Flash",
"key": "gemini-1.5-flash"
},
{
"displayName": "Gemini 1.5 Flash-8B",
"key": "gemini-1.5-flash-8b"
},
{
"displayName": "Gemini 1.5 Pro",
"key": "gemini-1.5-pro"
}
],
"anthropic": [
{
"displayName": "Claude 3.7 Sonnet",
"key": "claude-3-7-sonnet-20250219"
},
{
"displayName": "Claude 3.5 Haiku",
"key": "claude-3-5-haiku-20241022"
},
{
"displayName": "Claude 3.5 Sonnet v2",
"key": "claude-3-5-sonnet-20241022"
},
{
"displayName": "Claude 3.5 Sonnet",
"key": "claude-3-5-sonnet-20240620"
},
{
"displayName": "Claude 3 Opus",
"key": "claude-3-opus-20240229"
},
{
"displayName": "Claude 3 Sonnet",
"key": "claude-3-sonnet-20240229"
},
{
"displayName": "Claude 3 Haiku",
"key": "claude-3-haiku-20240307"
}
]
},
"embeddingModels": {
"openai": [
{
"displayName": "Text Embedding 3 Large",
"key": "text-embedding-3-large"
},
{
"displayName": "Text Embedding 3 Small",
"key": "text-embedding-3-small"
}
],
"gemini": [
{
"displayName": "Gemini Embedding",
"key": "gemini-embedding-exp"
}
],
"transformers": [
{
"displayName": "BGE Small",
"key": "xenova-bge-small-en-v1.5"
},
{
"displayName": "GTE Small",
"key": "xenova-gte-small"
},
{
"displayName": "Bert Multilingual",
"key": "xenova-bert-base-multilingual-uncased"
}
]
}
}

View File

@@ -33,6 +33,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
["human", "Hi, how are you?"],
["assistant", "I am doing well, how can I help you today?"]
],
"systemInstructions": "Focus on providing technical details about Perplexica's architecture.",
"stream": false
}
```
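For reference, a minimal client call that exercises the new `systemInstructions` field might look like the sketch below. The `/api/search` path, the port, and the `webSearch` focus-mode value are assumptions based on the surrounding docs, not part of this diff.

```typescript
// Sketch: POST a search request with the new optional systemInstructions field.
// Endpoint path, port, and focus mode are assumptions; adjust to your deployment.
async function searchWithInstructions(query: string): Promise<unknown> {
  const res = await fetch('http://localhost:3000/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      focusMode: 'webSearch',
      query,
      systemInstructions:
        "Focus on providing technical details about Perplexica's architecture.",
      stream: false,
    }),
  });
  return res.json();
}
```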
@@ -63,6 +64,8 @@ The API accepts a JSON object in the request body, where you define the focus mo
- **`query`** (string, required): The search query or question.
- **`systemInstructions`** (string, optional): Custom instructions provided by the user to guide the AI's response. These instructions are treated as user preferences and have lower priority than the system's core instructions. For example, you can specify a particular writing style, format, or focus area.
- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
```json

View File

@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.10.1",
"version": "1.10.2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {

View File

@@ -22,5 +22,11 @@ MODEL_NAME = ""
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[MODELS.DEEPSEEK]
API_KEY = ""
[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768
SEARXNG = "" # SearxNG API URL - http://localhost:32768

View File

@@ -7,6 +7,8 @@ import {
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
getDeepseekApiKey,
getLMStudioApiEndpoint,
updateConfig,
} from '@/lib/config';
import {
@@ -50,9 +52,11 @@ export const GET = async (req: Request) => {
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['deepseekApiKey'] = getDeepseekApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -88,6 +92,12 @@ export const POST = async (req: Request) => {
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
DEEPSEEK: {
API_KEY: config.deepseekApiKey,
},
LM_STUDIO: {
API_URL: config.lmStudioApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,

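As a usage sketch (not part of the diff): the new keys round-trip through this route, so a client can persist the LM Studio endpoint and Deepseek key with a single POST, assuming the handler accepts the same field names the GET handler returns.

```typescript
// Sketch: persist the new provider settings via the config route.
// Assumes POST /api/config accepts the same keys the GET handler returns.
async function saveProviderSettings(): Promise<void> {
  await fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      lmStudioApiUrl: 'http://host.docker.internal:1234',
      deepseekApiKey: 'your-api-key', // hypothetical placeholder
    }),
  });
}
```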
View File

@@ -34,6 +34,7 @@ interface ChatRequestBody {
query: string;
history: Array<[string, string]>;
stream?: boolean;
systemInstructions?: string;
}
export const POST = async (req: Request) => {
@@ -125,7 +126,7 @@ export const POST = async (req: Request) => {
embeddings,
body.optimizationMode,
[],
'',
body.systemInstructions || '',
);
if (!body.stream) {

View File

@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
import ThemeSwitcher from '@/components/theme/Switcher';
import { ImagesIcon, VideoIcon } from 'lucide-react';
import Link from 'next/link';
import { PROVIDER_METADATA } from '@/lib/providers';
interface SettingsType {
chatModelProviders: {
@@ -20,6 +21,8 @@ interface SettingsType {
anthropicApiKey: string;
geminiApiKey: string;
ollamaApiUrl: string;
lmStudioApiUrl: string;
deepseekApiKey: string;
customOpenaiApiKey: string;
customOpenaiApiUrl: string;
customOpenaiModelName: string;
@@ -547,8 +550,9 @@ const Page = () => {
(provider) => ({
value: provider,
label:
(PROVIDER_METADATA as any)[provider]?.displayName ||
provider.charAt(0).toUpperCase() +
provider.slice(1),
}),
)}
/>
@@ -689,8 +693,9 @@
(provider) => ({
value: provider,
label:
(PROVIDER_METADATA as any)[provider]?.displayName ||
provider.charAt(0).toUpperCase() +
provider.slice(1),
}),
)}
/>
@@ -838,6 +843,44 @@
onSave={(value) => saveConfig('geminiApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Deepseek API Key
</p>
<Input
type="text"
placeholder="Deepseek API Key"
value={config.deepseekApiKey}
isSaving={savingStates['deepseekApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
deepseekApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('deepseekApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
LM Studio API URL
</p>
<Input
type="text"
placeholder="LM Studio API URL"
value={config.lmStudioApiUrl}
isSaving={savingStates['lmStudioApiUrl']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
lmStudioApiUrl: e.target.value,
}));
}}
onSave={(value) => saveConfig('lmStudioApiUrl', value)}
/>
</div>
</div>
</SettingsSection>
</div>

View File

@@ -48,6 +48,7 @@ const MessageBox = ({
const [speechMessage, setSpeechMessage] = useState(message.content);
useEffect(() => {
const citationRegex = /\[([^\]]+)\]/g;
const regex = /\[(\d+)\]/g;
let processedMessage = message.content;
@@ -67,13 +68,36 @@
) {
setParsedMessage(
processedMessage.replace(
regex,
(_, number) =>
`<a href="${
message.sources?.[number - 1]?.metadata?.url
}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
citationRegex,
(_, capturedContent: string) => {
const numbers = capturedContent
.split(',')
.map((numStr) => numStr.trim());
const linksHtml = numbers
.map((numStr) => {
const number = parseInt(numStr);
if (isNaN(number) || number <= 0) {
return `[${numStr}]`;
}
const source = message.sources?.[number - 1];
const url = source?.metadata?.url;
if (url) {
return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
} else {
return `[${numStr}]`;
}
})
.join('');
return linksHtml;
},
),
);
setSpeechMessage(message.content.replace(regex, ''));
return;
}
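To see what the widened parser accepts, here is the grouped-citation expansion in isolation, a simplified sketch of the component logic above:

```typescript
// Sketch: grouped citations like [1,2] expand to one link per source,
// while out-of-range or non-numeric entries are left as plain text.
const citationRegex = /\[([^\]]+)\]/g;
const sources = ['https://a.example', 'https://b.example'];

const html = 'See [1,2] and [3].'.replace(citationRegex, (_match, captured: string) =>
  captured
    .split(',')
    .map((numStr) => {
      const number = parseInt(numStr.trim());
      const url = sources[number - 1]; // undefined for NaN or out-of-range
      return url ? `<a href="${url}">${numStr.trim()}</a>` : `[${numStr.trim()}]`;
    })
    .join(''),
);
// html === 'See <a href="https://a.example">1</a><a href="https://b.example">2</a> and [3].'
```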

View File

@@ -1,7 +1,14 @@
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';
// Use dynamic imports for Node.js modules to prevent client-side errors
let fs: any;
let path: any;
if (typeof window === 'undefined') {
// We're on the server
fs = require('fs');
path = require('path');
}
const configFileName = 'config.toml';
interface Config {
@@ -25,6 +32,12 @@ interface Config {
OLLAMA: {
API_URL: string;
};
DEEPSEEK: {
API_KEY: string;
};
LM_STUDIO: {
API_URL: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
@@ -40,10 +53,17 @@ type RecursivePartial<T> = {
[P in keyof T]?: RecursivePartial<T[P]>;
};
const loadConfig = () =>
toml.parse(
fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
) as any as Config;
const loadConfig = () => {
// Server-side only
if (typeof window === 'undefined') {
return toml.parse(
fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
) as any as Config;
}
// Client-side fallback - settings will be loaded via API
return {} as Config;
};
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -63,6 +83,8 @@ export const getSearxngApiEndpoint = () =>
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@@ -72,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
export const getLMStudioApiEndpoint = () =>
loadConfig().MODELS.LM_STUDIO.API_URL;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
@@ -104,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
};
export const updateConfig = (config: RecursivePartial<Config>) => {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(path.join(process.cwd(), `${configFileName}`)),
toml.stringify(mergedConfig),
);
// Server-side only
if (typeof window === 'undefined') {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(path.join(process.cwd(), `${configFileName}`)),
toml.stringify(mergedConfig),
);
}
};
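Because `loadConfig()` now returns an empty object in the browser, client components are expected to read settings through the API route instead. A minimal sketch, assuming the GET handler shown earlier:

```typescript
// Sketch: client-side settings access goes through GET /api/config,
// since direct config.toml reads are now server-only.
async function fetchLmStudioUrl(): Promise<string | undefined> {
  const res = await fetch('/api/config');
  const config = await res.json();
  return config.lmStudioApiUrl; // key name taken from the GET handler above
}
```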

View File

@@ -1,22 +1,53 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatModel, getModelsList, RawModel } from '.';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
export const PROVIDER_INFO = {
key: 'anthropic',
displayName: 'Anthropic',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const loadModels = () => {
return getModelsList()?.['chatModels']['anthropic'] as unknown as RawModel[]
}
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
export const loadAnthropicChatModels = async () => {
const anthropicApiKey = getAnthropicApiKey();
if (!anthropicApiKey) return {};
const models = loadModels()
if (!anthropicApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
models.forEach((model) => {
anthropicChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatAnthropic({

View File

@@ -0,0 +1,49 @@
import { ChatOpenAI } from '@langchain/openai';
import { getDeepseekApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
export const PROVIDER_INFO = {
key: 'deepseek',
displayName: 'Deepseek AI',
};
const deepseekChatModels: Record<string, string>[] = [
{
displayName: 'Deepseek Chat (Deepseek V3)',
key: 'deepseek-chat',
},
{
displayName: 'Deepseek Reasoner (Deepseek R1)',
key: 'deepseek-reasoner',
},
];
export const loadDeepseekChatModels = async () => {
const deepseekApiKey = getDeepseekApiKey();
if (!deepseekApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
deepseekChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: deepseekApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.deepseek.com',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
console.error(`Error loading Deepseek models: ${err}`);
return {};
}
};
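Once a Deepseek key is configured, the provider surfaces through the shared registry like any other. A quick sketch against the exported helpers (server-side; the registry changes appear in the index.ts diff further down):

```typescript
// Sketch: the new provider is discovered via the shared registry (server-side).
import { getAvailableChatModelProviders } from '@/lib/providers';

async function listDeepseekModels(): Promise<void> {
  const providers = await getAvailableChatModelProviders();
  console.log(providers['deepseek']?.['deepseek-chat']?.displayName);
  // -> 'Deepseek Chat (Deepseek V3)' when an API key is set, otherwise undefined
}
```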

View File

@@ -3,24 +3,66 @@ import {
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel, getModelsList, RawModel } from '.';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'gemini',
displayName: 'Google Gemini',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const loadModels = (modelType: 'chat' | 'embedding') => {
return getModelsList()?.[modelType === 'chat' ? 'chatModels' : 'embeddingModels']['gemini'] as unknown as RawModel[]
}
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.5 Pro Experimental',
key: 'gemini-2.5-pro-exp-03-25',
},
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Flash Thinking Experimental',
key: 'gemini-2.0-flash-thinking-exp-01-21',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 004',
key: 'models/text-embedding-004',
},
{
displayName: 'Embedding 001',
key: 'models/embedding-001',
},
];
export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {};
const models = loadModels('chat');
if (!geminiApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
models.forEach((model) => {
geminiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatGoogleGenerativeAI({
@@ -40,14 +82,13 @@ export const loadGeminiChatModels = async () => {
export const loadGeminiEmbeddingModels = async () => {
const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {};
const models = loadModels('embedding');
if (!geminiApiKey) return {};
try {
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model) => {
geminiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new GoogleGenerativeAIEmbeddings({

View File

@@ -1,22 +1,101 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../config';
import { ChatModel, getModelsList, RawModel } from '.';
import { ChatModel } from '.';
export const PROVIDER_INFO = {
key: 'groq',
displayName: 'Groq',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const loadModels = () => {
return getModelsList()?.chatModels['groq'] as unknown as RawModel[]
}
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
/* {
displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
}, */
{
displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
key: 'meta-llama/llama-4-scout-17b-16e-instruct',
},
];
export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey();
if (!groqApiKey) return {};
const models = loadModels()
if (!groqApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
models.forEach((model) => {
groqChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({

View File

@@ -1,39 +1,69 @@
import { Embeddings } from '@langchain/core/embeddings'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai'
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import {
loadOpenAIChatModels,
loadOpenAIEmbeddingModels,
PROVIDER_INFO as OpenAIInfo,
PROVIDER_INFO,
} from './openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config'
import { ChatOpenAI } from '@langchain/openai'
import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama'
import { loadGroqChatModels } from './groq'
import { loadAnthropicChatModels } from './anthropic'
import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini'
import { loadTransformersEmbeddingsModels } from './transformers'
import path from 'path'
import fs from 'fs'
} from '../config';
import { ChatOpenAI } from '@langchain/openai';
import {
loadOllamaChatModels,
loadOllamaEmbeddingModels,
PROVIDER_INFO as OllamaInfo,
} from './ollama';
import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
import {
loadAnthropicChatModels,
PROVIDER_INFO as AnthropicInfo,
} from './anthropic';
import {
loadGeminiChatModels,
loadGeminiEmbeddingModels,
PROVIDER_INFO as GeminiInfo,
} from './gemini';
import {
loadTransformersEmbeddingsModels,
PROVIDER_INFO as TransformersInfo,
} from './transformers';
import {
loadDeepseekChatModels,
PROVIDER_INFO as DeepseekInfo,
} from './deepseek';
import {
loadLMStudioChatModels,
loadLMStudioEmbeddingsModels,
PROVIDER_INFO as LMStudioInfo,
} from './lmstudio';
export const PROVIDER_METADATA = {
openai: OpenAIInfo,
ollama: OllamaInfo,
groq: GroqInfo,
anthropic: AnthropicInfo,
gemini: GeminiInfo,
transformers: TransformersInfo,
deepseek: DeepseekInfo,
lmstudio: LMStudioInfo,
custom_openai: {
key: 'custom_openai',
displayName: 'Custom OpenAI',
},
};
export interface ChatModel {
displayName: string
model: BaseChatModel
displayName: string;
model: BaseChatModel;
}
export interface EmbeddingModel {
displayName: string
model: Embeddings
}
export type RawModel = {
displayName: string
key: string
}
type ModelsList = {
[key in "chatModels" | "embeddingModels"]: {
[key: string]: RawModel[]
}
displayName: string;
model: Embeddings;
}
export const chatModelProviders: Record<
@@ -45,7 +75,9 @@ export const chatModelProviders: Record<
groq: loadGroqChatModels,
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
}
deepseek: loadDeepseekChatModels,
lmstudio: loadLMStudioChatModels,
};
export const embeddingModelProviders: Record<
string,
@@ -55,43 +87,22 @@ export const embeddingModelProviders: Record<
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels,
}
export const getModelsList = (): ModelsList | null => {
const modelFile = path.join(process.cwd(), 'data/models.json')
try {
const content = fs.readFileSync(modelFile, 'utf-8')
return JSON.parse(content) as ModelsList
} catch (err) {
console.error(`Error reading models file: ${err}`)
return null
}
}
export const updateModelsList = (models: ModelsList) => {
try {
const modelFile = path.join(process.cwd(), 'data/models.json')
const content = JSON.stringify(models, null, 2)
fs.writeFileSync(modelFile, content, 'utf-8')
} catch(err) {
console.error(`Error updating models file: ${err}`)
}
}
lmstudio: loadLMStudioEmbeddingsModels,
};
export const getAvailableChatModelProviders = async () => {
const models: Record<string, Record<string, ChatModel>> = {}
const models: Record<string, Record<string, ChatModel>> = {};
for (const provider in chatModelProviders) {
const providerModels = await chatModelProviders[provider]()
const providerModels = await chatModelProviders[provider]();
if (Object.keys(providerModels).length > 0) {
models[provider] = providerModels
models[provider] = providerModels;
}
}
const customOpenAiApiKey = getCustomOpenaiApiKey()
const customOpenAiApiUrl = getCustomOpenaiApiUrl()
const customOpenAiModelName = getCustomOpenaiModelName()
const customOpenAiApiKey = getCustomOpenaiApiKey();
const customOpenAiApiUrl = getCustomOpenaiApiUrl();
const customOpenAiModelName = getCustomOpenaiModelName();
models['custom_openai'] = {
...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName
@@ -109,20 +120,20 @@ export const getAvailableChatModelProviders = async () => {
},
}
: {}),
}
};
return models
}
return models;
};
export const getAvailableEmbeddingModelProviders = async () => {
const models: Record<string, Record<string, EmbeddingModel>> = {}
const models: Record<string, Record<string, EmbeddingModel>> = {};
for (const provider in embeddingModelProviders) {
const providerModels = await embeddingModelProviders[provider]()
const providerModels = await embeddingModelProviders[provider]();
if (Object.keys(providerModels).length > 0) {
models[provider] = providerModels
models[provider] = providerModels;
}
}
return models
}
return models;
};

View File

@@ -0,0 +1,100 @@
import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
import axios from 'axios';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'lmstudio',
displayName: 'LM Studio',
};
import { ChatOpenAI } from '@langchain/openai';
import { OpenAIEmbeddings } from '@langchain/openai';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
interface LMStudioModel {
id: string;
name?: string;
}
const ensureV1Endpoint = (endpoint: string): string =>
endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
try {
await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
return true;
} catch {
return false;
}
};
export const loadLMStudioChatModels = async () => {
const endpoint = getLMStudioApiEndpoint();
if (!endpoint) return {};
if (!(await checkServerAvailability(endpoint))) return {};
try {
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
const chatModels: Record<string, ChatModel> = {};
response.data.data.forEach((model: LMStudioModel) => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
modelName: model.id,
temperature: 0.7,
streaming: true,
maxRetries: 3,
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
console.error(`Error loading LM Studio models: ${err}`);
return {};
}
};
export const loadLMStudioEmbeddingsModels = async () => {
const endpoint = getLMStudioApiEndpoint();
if (!endpoint) return {};
if (!(await checkServerAvailability(endpoint))) return {};
try {
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
const embeddingsModels: Record<string, EmbeddingModel> = {};
response.data.data.forEach((model: LMStudioModel) => {
embeddingsModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
modelName: model.id,
}) as unknown as Embeddings,
};
});
return embeddingsModels;
} catch (err) {
console.error(`Error loading LM Studio embeddings model: ${err}`);
return {};
}
};
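A quick check of the endpoint normalizer above; the sample values mirror the comment in config.toml:

```typescript
// Sketch: the /v1 normalizer appends the suffix only when it is missing.
const ensureV1 = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

console.log(ensureV1('http://host.docker.internal:1234'));    // .../1234/v1
console.log(ensureV1('http://host.docker.internal:1234/v1')); // unchanged
```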

View File

@@ -1,39 +1,29 @@
import axios from 'axios'
import { getKeepAlive, getOllamaApiEndpoint } from '../config'
import { ChatModel, EmbeddingModel } from '.'
import { ChatOllama } from '@langchain/community/chat_models/ollama'
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'ollama',
displayName: 'Ollama',
};
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaApiEndpoint) return {};
const loadModels = async (apiURL: string) => {
try {
const res = await axios.get(`${apiURL}/api/tags`, {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
})
});
if (res.status !== 200) {
console.error(`Failed to load Ollama models: ${res.data}`)
return []
}
const { models } = res.data;
const { models } = res.data
return models
} catch (err) {
console.error(`Error loading Ollama models: ${err}`)
return []
}
}
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint()
if (!ollamaApiEndpoint) return {}
const models = await loadModels(ollamaApiEndpoint)
try {
const chatModels: Record<string, ChatModel> = {}
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.model] = {
@@ -44,24 +34,31 @@ export const loadOllamaChatModels = async () => {
temperature: 0.7,
keepAlive: getKeepAlive(),
}),
}
})
};
});
return chatModels
return chatModels;
} catch (err) {
console.error(`Error loading Ollama models: ${err}`)
return {}
console.error(`Error loading Ollama models: ${err}`);
return {};
}
}
};
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint()
if (!ollamaApiEndpoint) return {}
const ollamaApiEndpoint = getOllamaApiEndpoint();
const models = await loadModels(ollamaApiEndpoint)
if (!ollamaApiEndpoint) return {};
try {
const embeddingModels: Record<string, EmbeddingModel> = {}
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models } = res.data;
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model: any) => {
embeddingModels[model.model] = {
@@ -70,12 +67,12 @@ export const loadOllamaEmbeddingModels = async () => {
baseUrl: ollamaApiEndpoint,
model: model.model,
}),
}
})
};
});
return embeddingModels
return embeddingModels;
} catch (err) {
console.error(`Error loading Ollama embeddings models: ${err}`)
return {}
console.error(`Error loading Ollama embeddings models: ${err}`);
return {};
}
}
};

View File

@@ -1,23 +1,57 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel, getModelsList, RawModel } from '.';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'openai',
displayName: 'OpenAI',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const loadModels = (modelType: 'chat' | 'embedding') => {
return getModelsList()?.[modelType === 'chat' ? 'chatModels' : 'embeddingModels']['openai'] as unknown as RawModel[]
}
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
export const loadOpenAIChatModels = async () => {
const openaiApiKey = getOpenaiApiKey();
const models = loadModels('chat');
if (!openaiApiKey || !models) return {};
if (!openaiApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
models.forEach((model) => {
openaiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
@@ -37,14 +71,13 @@ export const loadOpenAIChatModels = async () => {
export const loadOpenAIEmbeddingModels = async () => {
const openaiApiKey = getOpenaiApiKey();
const models = loadModels('embedding');
if (!openaiApiKey || !models) return {};
if (!openaiApiKey) return {};
try {
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model) => {
openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({

View File

@@ -1,30 +1,36 @@
import { EmbeddingModel, getModelsList, RawModel } from '.'
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer'
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
const loadModels = () => {
return getModelsList()?.embeddingModels[
'transformers'
] as unknown as RawModel[]
}
export const PROVIDER_INFO = {
key: 'transformers',
displayName: 'Hugging Face',
};
export const loadTransformersEmbeddingsModels = async () => {
try {
const models = loadModels()
const embeddingModels: Record<string, EmbeddingModel> = {}
models.forEach(model => {
embeddingModels[model.key] = {
displayName: model.displayName,
const embeddingModels = {
'xenova-bge-small-en-v1.5': {
displayName: 'BGE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: model.key,
modelName: 'Xenova/bge-small-en-v1.5',
}),
}
})
},
'xenova-gte-small': {
displayName: 'GTE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/gte-small',
}),
},
'xenova-bert-base-multilingual-uncased': {
displayName: 'Bert Multilingual',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bert-base-multilingual-uncased',
}),
},
};
return embeddingModels
return embeddingModels;
} catch (err) {
console.error(`Error loading Transformers embeddings model: ${err}`)
return {}
console.error(`Error loading Transformers embeddings model: ${err}`);
return {};
}
}
};

View File

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splittedText = await splitter.splitText(parsedText);
const title = res.data
.toString('utf8')
.match(/<title>(.*?)<\/title>/)?.[1];
.match(/<title.*>(.*?)<\/title>/)?.[1];
const linkDocs = splittedText.map((text) => {
return new Document({
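The loosened `<title.*>` pattern above is what "feat(app): support complex title" refers to: it also matches title tags that carry attributes. A quick sketch:

```typescript
// Sketch: the updated regex captures titles even when the tag has attributes.
const pattern = /<title.*>(.*?)<\/title>/;
console.log('<title>Plain</title>'.match(pattern)?.[1]);                  // 'Plain'
console.log('<title data-rh="true">Complex</title>'.match(pattern)?.[1]); // 'Complex'
```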