Mirror of https://github.com/ItzCrazyKns/Perplexica.git, synced 2025-06-18 07:48:35 +00:00
Compare commits: 09661ae11d...feat/model (26 commits)
Commits:
701819d018
68e151b2bd
06ff272541
4154d5e4b1
1862491496
073b5e897c
9a332e79e4
72450b9217
7e1dc33a08
aa240009ab
41b258e4d8
da1123d84b
627775c430
245573efca
28b9cca413
a85f762c58
3ddcceda0a
e226645bc7
5447530ece
ed6d46a440
588e68e93e
c4440327db
64e2d457cc
bf705afc21
2e4433a6b3
8aaee2c40c

@@ -159,6 +159,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
 [](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [](https://repocloud.io/details/?app_id=267)
+[](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
 
 ## Upcoming Features

@@ -33,6 +33,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
     ["human", "Hi, how are you?"],
     ["assistant", "I am doing well, how can I help you today?"]
   ],
+  "systemInstructions": "Focus on providing technical details about Perplexica's architecture.",
   "stream": false
 }
 ```

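For context, a minimal TypeScript sketch of calling the documented endpoint with the new `systemInstructions` field; the route path (`POST /api/search`), host, and port are assumptions here, not taken from this diff:

// Hypothetical client for the search endpoint described above.
async function searchPerplexica(): Promise<void> {
  const res = await fetch('http://localhost:3000/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      focusMode: 'webSearch', // assumed focus-mode value
      query: "How does Perplexica's architecture work?",
      history: [
        ['human', 'Hi, how are you?'],
        ['assistant', 'I am doing well, how can I help you today?'],
      ],
      systemInstructions:
        "Focus on providing technical details about Perplexica's architecture.",
      stream: false,
    }),
  });
  if (!res.ok) throw new Error(`Request failed: ${res.status}`);
  console.log(await res.json());
}
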
@@ -63,6 +64,8 @@ The API accepts a JSON object in the request body, where you define the focus mo
 - **`query`** (string, required): The search query or question.
 
+- **`systemInstructions`** (string, optional): Custom instructions provided by the user to guide the AI's response. These instructions are treated as user preferences and have lower priority than the system's core instructions. For example, you can specify a particular writing style, format, or focus area.
+
 - **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
 
   ```json

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.10.1",
+  "version": "1.10.2",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {

@@ -22,5 +22,11 @@ MODEL_NAME = ""
 [MODELS.OLLAMA]
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 
+[MODELS.DEEPSEEK]
+API_KEY = ""
+
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768

@@ -7,6 +7,8 @@ import {
   getGroqApiKey,
   getOllamaApiEndpoint,
   getOpenaiApiKey,
+  getDeepseekApiKey,
+  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {

@@ -50,9 +52,11 @@ export const GET = async (req: Request) => {
 
   config['openaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
+  config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
   config['anthropicApiKey'] = getAnthropicApiKey();
   config['groqApiKey'] = getGroqApiKey();
   config['geminiApiKey'] = getGeminiApiKey();
+  config['deepseekApiKey'] = getDeepseekApiKey();
   config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
   config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
   config['customOpenaiModelName'] = getCustomOpenaiModelName();

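As a usage note, the keys assigned above are what a client sees from this route; a hedged TypeScript sketch (the `/api/config` path follows from this route file's location by convention, but is an assumption here):

// Hypothetical client-side read of the settings payload built above.
const config = await fetch('/api/config').then((r) => r.json());
console.log(config.deepseekApiKey, config.lmStudioApiUrl);
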
@@ -88,6 +92,12 @@ export const POST = async (req: Request) => {
     OLLAMA: {
       API_URL: config.ollamaApiUrl,
     },
+    DEEPSEEK: {
+      API_KEY: config.deepseekApiKey,
+    },
+    LM_STUDIO: {
+      API_URL: config.lmStudioApiUrl,
+    },
     CUSTOM_OPENAI: {
       API_URL: config.customOpenaiApiUrl,
       API_KEY: config.customOpenaiApiKey,

@@ -34,6 +34,7 @@ interface ChatRequestBody {
   query: string;
   history: Array<[string, string]>;
   stream?: boolean;
+  systemInstructions?: string;
 }
 
 export const POST = async (req: Request) => {

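A small sketch of exercising the optional `stream` flag from a client; the endpoint path and the exact shape of the streamed chunks are assumptions, not taken from this diff:

// Hypothetical streaming consumer; logs raw chunks as they arrive.
async function streamSearch(body: object): Promise<void> {
  const res = await fetch('/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...body, stream: true }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    console.log(decoder.decode(value, { stream: true }));
  }
}
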
@@ -125,7 +126,7 @@ export const POST = async (req: Request) => {
     embeddings,
     body.optimizationMode,
     [],
-    '',
+    body.systemInstructions || '',
   );
 
   if (!body.stream) {

@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {

@@ -20,6 +21,8 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
+  lmStudioApiUrl: string;
+  deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;

@@ -547,8 +550,9 @@ const Page = () => {
   (provider) => ({
     value: provider,
     label:
+      (PROVIDER_METADATA as any)[provider]?.displayName ||
       provider.charAt(0).toUpperCase() +
         provider.slice(1),
   }),
 )}
 />

@@ -689,8 +693,9 @@ const Page = () => {
   (provider) => ({
     value: provider,
     label:
+      (PROVIDER_METADATA as any)[provider]?.displayName ||
       provider.charAt(0).toUpperCase() +
         provider.slice(1),
   }),
 )}
 />

@@ -838,6 +843,44 @@ const Page = () => {
             onSave={(value) => saveConfig('geminiApiKey', value)}
           />
         </div>
+
+        <div className="flex flex-col space-y-1">
+          <p className="text-black/70 dark:text-white/70 text-sm">
+            Deepseek API Key
+          </p>
+          <Input
+            type="text"
+            placeholder="Deepseek API Key"
+            value={config.deepseekApiKey}
+            isSaving={savingStates['deepseekApiKey']}
+            onChange={(e) => {
+              setConfig((prev) => ({
+                ...prev!,
+                deepseekApiKey: e.target.value,
+              }));
+            }}
+            onSave={(value) => saveConfig('deepseekApiKey', value)}
+          />
+        </div>
+
+        <div className="flex flex-col space-y-1">
+          <p className="text-black/70 dark:text-white/70 text-sm">
+            LM Studio API URL
+          </p>
+          <Input
+            type="text"
+            placeholder="LM Studio API URL"
+            value={config.lmStudioApiUrl}
+            isSaving={savingStates['lmStudioApiUrl']}
+            onChange={(e) => {
+              setConfig((prev) => ({
+                ...prev!,
+                lmStudioApiUrl: e.target.value,
+              }));
+            }}
+            onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+          />
+        </div>
       </div>
     </SettingsSection>
   </div>

@@ -48,6 +48,7 @@ const MessageBox = ({
   const [speechMessage, setSpeechMessage] = useState(message.content);
 
   useEffect(() => {
+    const citationRegex = /\[([^\]]+)\]/g;
     const regex = /\[(\d+)\]/g;
     let processedMessage = message.content;

@@ -67,13 +68,36 @@ const MessageBox = ({
     ) {
       setParsedMessage(
         processedMessage.replace(
-          regex,
-          (_, number) =>
-            `<a href="${
-              message.sources?.[number - 1]?.metadata?.url
-            }" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+          citationRegex,
+          (_, capturedContent: string) => {
+            const numbers = capturedContent
+              .split(',')
+              .map((numStr) => numStr.trim());
+
+            const linksHtml = numbers
+              .map((numStr) => {
+                const number = parseInt(numStr);
+
+                if (isNaN(number) || number <= 0) {
+                  return `[${numStr}]`;
+                }
+
+                const source = message.sources?.[number - 1];
+                const url = source?.metadata?.url;
+
+                if (url) {
+                  return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
+                } else {
+                  return `[${numStr}]`;
+                }
+              })
+              .join('');
+
+            return linksHtml;
+          },
         ),
       );
       setSpeechMessage(message.content.replace(regex, ''));
       return;
     }

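To illustrate what the rewritten handler does, a standalone TypeScript sketch of the same citation logic on a made-up `sources` fixture (the fixture and names are illustrative only):

// Mirrors the comma-separated citation handling above, outside React.
const sources = [{ metadata: { url: 'https://example.com/a' } }];
const citationRegex = /\[([^\]]+)\]/g;
const input = 'Inline citation [1] and a grouped one [1, 2].';
const output = input.replace(citationRegex, (_, captured: string) =>
  captured
    .split(',')
    .map((numStr) => {
      const n = parseInt(numStr.trim());
      const url = Number.isNaN(n) ? undefined : sources[n - 1]?.metadata?.url;
      return url ? `<a href="${url}">${numStr.trim()}</a>` : `[${numStr.trim()}]`;
    })
    .join(''),
);
console.log(output); // "[2]" stays literal because there is no second source
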
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
 
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
+
 const configFileName = 'config.toml';
 
 interface Config {

@@ -25,6 +32,12 @@ interface Config {
   OLLAMA: {
     API_URL: string;
   };
+  DEEPSEEK: {
+    API_KEY: string;
+  };
+  LM_STUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;

@@ -40,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
-    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-  ) as any as Config;
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
+      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+    ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;

@@ -63,6 +83,8 @@ export const getSearxngApiEndpoint = () =>
 
 export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
 
+export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

@@ -72,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;

@@ -104,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  const currentConfig = loadConfig();
-  const mergedConfig = mergeConfigs(currentConfig, config);
-  fs.writeFileSync(
-    path.join(path.join(process.cwd(), `${configFileName}`)),
-    toml.stringify(mergedConfig),
-  );
+  // Server-side only
+  if (typeof window === 'undefined') {
+    const currentConfig = loadConfig();
+    const mergedConfig = mergeConfigs(currentConfig, config);
+    fs.writeFileSync(
+      path.join(path.join(process.cwd(), `${configFileName}`)),
+      toml.stringify(mergedConfig),
+    );
+  }
 };

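Since `updateConfig` takes a `RecursivePartial<Config>`, only the touched keys need to be supplied and `mergeConfigs` fills in the rest; a hedged example (the API key value is a placeholder):

// Merges just this nested key into config.toml via mergeConfigs.
updateConfig({
  MODELS: {
    DEEPSEEK: { API_KEY: 'sk-placeholder' },
  },
});
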
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [

src/lib/providers/deepseek.ts (new file, 49 lines)
@@ -0,0 +1,49 @@
+import { ChatOpenAI } from '@langchain/openai';
+import { getDeepseekApiKey } from '../config';
+import { ChatModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI',
+};
+
+const deepseekChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Deepseek Chat (Deepseek V3)',
+    key: 'deepseek-chat',
+  },
+  {
+    displayName: 'Deepseek Reasoner (Deepseek R1)',
+    key: 'deepseek-reasoner',
+  },
+];
+
+export const loadDeepseekChatModels = async () => {
+  const deepseekApiKey = getDeepseekApiKey();
+
+  if (!deepseekApiKey) return {};
+
+  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    deepseekChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: deepseekApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.deepseek.com',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Deepseek models: ${err}`);
+    return {};
+  }
+};

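A hedged usage sketch for the new loader: it returns an empty record when no key is configured, otherwise a map of ready LangChain chat models (the import alias and prompt are illustrative):

import { loadDeepseekChatModels } from '@/lib/providers/deepseek';

const models = await loadDeepseekChatModels();
const chat = models['deepseek-chat'];
if (chat) {
  // BaseChatModel#invoke accepts a plain string prompt.
  const reply = await chat.model.invoke('Say hello in one sentence.');
  console.log(chat.displayName, reply.content);
}
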
@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';

@@ -40,8 +45,12 @@ const geminiChatModels: Record<string, string>[] = [
 
 const geminiEmbeddingModels: Record<string, string>[] = [
   {
-    displayName: 'Gemini Embedding',
-    key: 'gemini-embedding-exp',
+    displayName: 'Text Embedding 004',
+    key: 'models/text-embedding-004',
   },
+  {
+    displayName: 'Embedding 001',
+    key: 'models/embedding-001',
+  },
 ];

@@ -1,6 +1,11 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [

@@ -72,6 +77,14 @@ const groqChatModels: Record<string, string>[] = [
     displayName: 'Llama 3.2 90B Vision Preview (Preview)',
     key: 'llama-3.2-90b-vision-preview',
   },
+  /* {
+    displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
+    key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
+  }, */
+  {
+    displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
+    key: 'meta-llama/llama-4-scout-17b-16e-instruct',
+  },
 ];
 
 export const loadGroqChatModels = async () => {

@@ -1,17 +1,60 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI',
+  },
+};
 
 export interface ChatModel {
   displayName: string;

@@ -32,6 +75,8 @@ export const chatModelProviders: Record<
   groq: loadGroqChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<

@@ -42,6 +87,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {

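The point of `PROVIDER_METADATA` is to give UI code a display name per provider key, with capitalization as a fallback; a small TypeScript sketch mirroring how the settings page uses it above:

import { PROVIDER_METADATA } from '@/lib/providers';

// Prefer the registered displayName, else capitalize the raw key.
const label = (provider: string) =>
  (PROVIDER_METADATA as any)[provider]?.displayName ||
  provider.charAt(0).toUpperCase() + provider.slice(1);

console.log(label('lmstudio')); // "LM Studio"
console.log(label('somekey')); // "Somekey" (fallback capitalization)
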
src/lib/providers/lmstudio.ts (new file, 100 lines)
@@ -0,0 +1,100 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lmstudio',
+  displayName: 'LM Studio',
+};
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+  id: string;
+  name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+  try {
+    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const loadLMStudioChatModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      chatModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+          temperature: 0.7,
+          streaming: true,
+          maxRetries: 3,
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      embeddingsModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+        }) as unknown as Embeddings,
+      };
+    });
+
+    return embeddingsModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio embeddings model: ${err}`);
+    return {};
+  }
+};

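A quick illustration of the `ensureV1Endpoint` normalization the loaders above rely on (the URLs are the sample values from the config comments):

console.log(ensureV1Endpoint('http://host.docker.internal:1234'));
// -> http://host.docker.internal:1234/v1
console.log(ensureV1Endpoint('http://host.docker.internal:1234/v1'));
// -> http://host.docker.internal:1234/v1 (unchanged)
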
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama',
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';

@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';

@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
+  {
+    displayName: 'GPT 4.1 nano',
+    key: 'gpt-4.1-nano',
+  },
+  {
+    displayName: 'GPT 4.1 mini',
+    key: 'gpt-4.1-mini',
+  },
+  {
+    displayName: 'GPT 4.1',
+    key: 'gpt-4.1',
+  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [

@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face',
+};
+
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
     const splittedText = await splitter.splitText(parsedText);
     const title = res.data
       .toString('utf8')
-      .match(/<title>(.*?)<\/title>/)?.[1];
+      .match(/<title.*>(.*?)<\/title>/)?.[1];
 
     const linkDocs = splittedText.map((text) => {
       return new Document({

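The widened pattern matters for pages whose `<title>` tag carries attributes; a minimal TypeScript check of both regexes against such a tag (the sample HTML is made up):

const html = '<title data-rh="true">Example Title</title>';
console.log(html.match(/<title>(.*?)<\/title>/)?.[1]); // undefined: old pattern requires a bare <title>
console.log(html.match(/<title.*>(.*?)<\/title>/)?.[1]); // "Example Title"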