Mirror of https://github.com/ItzCrazyKns/Perplexica.git, synced 2025-09-18 15:21:33 +00:00

Compare commits: feat/model ... 114a7aa09d (6 commits)

Commits:
- 114a7aa09d
- d0ba8c9038
- 934fb0a23b
- 8ecf3b4e99
- b5ee8386e7
- 0fcd598ff7
README.md

@@ -159,7 +159,6 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
 [](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [](https://repocloud.io/details/?app_id=267)
-[](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
 
 ## Upcoming Features
 
package.json

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.10.2",
+  "version": "1.10.1",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
sample.config.toml

@@ -25,8 +25,5 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
-[MODELS.LM_STUDIO]
-API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
-
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
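Note: each TOML section maps one-to-one onto the Config interface and its getters in src/lib/config.ts, so dropping [MODELS.LM_STUDIO] here pairs with dropping getLMStudioApiEndpoint in the config.ts hunks below. A minimal sketch of that mapping; the getter body is assumed by analogy with the getters shown later in this compare:

    // TOML:   [MODELS.DEEPSEEK]  /  API_KEY = "..."
    // Getter (assumed shape, analogous to the removed getLMStudioApiEndpoint):
    export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;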
src/app/api/config/route.ts

@@ -8,7 +8,6 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
-  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {

@@ -52,7 +51,6 @@ export const GET = async (req: Request) => {
 
     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
-    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();

@@ -95,9 +93,6 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
-      LM_STUDIO: {
-        API_URL: config.lmStudioApiUrl,
-      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
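Note: the GET handler reads every key through a dedicated getter, while POST writes back through updateConfig, which accepts a RecursivePartial<Config> and merges it into the file, so only the supplied keys change. A sketch of such a partial update (the value is a hypothetical placeholder):

    updateConfig({
      MODELS: {
        DEEPSEEK: { API_KEY: 'sk-...' }, // hypothetical value; untouched sections are preserved by mergeConfigs
      },
    });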
src/app/settings/page.tsx

@@ -7,7 +7,6 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
-import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {

@@ -21,7 +20,6 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
-  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;

@@ -550,7 +548,6 @@ const Page = () => {
               (provider) => ({
                 value: provider,
                 label:
-                  (PROVIDER_METADATA as any)[provider]?.displayName ||
                   provider.charAt(0).toUpperCase() +
                   provider.slice(1),
               }),

@@ -693,7 +690,6 @@ const Page = () => {
               (provider) => ({
                 value: provider,
                 label:
-                  (PROVIDER_METADATA as any)[provider]?.displayName ||
                   provider.charAt(0).toUpperCase() +
                   provider.slice(1),
               }),

@@ -862,25 +858,6 @@ const Page = () => {
                   onSave={(value) => saveConfig('deepseekApiKey', value)}
                 />
               </div>
-
-              <div className="flex flex-col space-y-1">
-                <p className="text-black/70 dark:text-white/70 text-sm">
-                  LM Studio API URL
-                </p>
-                <Input
-                  type="text"
-                  placeholder="LM Studio API URL"
-                  value={config.lmStudioApiUrl}
-                  isSaving={savingStates['lmStudioApiUrl']}
-                  onChange={(e) => {
-                    setConfig((prev) => ({
-                      ...prev!,
-                      lmStudioApiUrl: e.target.value,
-                    }));
-                  }}
-                  onSave={(value) => saveConfig('lmStudioApiUrl', value)}
-                />
-              </div>
             </div>
           </SettingsSection>
         </div>
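Note: with the PROVIDER_METADATA import gone on this branch, dropdown labels fall back to capitalizing the raw provider key:

    const label = provider.charAt(0).toUpperCase() + provider.slice(1);
    // 'openai' -> 'Openai'; curated names like 'OpenAI' required the removed PROVIDER_METADATA lookup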
src/components/ChatWindow.tsx

@@ -363,7 +363,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
 
       if (data.type === 'sources') {
         sources = data.data;
-        if (!added) {
         setMessages((prevMessages) => [
           ...prevMessages,
           {

@@ -376,7 +375,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
           },
         ]);
         added = true;
-        }
         setMessageAppeared(true);
       }
 

@@ -394,8 +392,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
           },
         ]);
         added = true;
-      }
-
+        setMessageAppeared(true);
+      } else {
         setMessages((prev) =>
           prev.map((message) => {
             if (message.messageId === data.messageId) {

@@ -405,9 +403,9 @@ const ChatWindow = ({ id }: { id?: string }) => {
             return message;
           }),
         );
+      }
 
       recievedMessage += data.data;
-      setMessageAppeared(true);
     }
 
     if (data.type === 'messageEnd') {
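Note: the restructured 'message' handler creates the assistant message on the first chunk and routes every later chunk through the new else branch; the closing brace added after the map pulls the append logic inside it. The resulting control flow, sketched (newAssistantMessage stands in for the object literal in the diff, and the map body is elided in the hunk above):

    if (!added) {
      setMessages((prevMessages) => [...prevMessages, newAssistantMessage]); // first chunk creates the message
      added = true;
      setMessageAppeared(true);
    } else {
      setMessages((prev) =>
        prev.map((message) => {
          if (message.messageId === data.messageId) {
            // append data.data to the existing message (body elided in the hunk)
          }
          return message;
        }),
      );
    }
    recievedMessage += data.data;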
src/components/MessageBox.tsx

@@ -97,7 +97,6 @@ const MessageBox = ({
       },
     ),
   );
-  setSpeechMessage(message.content.replace(regex, ''));
   return;
 }
 
src/lib/config.ts

@@ -1,14 +1,7 @@
+import fs from 'fs';
+import path from 'path';
 import toml from '@iarna/toml';
-
-// Use dynamic imports for Node.js modules to prevent client-side errors
-let fs: any;
-let path: any;
-if (typeof window === 'undefined') {
-  // We're on the server
-  fs = require('fs');
-  path = require('path');
-}
 
 const configFileName = 'config.toml';
 
 interface Config {

@@ -35,9 +28,6 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
-  LM_STUDIO: {
-    API_URL: string;
-  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;

@@ -53,17 +43,10 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    return toml.parse(
+const loadConfig = () =>
+  toml.parse(
     fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
   ) as any as Config;
-  }
-
-  // Client-side fallback - settings will be loaded via API
-  return {} as Config;
-};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;

@@ -94,9 +77,6 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
-export const getLMStudioApiEndpoint = () =>
-  loadConfig().MODELS.LM_STUDIO.API_URL;
-
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;

@@ -129,13 +109,10 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  // Server-side only
-  if (typeof window === 'undefined') {
   const currentConfig = loadConfig();
   const mergedConfig = mergeConfigs(currentConfig, config);
   fs.writeFileSync(
     path.join(path.join(process.cwd(), `${configFileName}`)),
     toml.stringify(mergedConfig),
   );
-  }
 };
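Note: the branch replaces the `typeof window` guards and lazy `require()` calls with plain static imports; these helpers only ever run inside server route handlers, so the client-side fallback that returned `{} as Config` was dead weight. A minimal usage sketch under that assumption:

    // Route handlers run on the server, so no window guard is needed:
    import { getDeepseekApiKey } from '@/lib/config';

    export const GET = async () => {
      return Response.json({ deepseekApiKey: getDeepseekApiKey() });
    };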
src/lib/providers/anthropic.ts

@@ -1,11 +1,6 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
-
-export const PROVIDER_INFO = {
-  key: 'anthropic',
-  displayName: 'Anthropic',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
src/lib/providers/deepseek.ts

@@ -3,11 +3,6 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-export const PROVIDER_INFO = {
-  key: 'deepseek',
-  displayName: 'Deepseek AI',
-};
 
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
src/lib/providers/gemini.ts

@@ -4,11 +4,6 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'gemini',
-  displayName: 'Google Gemini',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 

@@ -45,12 +40,8 @@ const geminiChatModels: Record<string, string>[] = [
 
 const geminiEmbeddingModels: Record<string, string>[] = [
   {
-    displayName: 'Text Embedding 004',
-    key: 'models/text-embedding-004',
-  },
-  {
-    displayName: 'Embedding 001',
-    key: 'models/embedding-001',
+    displayName: 'Gemini Embedding',
+    key: 'gemini-embedding-exp',
   },
 ];
 
src/lib/providers/groq.ts

@@ -1,11 +1,6 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'groq',
-  displayName: 'Groq',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [

@@ -77,14 +72,6 @@ const groqChatModels: Record<string, string>[] = [
     displayName: 'Llama 3.2 90B Vision Preview (Preview)',
     key: 'llama-3.2-90b-vision-preview',
   },
-  /* {
-    displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
-    key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
-  }, */
-  {
-    displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
-    key: 'meta-llama/llama-4-scout-17b-16e-instruct',
-  },
 ];
 
 export const loadGroqChatModels = async () => {
src/lib/providers/index.ts

@@ -1,60 +1,18 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import {
-  loadOpenAIChatModels,
-  loadOpenAIEmbeddingModels,
-  PROVIDER_INFO as OpenAIInfo,
-  PROVIDER_INFO,
-} from './openai';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import {
-  loadOllamaChatModels,
-  loadOllamaEmbeddingModels,
-  PROVIDER_INFO as OllamaInfo,
-} from './ollama';
-import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
-import {
-  loadAnthropicChatModels,
-  PROVIDER_INFO as AnthropicInfo,
-} from './anthropic';
-import {
-  loadGeminiChatModels,
-  loadGeminiEmbeddingModels,
-  PROVIDER_INFO as GeminiInfo,
-} from './gemini';
-import {
-  loadTransformersEmbeddingsModels,
-  PROVIDER_INFO as TransformersInfo,
-} from './transformers';
-import {
-  loadDeepseekChatModels,
-  PROVIDER_INFO as DeepseekInfo,
-} from './deepseek';
-import {
-  loadLMStudioChatModels,
-  loadLMStudioEmbeddingsModels,
-  PROVIDER_INFO as LMStudioInfo,
-} from './lmstudio';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
+import { loadGroqChatModels } from './groq';
+import { loadAnthropicChatModels } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
+import { loadTransformersEmbeddingsModels } from './transformers';
+import { loadDeepseekChatModels } from './deepseek';
 
-export const PROVIDER_METADATA = {
-  openai: OpenAIInfo,
-  ollama: OllamaInfo,
-  groq: GroqInfo,
-  anthropic: AnthropicInfo,
-  gemini: GeminiInfo,
-  transformers: TransformersInfo,
-  deepseek: DeepseekInfo,
-  lmstudio: LMStudioInfo,
-  custom_openai: {
-    key: 'custom_openai',
-    displayName: 'Custom OpenAI',
-  },
-};
-
 export interface ChatModel {
   displayName: string;

@@ -76,7 +34,6 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
-  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<

@@ -87,7 +44,6 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
-  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
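Note: both registries map a provider key to an async loader that returns a Record of models, so adding or removing a provider is a one-line change here. A sketch of how such a registry is consumed (the real getAvailableChatModelProviders body sits outside these hunks):

    const models: Record<string, Record<string, ChatModel>> = {};
    for (const [provider, loader] of Object.entries(chatModelProviders)) {
      models[provider] = await loader(); // loaders like the deleted LM Studio one return {} on failure
    }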
src/lib/providers/lmstudio.ts (deleted)

@@ -1,100 +0,0 @@
-import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
-import axios from 'axios';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'lmstudio',
-  displayName: 'LM Studio',
-};
-import { ChatOpenAI } from '@langchain/openai';
-import { OpenAIEmbeddings } from '@langchain/openai';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-interface LMStudioModel {
-  id: string;
-  name?: string;
-}
-
-const ensureV1Endpoint = (endpoint: string): string =>
-  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
-
-const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
-  try {
-    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-    return true;
-  } catch {
-    return false;
-  }
-};
-
-export const loadLMStudioChatModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      chatModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new ChatOpenAI({
-          openAIApiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-          temperature: 0.7,
-          streaming: true,
-          maxRetries: 3,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio models: ${err}`);
-    return {};
-  }
-};
-
-export const loadLMStudioEmbeddingsModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const embeddingsModels: Record<string, EmbeddingModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      embeddingsModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new OpenAIEmbeddings({
-          openAIApiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingsModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio embeddings model: ${err}`);
-    return {};
-  }
-};
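Note: the deleted provider normalized whatever endpoint the user configured onto OpenAI's /v1 path and probed GET /models before loading anything. The helper's behavior, for reference:

    ensureV1Endpoint('http://host.docker.internal:1234');    // => 'http://host.docker.internal:1234/v1'
    ensureV1Endpoint('http://host.docker.internal:1234/v1'); // => unchanged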
src/lib/providers/ollama.ts

@@ -1,11 +1,6 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'ollama',
-  displayName: 'Ollama',
-};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
 
src/lib/providers/openai.ts

@@ -1,11 +1,6 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'openai',
-  displayName: 'OpenAI',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 

@@ -30,18 +25,6 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
-  {
-    displayName: 'GPT 4.1 nano',
-    key: 'gpt-4.1-nano',
-  },
-  {
-    displayName: 'GPT 4.1 mini',
-    key: 'gpt-4.1-mini',
-  },
-  {
-    displayName: 'GPT 4.1',
-    key: 'gpt-4.1',
-  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [
src/lib/providers/transformers.ts

@@ -1,10 +1,5 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
-
-export const PROVIDER_INFO = {
-  key: 'transformers',
-  displayName: 'Hugging Face',
-};
 
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {
src/lib/search/metaSearchAgent.ts

@@ -6,11 +6,6 @@ import {
   MessagesPlaceholder,
   PromptTemplate,
 } from '@langchain/core/prompts';
-import {
-  RunnableLambda,
-  RunnableMap,
-  RunnableSequence,
-} from '@langchain/core/runnables';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import LineListOutputParser from '../outputParsers/listLineOutputParser';

@@ -24,6 +19,7 @@ import computeSimilarity from '../utils/computeSimilarity';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import eventEmitter from 'events';
 import { StreamEvent } from '@langchain/core/tracers/log_stream';
+import { EventEmitter } from 'node:stream';
 
 export interface MetaSearchAgentType {
   searchAndAnswer: (

@@ -47,7 +43,7 @@ interface Config {
   activeEngines: string[];
 }
 
-type BasicChainInput = {
+type SearchInput = {
   chat_history: BaseMessage[];
   query: string;
 };

@@ -60,14 +56,25 @@ class MetaSearchAgent implements MetaSearchAgentType {
     this.config = config;
   }
 
-  private async createSearchRetrieverChain(llm: BaseChatModel) {
+  private async searchSources(
+    llm: BaseChatModel,
+    input: SearchInput,
+    emitter: EventEmitter,
+  ) {
     (llm as unknown as ChatOpenAI).temperature = 0;
 
-    return RunnableSequence.from([
-      PromptTemplate.fromTemplate(this.config.queryGeneratorPrompt),
-      llm,
-      this.strParser,
-      RunnableLambda.from(async (input: string) => {
+    const chatPrompt = PromptTemplate.fromTemplate(
+      this.config.queryGeneratorPrompt,
+    );
+
+    const processedChatPrompt = await chatPrompt.invoke({
+      chat_history: formatChatHistoryAsString(input.chat_history),
+      query: input.query,
+    });
+
+    const llmRes = await llm.invoke(processedChatPrompt);
+    const messageStr = await this.strParser.invoke(llmRes);
+
     const linksOutputParser = new LineListOutputParser({
       key: 'links',
     });

@@ -76,10 +83,10 @@ class MetaSearchAgent implements MetaSearchAgentType {
       key: 'question',
     });
 
-    const links = await linksOutputParser.parse(input);
+    const links = await linksOutputParser.parse(messageStr);
     let question = this.config.summarizer
-      ? await questionOutputParser.parse(input)
-      : input;
+      ? await questionOutputParser.parse(messageStr)
+      : messageStr;
 
     if (question === 'not_needed') {
       return { query: '', docs: [] };

@@ -99,8 +106,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
       linkDocs.map((doc) => {
         const URLDocExists = docGroups.find(
           (d) =>
-            d.metadata.url === doc.metadata.url &&
-            d.metadata.totalDocs < 10,
+            d.metadata.url === doc.metadata.url && d.metadata.totalDocs < 10,
         );
 
         if (!URLDocExists) {

@@ -115,8 +121,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
 
         const docIndex = docGroups.findIndex(
           (d) =>
-            d.metadata.url === doc.metadata.url &&
-            d.metadata.totalDocs < 10,
+            d.metadata.url === doc.metadata.url && d.metadata.totalDocs < 10,
         );
 
         if (docIndex !== -1) {

@@ -228,42 +233,31 @@ class MetaSearchAgent implements MetaSearchAgentType {
 
       return { query: question, docs: documents };
     }
-      }),
-    ]);
   }
 
-  private async createAnsweringChain(
+  private async streamAnswer(
     llm: BaseChatModel,
     fileIds: string[],
     embeddings: Embeddings,
     optimizationMode: 'speed' | 'balanced' | 'quality',
     systemInstructions: string,
+    input: SearchInput,
+    emitter: EventEmitter,
   ) {
-    return RunnableSequence.from([
-      RunnableMap.from({
-        systemInstructions: () => systemInstructions,
-        query: (input: BasicChainInput) => input.query,
-        chat_history: (input: BasicChainInput) => input.chat_history,
-        date: () => new Date().toISOString(),
-        context: RunnableLambda.from(async (input: BasicChainInput) => {
-          const processedHistory = formatChatHistoryAsString(
-            input.chat_history,
-          );
+    const chatPrompt = ChatPromptTemplate.fromMessages([
+      ['system', this.config.responsePrompt],
+      new MessagesPlaceholder('chat_history'),
+      ['user', '{query}'],
+    ]);
 
     let docs: Document[] | null = null;
     let query = input.query;
 
     if (this.config.searchWeb) {
-      const searchRetrieverChain =
-        await this.createSearchRetrieverChain(llm);
-
-      const searchRetrieverResult = await searchRetrieverChain.invoke({
-        chat_history: processedHistory,
-        query,
-      });
-
-      query = searchRetrieverResult.query;
-      docs = searchRetrieverResult.docs;
+      const searchResults = await this.searchSources(llm, input, emitter);
+
+      query = searchResults.query;
+      docs = searchResults.docs;
     }
 
     const sortedDocs = await this.rerankDocs(

@@ -274,23 +268,30 @@ class MetaSearchAgent implements MetaSearchAgentType {
       optimizationMode,
     );
 
-          return sortedDocs;
-        })
-          .withConfig({
-            runName: 'FinalSourceRetriever',
-          })
-          .pipe(this.processDocs),
-      }),
-      ChatPromptTemplate.fromMessages([
-        ['system', this.config.responsePrompt],
-        new MessagesPlaceholder('chat_history'),
-        ['user', '{query}'],
-      ]),
-      llm,
-      this.strParser,
-    ]).withConfig({
-      runName: 'FinalResponseGenerator',
+    emitter.emit('data', JSON.stringify({ type: 'sources', data: sortedDocs }));
+
+    const context = this.processDocs(sortedDocs);
+
+    const formattedChatPrompt = await chatPrompt.invoke({
+      query: input.query,
+      chat_history: input.chat_history,
+      date: new Date().toISOString(),
+      context: context,
+      systemInstructions: systemInstructions,
     });
+
+    const llmRes = await llm.stream(formattedChatPrompt);
+
+    for await (const data of llmRes) {
+      const messageStr = await this.strParser.invoke(data);
+
+      emitter.emit(
+        'data',
+        JSON.stringify({ type: 'response', data: messageStr }),
+      );
+    }
+
+    emitter.emit('end');
   }
 
   private async rerankDocs(

@@ -431,39 +432,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
       .join('\n');
   }
 
-  private async handleStream(
-    stream: AsyncGenerator<StreamEvent, any, any>,
-    emitter: eventEmitter,
-  ) {
-    for await (const event of stream) {
-      if (
-        event.event === 'on_chain_end' &&
-        event.name === 'FinalSourceRetriever'
-      ) {
-        ``;
-        emitter.emit(
-          'data',
-          JSON.stringify({ type: 'sources', data: event.data.output }),
-        );
-      }
-      if (
-        event.event === 'on_chain_stream' &&
-        event.name === 'FinalResponseGenerator'
-      ) {
-        emitter.emit(
-          'data',
-          JSON.stringify({ type: 'response', data: event.data.chunk }),
-        );
-      }
-      if (
-        event.event === 'on_chain_end' &&
-        event.name === 'FinalResponseGenerator'
-      ) {
-        emitter.emit('end');
-      }
-    }
-  }
-
   async searchAndAnswer(
     message: string,
     history: BaseMessage[],

@@ -475,26 +443,19 @@ class MetaSearchAgent implements MetaSearchAgentType {
   ) {
     const emitter = new eventEmitter();
 
-    const answeringChain = await this.createAnsweringChain(
+    this.streamAnswer(
      llm,
       fileIds,
       embeddings,
       optimizationMode,
       systemInstructions,
-    );
-
-    const stream = answeringChain.streamEvents(
       {
         chat_history: history,
         query: message,
       },
-      {
-        version: 'v1',
-      },
+      emitter,
     );
 
-    this.handleStream(stream, emitter);
-
     return emitter;
   }
 }
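Note: the refactor drops the RunnableSequence/RunnableMap pipelines and the handleStream event translator; searchSources and streamAnswer now call invoke()/stream() directly and push the same 'sources', 'response', and 'end' events onto a plain EventEmitter, which searchAndAnswer returns immediately while streamAnswer runs in the background. A consumer sketch (argument list abbreviated; the event names and JSON shapes match what streamAnswer emits above):

    const emitter = await agent.searchAndAnswer(/* message, history, llm, embeddings, ... */);
    emitter.on('data', (data: string) => {
      const parsed = JSON.parse(data);
      if (parsed.type === 'sources') console.log(`got ${parsed.data.length} sources`);
      if (parsed.type === 'response') process.stdout.write(parsed.data);
    });
    emitter.on('end', () => console.log('\ndone'));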
src/utils/documents.ts

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
       const splittedText = await splitter.splitText(parsedText);
       const title = res.data
         .toString('utf8')
-        .match(/<title.*>(.*?)<\/title>/)?.[1];
+        .match(/<title>(.*?)<\/title>/)?.[1];
 
       const linkDocs = splittedText.map((text) => {
         return new Document({
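Note: the old pattern <title.*> tolerated attributes on the tag, but its unanchored greedy .* could also swallow intervening markup before backtracking; the new pattern only matches a bare <title> tag, so attributed titles now yield undefined:

    '<title>Example</title>'.match(/<title>(.*?)<\/title>/)?.[1];            // 'Example'
    '<title lang="en">Example</title>'.match(/<title>(.*?)<\/title>/)?.[1];  // undefined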