Mirror of https://github.com/ItzCrazyKns/Perplexica.git, synced 2025-05-02 09:12:37 +00:00

Compare commits: f790457376...master (12 commits)
Commits:

68e151b2bd
06ff272541
4154d5e4b1
1862491496
073b5e897c
9a332e79e4
72450b9217
7e1dc33a08
aa240009ab
41b258e4d8
28b9cca413
8aaee2c40c

README.md (17 changed lines)
README.md @@ -1,21 +1,5 @@

# 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->

<div align="center" markdown="1">

<sup>Special thanks to:</sup>

<br>
<br>

<a href="https://www.warp.dev/perplexica">
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/775dd593-9b5f-40f1-bf48-479faff4c27b">
</a>

### [Warp, the AI Devtool that lives in your terminal](https://www.warp.dev/perplexica)

[Available for MacOS, Linux, & Windows](https://www.warp.dev/perplexica)

</div>

<hr/>

[](https://discord.gg/26aArMy8tT)


@@ -159,6 +143,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on

[](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[](https://repocloud.io/details/?app_id=267)
[](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)

## Upcoming Features

@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434

[MODELS.DEEPSEEK]
API_KEY = ""

[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234

[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768

@@ -29,7 +29,6 @@ type Message = {
  messageId: string;
  chatId: string;
  content: string;
  userSessionId: string;
};

type ChatModel = {

@@ -139,7 +138,6 @@ const handleHistorySave = async (
    where: eq(chats.id, message.chatId),
  });

  let currentDate = new Date();
  if (!chat) {
    await db
      .insert(chats)

@@ -149,8 +147,6 @@ const handleHistorySave = async (
        createdAt: new Date().toString(),
        focusMode: focusMode,
        files: files.map(getFileDetails),
        userSessionId: message.userSessionId,
        timestamp: currentDate.toISOString(),
      })
      .execute();
  }

@@ -1,47 +1,10 @@
import db from '@/lib/db';
import { chats } from '@/lib/db/schema';
import { eq, sql } from 'drizzle-orm';

export const GET = async (req: Request) => {
  try {
    // Get the session headers from the request
    const headers = await req.headers;
    const userSessionId = headers.get('user-session-id')?.toString() ?? '';
    const maxRecordLimit = parseInt(headers.get('max-record-limit') || '20', 10);

    if (userSessionId == '') {
      return Response.json({ chats: {} }, { status: 200 });
    }

    let chatsRes = await db.query.chats.findMany({
      where: eq(chats.userSessionId, userSessionId),
    });

    chatsRes = chatsRes.reverse();
    // Keep only the latest records in the database. Delete older records.
    if (chatsRes.length > maxRecordLimit) {
      const deleteChatsQuery = sql`DELETE FROM chats
        WHERE userSessionId = ${userSessionId} AND (
          timestamp IS NULL OR
          timestamp NOT IN (
            SELECT timestamp FROM chats
            WHERE userSessionId = ${userSessionId}
            ORDER BY timestamp DESC
            LIMIT ${maxRecordLimit}
          )
        )
      `;
      await db.run(deleteChatsQuery);
      // Delete messages that no longer link to a chat in the database.
      const deleteMessagesQuery = sql`DELETE FROM messages
        WHERE chatId NOT IN (
          SELECT id FROM chats
        )
      `;
      await db.run(deleteMessagesQuery);
    }

    return Response.json({ chats: chatsRes }, { status: 200 });
    let chats = await db.query.chats.findMany();
    chats = chats.reverse();
    return Response.json({ chats: chats }, { status: 200 });
  } catch (err) {
    console.error('Error in getting chats: ', err);
    return Response.json(

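The retention query above keeps only the newest `maxRecordLimit` rows per session by deleting any chat whose timestamp is missing or falls outside the top-N timestamps for that session, then sweeps orphaned messages in a second pass. A minimal standalone sketch of the same pattern, assuming better-sqlite3 and the table/column names from this diff (the database path is hypothetical):

```typescript
// Standalone sketch of the keep-newest-N retention pattern (not the route itself).
import Database from 'better-sqlite3';

const db = new Database('data/db.sqlite'); // hypothetical path

const keepNewest = (userSessionId: string, limit: number) => {
  // Delete every chat for this session whose timestamp is missing or not
  // among the `limit` most recent timestamps for the same session.
  db.prepare(
    `DELETE FROM chats
     WHERE userSessionId = ? AND (
       timestamp IS NULL OR
       timestamp NOT IN (
         SELECT timestamp FROM chats
         WHERE userSessionId = ?
         ORDER BY timestamp DESC
         LIMIT ?
       )
     )`,
  ).run(userSessionId, userSessionId, limit);

  // Second pass: remove messages whose parent chat no longer exists.
  db.prepare(`DELETE FROM messages WHERE chatId NOT IN (SELECT id FROM chats)`).run();
};
```

Note that rows sharing an identical timestamp at the cutoff all survive the `NOT IN` filter, so the limit is approximate under timestamp collisions.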
@@ -8,6 +8,7 @@ import {
  getOllamaApiEndpoint,
  getOpenaiApiKey,
  getDeepseekApiKey,
  getLMStudioApiEndpoint,
  updateConfig,
} from '@/lib/config';
import {

@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {

    config['openaiApiKey'] = getOpenaiApiKey();
    config['ollamaApiUrl'] = getOllamaApiEndpoint();
    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
    config['anthropicApiKey'] = getAnthropicApiKey();
    config['groqApiKey'] = getGroqApiKey();
    config['geminiApiKey'] = getGeminiApiKey();

@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
      DEEPSEEK: {
        API_KEY: config.deepseekApiKey,
      },
      LM_STUDIO: {
        API_URL: config.lmStudioApiUrl,
      },
      CUSTOM_OPENAI: {
        API_URL: config.customOpenaiApiUrl,
        API_KEY: config.customOpenaiApiKey,

@@ -1,6 +1,5 @@
'use client';

import crypto from 'crypto';
import DeleteChat from '@/components/DeleteChat';
import { cn, formatTimeDifference } from '@/lib/utils';
import { BookOpenText, ClockIcon, Delete, ScanEye } from 'lucide-react';

@@ -22,34 +21,10 @@ const Page = () => {
  const fetchChats = async () => {
    setLoading(true);

    let userSessionId = localStorage.getItem('userSessionId');
    if (!userSessionId) {
      userSessionId = crypto.randomBytes(20).toString('hex');
      localStorage.setItem('userSessionId', userSessionId);
    }

    // Get maxRecordLimit from localStorage or set the default
    let maxRecordLimit = localStorage.getItem('maxRecordLimit');
    if (!maxRecordLimit) {
      maxRecordLimit = '20';
      localStorage.setItem('maxRecordLimit', maxRecordLimit);
    } else {
      let valueInt = parseInt(maxRecordLimit, 10) || 20;
      if (valueInt < 1) {
        valueInt = 1;
      } else if (valueInt > 100) {
        valueInt = 100;
      }
      maxRecordLimit = valueInt.toString();
      localStorage.setItem('maxRecordLimit', maxRecordLimit);
    }

    const res = await fetch(`/api/chats`, {
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',
        'user-session-id': userSessionId!,
        'max-record-limit': maxRecordLimit,
      },
    });

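The same normalization (default 20, clamped to 1-100) is repeated verbatim in the settings page further down, so a small shared helper could centralize it. A minimal sketch under those assumptions; `clampRecordLimit` is a hypothetical name, not part of this diff:

```typescript
// Hypothetical helper consolidating the repeated clamp logic (not in the diff).
const clampRecordLimit = (raw: string | null, fallback = 20): string => {
  const parsed = parseInt(raw ?? '', 10) || fallback; // NaN falls back to the default
  return Math.min(100, Math.max(1, parsed)).toString();
};

// Usage mirroring fetchChats above:
// const maxRecordLimit = clampRecordLimit(localStorage.getItem('maxRecordLimit'));
// localStorage.setItem('maxRecordLimit', maxRecordLimit);
```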
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
import ThemeSwitcher from '@/components/theme/Switcher';
import { ImagesIcon, VideoIcon } from 'lucide-react';
import Link from 'next/link';
import { PROVIDER_METADATA } from '@/lib/providers';

interface SettingsType {
  chatModelProviders: {

@@ -20,11 +21,11 @@ interface SettingsType {
  anthropicApiKey: string;
  geminiApiKey: string;
  ollamaApiUrl: string;
  lmStudioApiUrl: string;
  deepseekApiKey: string;
  customOpenaiApiKey: string;
  customOpenaiApiUrl: string;
  customOpenaiModelName: string;
  maxRecordLimit: string;
}

interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {

@@ -147,7 +148,6 @@ const Page = () => {
  const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
  const [systemInstructions, setSystemInstructions] = useState<string>('');
  const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
  const [maxRecordLimit, setMaxRecordLimit] = useState<string>('20');

  useEffect(() => {
    const fetchConfig = async () => {

@@ -210,8 +210,6 @@ const Page = () => {

      setSystemInstructions(localStorage.getItem('systemInstructions')!);

      setMaxRecordLimit(localStorage.getItem('maxRecordLimit') || data.maxRecordLimit || '20');

      setIsLoading(false);
    };

|
||||
localStorage.setItem('embeddingModel', value);
|
||||
} else if (key === 'systemInstructions') {
|
||||
localStorage.setItem('systemInstructions', value);
|
||||
} else if (key === 'maxRecordLimit') {
|
||||
let valueInt = parseInt(value, 10) || 20;
|
||||
if (valueInt < 1) {
|
||||
valueInt = 1;
|
||||
} else if (valueInt > 100) {
|
||||
valueInt = 100;
|
||||
}
|
||||
setMaxRecordLimit(valueInt.toString());
|
||||
localStorage.setItem('maxRecordLimit', valueInt.toString());
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Failed to save:', err);
|
||||
@ -561,8 +550,9 @@ const Page = () => {
|
||||
(provider) => ({
|
||||
value: provider,
|
||||
label:
|
||||
(PROVIDER_METADATA as any)[provider]?.displayName ||
|
||||
provider.charAt(0).toUpperCase() +
|
||||
provider.slice(1),
|
||||
provider.slice(1),
|
||||
}),
|
||||
)}
|
||||
/>
|
||||
@ -703,8 +693,9 @@ const Page = () => {
|
||||
(provider) => ({
|
||||
value: provider,
|
||||
label:
|
||||
(PROVIDER_METADATA as any)[provider]?.displayName ||
|
||||
provider.charAt(0).toUpperCase() +
|
||||
provider.slice(1),
|
||||
provider.slice(1),
|
||||
}),
|
||||
)}
|
||||
/>
|
||||
@@ -871,36 +862,24 @@ const Page = () => {
                  onSave={(value) => saveConfig('deepseekApiKey', value)}
                />
              </div>
            </div>
          </SettingsSection>

          <SettingsSection title="Chat History">
            <div className="flex flex-col space-y-4">
              <div className="flex flex-col space-y-1">
                <p className="text-black/70 dark:text-white/70 text-sm">
                  Maximum Chat History Records
                </p>
                <div className="flex items-center space-x-2">
                  <Input
                    type="number"
                    min="1"
                    max="100"
                    pattern="[0-9]*"
                    inputMode="numeric"
                    value={maxRecordLimit}
                    isSaving={savingStates['maxRecordLimit']}
                    onChange={(e) => {
                      setMaxRecordLimit(e.target.value);
                    }}
                    onSave={(value) => saveConfig('maxRecordLimit', value)}
                  />
                  <span className="text-black/60 dark:text-white/60 text-sm">
                    records
                  </span>
                </div>
                <p className="text-xs text-black/60 dark:text-white/60 mt-1">
                  Maximum number of chat records to keep in history. Older records will be automatically deleted.
                  LM Studio API URL
                </p>
                <Input
                  type="text"
                  placeholder="LM Studio API URL"
                  value={config.lmStudioApiUrl}
                  isSaving={savingStates['lmStudioApiUrl']}
                  onChange={(e) => {
                    setConfig((prev) => ({
                      ...prev!,
                      lmStudioApiUrl: e.target.value,
                    }));
                  }}
                  onSave={(value) => saveConfig('lmStudioApiUrl', value)}
                />
              </div>
            </div>
          </SettingsSection>

@@ -95,18 +95,6 @@ const checkConfig = async (
    if (!embeddingModel || !embeddingModelProvider) {
      const embeddingModelProviders = providers.embeddingModelProviders;

      let userSessionId = localStorage.getItem('userSessionId');
      if (!userSessionId) {
        userSessionId = crypto.randomBytes(20).toString('hex');
        localStorage.setItem('userSessionId', userSessionId!);
      }

      let maxRecordLimit = localStorage.getItem('maxRecordLimit');
      if (!maxRecordLimit) {
        maxRecordLimit = '20';
        localStorage.setItem('maxRecordLimit', maxRecordLimit);
      }

      if (
        !embeddingModelProviders ||
        Object.keys(embeddingModelProviders).length === 0

@@ -354,7 +342,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
    let added = false;

    messageId = messageId ?? crypto.randomBytes(7).toString('hex');
    let userSessionId = localStorage.getItem('userSessionId');

    setMessages((prevMessages) => [
      ...prevMessages,

@@ -479,7 +466,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
          messageId: messageId,
          chatId: chatId!,
          content: message,
          userSessionId: userSessionId,
        },
        chatId: chatId!,
        files: fileIds,

|
@ -97,6 +97,7 @@ const MessageBox = ({
|
||||
},
|
||||
),
|
||||
);
|
||||
setSpeechMessage(message.content.replace(regex, ''));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@@ -1,7 +1,14 @@
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';

// Use dynamic imports for Node.js modules to prevent client-side errors
let fs: any;
let path: any;
if (typeof window === 'undefined') {
  // We're on the server
  fs = require('fs');
  path = require('path');
}

const configFileName = 'config.toml';

interface Config {

@@ -28,6 +35,9 @@ interface Config {
  DEEPSEEK: {
    API_KEY: string;
  };
  LM_STUDIO: {
    API_URL: string;
  };
  CUSTOM_OPENAI: {
    API_URL: string;
    API_KEY: string;

@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
  [P in keyof T]?: RecursivePartial<T[P]>;
};

const loadConfig = () =>
  toml.parse(
    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
  ) as any as Config;
const loadConfig = () => {
  // Server-side only
  if (typeof window === 'undefined') {
    return toml.parse(
      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
    ) as any as Config;
  }

  // Client-side fallback - settings will be loaded via API
  return {} as Config;
};

export const getSimilarityMeasure = () =>
  loadConfig().GENERAL.SIMILARITY_MEASURE;

@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
export const getCustomOpenaiModelName = () =>
  loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;

export const getLMStudioApiEndpoint = () =>
  loadConfig().MODELS.LM_STUDIO.API_URL;

const mergeConfigs = (current: any, update: any): any => {
  if (update === null || update === undefined) {
    return current;

@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
};

export const updateConfig = (config: RecursivePartial<Config>) => {
  const currentConfig = loadConfig();
  const mergedConfig = mergeConfigs(currentConfig, config);
  fs.writeFileSync(
    path.join(path.join(process.cwd(), `${configFileName}`)),
    toml.stringify(mergedConfig),
  );
  // Server-side only
  if (typeof window === 'undefined') {
    const currentConfig = loadConfig();
    const mergedConfig = mergeConfigs(currentConfig, config);
    fs.writeFileSync(
      path.join(path.join(process.cwd(), `${configFileName}`)),
      toml.stringify(mergedConfig),
    );
  }
};

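The `typeof window === 'undefined'` guard makes this module safe to import from client components: on the server it reads config.toml from disk, while in the browser `loadConfig` returns an empty `Config` and the UI fetches values over HTTP instead. A hedged sketch of that client side, assuming the `GET /api/config` route from this diff returns the flat keys its handler assembles (subset shown):

```typescript
// Client-side sketch: settings come from the API route, never from fs/toml.
// Field names follow the keys assembled in the /api/config GET handler above;
// the exact response shape is an assumption here.
type UiConfig = {
  openaiApiKey: string;
  ollamaApiUrl: string;
  lmStudioApiUrl: string;
  anthropicApiKey: string;
};

const fetchUiConfig = async (): Promise<UiConfig> => {
  const res = await fetch('/api/config');
  if (!res.ok) throw new Error(`config fetch failed: ${res.status}`);
  return (await res.json()) as UiConfig;
};
```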
@@ -25,6 +25,4 @@ export const chats = sqliteTable('chats', {
  files: text('files', { mode: 'json' })
    .$type<File[]>()
    .default(sql`'[]'`),
  userSessionId: text('userSessionId'),
  timestamp: text('timestamp'),
});

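The two added columns imply a straightforward SQLite migration; the diff does not include the generated migration file, so the statements below are a hypothetical reconstruction shown only to make the schema change concrete (drizzle-kit would normally generate this):

```typescript
// Hypothetical one-off migration matching the new schema columns (not in the diff).
import { sql } from 'drizzle-orm';
import db from '@/lib/db';

await db.run(sql`ALTER TABLE chats ADD COLUMN userSessionId TEXT`);
await db.run(sql`ALTER TABLE chats ADD COLUMN timestamp TEXT`);
```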
@@ -1,6 +1,11 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';

export const PROVIDER_INFO = {
  key: 'anthropic',
  displayName: 'Anthropic',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';

const anthropicChatModels: Record<string, string>[] = [

@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';

export const PROVIDER_INFO = {
  key: 'deepseek',
  displayName: 'Deepseek AI',
};

const deepseekChatModels: Record<string, string>[] = [
  {
    displayName: 'Deepseek Chat (Deepseek V3)',

@@ -4,6 +4,11 @@ import {
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';

export const PROVIDER_INFO = {
  key: 'gemini',
  displayName: 'Google Gemini',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';

@@ -1,6 +1,11 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';

export const PROVIDER_INFO = {
  key: 'groq',
  displayName: 'Groq',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';

const groqChatModels: Record<string, string>[] = [

@@ -1,18 +1,60 @@
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
import {
  loadOpenAIChatModels,
  loadOpenAIEmbeddingModels,
  PROVIDER_INFO as OpenAIInfo,
  PROVIDER_INFO,
} from './openai';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '../config';
import { ChatOpenAI } from '@langchain/openai';
import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
import { loadGroqChatModels } from './groq';
import { loadAnthropicChatModels } from './anthropic';
import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadDeepseekChatModels } from './deepseek';
import {
  loadOllamaChatModels,
  loadOllamaEmbeddingModels,
  PROVIDER_INFO as OllamaInfo,
} from './ollama';
import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
import {
  loadAnthropicChatModels,
  PROVIDER_INFO as AnthropicInfo,
} from './anthropic';
import {
  loadGeminiChatModels,
  loadGeminiEmbeddingModels,
  PROVIDER_INFO as GeminiInfo,
} from './gemini';
import {
  loadTransformersEmbeddingsModels,
  PROVIDER_INFO as TransformersInfo,
} from './transformers';
import {
  loadDeepseekChatModels,
  PROVIDER_INFO as DeepseekInfo,
} from './deepseek';
import {
  loadLMStudioChatModels,
  loadLMStudioEmbeddingsModels,
  PROVIDER_INFO as LMStudioInfo,
} from './lmstudio';

export const PROVIDER_METADATA = {
  openai: OpenAIInfo,
  ollama: OllamaInfo,
  groq: GroqInfo,
  anthropic: AnthropicInfo,
  gemini: GeminiInfo,
  transformers: TransformersInfo,
  deepseek: DeepseekInfo,
  lmstudio: LMStudioInfo,
  custom_openai: {
    key: 'custom_openai',
    displayName: 'Custom OpenAI',
  },
};

export interface ChatModel {
  displayName: string;

@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
  anthropic: loadAnthropicChatModels,
  gemini: loadGeminiChatModels,
  deepseek: loadDeepseekChatModels,
  lmstudio: loadLMStudioChatModels,
};

export const embeddingModelProviders: Record<

@@ -44,6 +87,7 @@ export const embeddingModelProviders: Record<
  ollama: loadOllamaEmbeddingModels,
  gemini: loadGeminiEmbeddingModels,
  transformers: loadTransformersEmbeddingsModels,
  lmstudio: loadLMStudioEmbeddingsModels,
};

export const getAvailableChatModelProviders = async () => {

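With every provider module exporting a `PROVIDER_INFO` object, `PROVIDER_METADATA` gives the UI a single lookup table for display names; the settings page above reads it through an `as any` cast. A sketch of a typed accessor with the same capitalize-the-raw-key fallback the page uses (the helper name is illustrative, not part of the diff):

```typescript
// Illustrative typed lookup over PROVIDER_METADATA (helper not in the diff).
import { PROVIDER_METADATA } from '@/lib/providers';

const providerDisplayName = (provider: string): string => {
  const info = (PROVIDER_METADATA as Record<string, { displayName: string }>)[provider];
  return info?.displayName ?? provider.charAt(0).toUpperCase() + provider.slice(1);
};

// providerDisplayName('lmstudio')     -> 'LM Studio'
// providerDisplayName('some_new_key') -> 'Some_new_key' (fallback path)
```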
src/lib/providers/lmstudio.ts (new file, 100 lines)
|
||||
import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
|
||||
import axios from 'axios';
|
||||
import { ChatModel, EmbeddingModel } from '.';
|
||||
|
||||
export const PROVIDER_INFO = {
|
||||
key: 'lmstudio',
|
||||
displayName: 'LM Studio',
|
||||
};
|
||||
import { ChatOpenAI } from '@langchain/openai';
|
||||
import { OpenAIEmbeddings } from '@langchain/openai';
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import { Embeddings } from '@langchain/core/embeddings';
|
||||
|
||||
interface LMStudioModel {
|
||||
id: string;
|
||||
name?: string;
|
||||
}
|
||||
|
||||
const ensureV1Endpoint = (endpoint: string): string =>
|
||||
endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
|
||||
|
||||
const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
|
||||
try {
|
||||
await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
});
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
export const loadLMStudioChatModels = async () => {
|
||||
const endpoint = getLMStudioApiEndpoint();
|
||||
|
||||
if (!endpoint) return {};
|
||||
if (!(await checkServerAvailability(endpoint))) return {};
|
||||
|
||||
try {
|
||||
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
});
|
||||
|
||||
const chatModels: Record<string, ChatModel> = {};
|
||||
|
||||
response.data.data.forEach((model: LMStudioModel) => {
|
||||
chatModels[model.id] = {
|
||||
displayName: model.name || model.id,
|
||||
model: new ChatOpenAI({
|
||||
openAIApiKey: 'lm-studio',
|
||||
configuration: {
|
||||
baseURL: ensureV1Endpoint(endpoint),
|
||||
},
|
||||
modelName: model.id,
|
||||
temperature: 0.7,
|
||||
streaming: true,
|
||||
maxRetries: 3,
|
||||
}) as unknown as BaseChatModel,
|
||||
};
|
||||
});
|
||||
|
||||
return chatModels;
|
||||
} catch (err) {
|
||||
console.error(`Error loading LM Studio models: ${err}`);
|
||||
return {};
|
||||
}
|
||||
};
|
||||
|
||||
export const loadLMStudioEmbeddingsModels = async () => {
|
||||
const endpoint = getLMStudioApiEndpoint();
|
||||
|
||||
if (!endpoint) return {};
|
||||
if (!(await checkServerAvailability(endpoint))) return {};
|
||||
|
||||
try {
|
||||
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
});
|
||||
|
||||
const embeddingsModels: Record<string, EmbeddingModel> = {};
|
||||
|
||||
response.data.data.forEach((model: LMStudioModel) => {
|
||||
embeddingsModels[model.id] = {
|
||||
displayName: model.name || model.id,
|
||||
model: new OpenAIEmbeddings({
|
||||
openAIApiKey: 'lm-studio',
|
||||
configuration: {
|
||||
baseURL: ensureV1Endpoint(endpoint),
|
||||
},
|
||||
modelName: model.id,
|
||||
}) as unknown as Embeddings,
|
||||
};
|
||||
});
|
||||
|
||||
return embeddingsModels;
|
||||
} catch (err) {
|
||||
console.error(`Error loading LM Studio embeddings model: ${err}`);
|
||||
return {};
|
||||
}
|
||||
};
|
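This provider reuses `ChatOpenAI` and `OpenAIEmbeddings` against a custom `baseURL` because LM Studio exposes an OpenAI-compatible server; the `'lm-studio'` API key is a placeholder, since the local server does not check it. `ensureV1Endpoint` lets users configure the URL with or without the `/v1` suffix; a quick illustration of the normalization (the helper is module-private above, inlined here for the example):

```typescript
// Inlined copy of the module-private helper, for illustration only.
const ensureV1 = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

ensureV1('http://host.docker.internal:1234');    // => 'http://host.docker.internal:1234/v1'
ensureV1('http://host.docker.internal:1234/v1'); // => unchanged
```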
@@ -1,6 +1,11 @@
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';

export const PROVIDER_INFO = {
  key: 'ollama',
  displayName: 'Ollama',
};
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';

@@ -1,6 +1,11 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';

export const PROVIDER_INFO = {
  key: 'openai',
  displayName: 'OpenAI',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';

@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
    displayName: 'GPT-4 omni mini',
    key: 'gpt-4o-mini',
  },
  {
    displayName: 'GPT 4.1 nano',
    key: 'gpt-4.1-nano',
  },
  {
    displayName: 'GPT 4.1 mini',
    key: 'gpt-4.1-mini',
  },
  {
    displayName: 'GPT 4.1',
    key: 'gpt-4.1',
  },
];

const openaiEmbeddingModels: Record<string, string>[] = [

@@ -1,5 +1,10 @@
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';

export const PROVIDER_INFO = {
  key: 'transformers',
  displayName: 'Hugging Face',
};

export const loadTransformersEmbeddingsModels = async () => {
  try {
    const embeddingModels = {

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
      const splittedText = await splitter.splitText(parsedText);
      const title = res.data
        .toString('utf8')
        .match(/<title>(.*?)<\/title>/)?.[1];
        .match(/<title.*>(.*?)<\/title>/)?.[1];

      const linkDocs = splittedText.map((text) => {
        return new Document({

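The widened pattern `<title.*>` also matches title tags that carry attributes, which the old `<title>` literal missed. A quick check of both patterns (note `.*` is greedy and unanchored, so this stays a heuristic rather than a full HTML parser):

```typescript
const html = '<head><title data-rh="true">Example Page</title></head>';

html.match(/<title>(.*?)<\/title>/)?.[1];    // undefined - the attribute breaks the literal match
html.match(/<title.*>(.*?)<\/title>/)?.[1];  // 'Example Page'
```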