Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-05-03 09:42:30 +00:00)

Compare commits: f790457376...master

12 commits: 68e151b2bd, 06ff272541, 4154d5e4b1, 1862491496, 073b5e897c, 9a332e79e4, 72450b9217, 7e1dc33a08, aa240009ab, 41b258e4d8, 28b9cca413, 8aaee2c40c

README.md (17 lines changed)
@@ -1,21 +1,5 @@
 # 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->
 
-<div align="center" markdown="1">
-
-<sup>Special thanks to:</sup>
-
-<br>
-<br>
-
-<a href="https://www.warp.dev/perplexica">
-<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/775dd593-9b5f-40f1-bf48-479faff4c27b">
-</a>
-
-### [Warp, the AI Devtool that lives in your terminal](https://www.warp.dev/perplexica)
-
-[Available for MacOS, Linux, & Windows](https://www.warp.dev/perplexica)
-
-</div>
-
-<hr/>
 
 [](https://discord.gg/26aArMy8tT)
 
@@ -159,6 +143,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
 
 [](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [](https://repocloud.io/details/?app_id=267)
+[](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
 
 ## Upcoming Features
@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
@@ -29,7 +29,6 @@ type Message = {
   messageId: string;
   chatId: string;
   content: string;
-  userSessionId: string;
 };
 
 type ChatModel = {
@@ -139,7 +138,6 @@ const handleHistorySave = async (
     where: eq(chats.id, message.chatId),
   });
 
-  let currentDate = new Date();
   if (!chat) {
     await db
       .insert(chats)
@@ -149,8 +147,6 @@ const handleHistorySave = async (
         createdAt: new Date().toString(),
         focusMode: focusMode,
         files: files.map(getFileDetails),
-        userSessionId: message.userSessionId,
-        timestamp: currentDate.toISOString(),
       })
       .execute();
   }
@@ -1,47 +1,10 @@
 import db from '@/lib/db';
-import { chats } from '@/lib/db/schema';
-import { eq, sql} from 'drizzle-orm';
 
 export const GET = async (req: Request) => {
   try {
-    // get header from request
-    const headers = await req.headers;
-    const userSessionId = headers.get('user-session-id')?.toString() ?? '';
-    const maxRecordLimit = parseInt(headers.get('max-record-limit') || '20', 10);
-
-    if (userSessionId == '') {
-      return Response.json({ chats: {} }, { status: 200 });
-    }
-
-    let chatsRes = await db.query.chats.findMany({
-      where: eq(chats.userSessionId, userSessionId),
-    });
-
-    chatsRes = chatsRes.reverse();
-    // Keep only the latest records in the database. Delete older records.
-    if (chatsRes.length > maxRecordLimit) {
-      const deleteChatsQuery = sql`DELETE FROM chats
-        WHERE userSessionId = ${userSessionId} AND (
-          timestamp IS NULL OR
-          timestamp NOT in (
-            SELECT timestamp FROM chats
-            WHERE userSessionId = ${userSessionId}
-            ORDER BY timestamp DESC
-            LIMIT ${maxRecordLimit}
-          )
-        )
-      `;
-      await db.run(deleteChatsQuery);
-      // Delete messages that no longer link with the chat from the database.
-      const deleteMessagesQuery = sql`DELETE FROM messages
-        WHERE chatId NOT IN (
-          SELECT id FROM chats
-        )
-      `;
-      await db.run(deleteMessagesQuery);
-    }
-
-    return Response.json({ chats: chatsRes }, { status: 200 });
+    let chats = await db.query.chats.findMany();
+    chats = chats.reverse();
+    return Response.json({ chats: chats }, { status: 200 });
   } catch (err) {
     console.error('Error in getting chats: ', err);
     return Response.json(
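The deleted cleanup relied on a standard SQLite retention idiom: delete every row whose sort key is not among the newest N. A minimal sketch of that idiom with Drizzle's `sql` tag; only `db.run` and the `sql` template mirror the code above, while the `events` table and `pruneEvents` helper are illustrative assumptions:

```ts
import { sql } from 'drizzle-orm';
import db from '@/lib/db'; // the project's Drizzle SQLite instance

// Keep only the `limit` newest rows of a hypothetical `events` table.
// Rows with a NULL createdAt, or one outside the newest-N window, are dropped.
const pruneEvents = async (limit: number) => {
  await db.run(sql`
    DELETE FROM events
    WHERE createdAt IS NULL OR createdAt NOT IN (
      SELECT createdAt FROM events
      ORDER BY createdAt DESC
      LIMIT ${limit}
    )
  `);
};
```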
@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {
 
     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
+    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      LM_STUDIO: {
+        API_URL: config.lmStudioApiUrl,
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
@@ -1,6 +1,5 @@
 'use client';
 
-import crypto from 'crypto';
 import DeleteChat from '@/components/DeleteChat';
 import { cn, formatTimeDifference } from '@/lib/utils';
 import { BookOpenText, ClockIcon, Delete, ScanEye } from 'lucide-react';
@@ -22,34 +21,10 @@ const Page = () => {
   const fetchChats = async () => {
     setLoading(true);
 
-    let userSessionId = localStorage.getItem('userSessionId');
-    if (!userSessionId) {
-      userSessionId = crypto.randomBytes(20).toString('hex');
-      localStorage.setItem('userSessionId', userSessionId)
-    }
-
-    // Get maxRecordLimit from localStorage or set default
-    let maxRecordLimit = localStorage.getItem('maxRecordLimit');
-    if (!maxRecordLimit) {
-      maxRecordLimit = '20';
-      localStorage.setItem('maxRecordLimit', maxRecordLimit);
-    } else {
-      let valueInt = parseInt(maxRecordLimit, 10) || 20;
-      if (valueInt < 1) {
-        valueInt = 1;
-      } else if (valueInt > 100) {
-        valueInt = 100;
-      }
-      maxRecordLimit = valueInt.toString();
-      localStorage.setItem('maxRecordLimit', maxRecordLimit);
-    }
-
     const res = await fetch(`/api/chats`, {
       method: 'GET',
       headers: {
         'Content-Type': 'application/json',
-        'user-session-id': userSessionId!,
-        'max-record-limit': maxRecordLimit,
       },
     });
 
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {
@@ -20,11 +21,11 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
+  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
-  maxRecordLimit: string;
 }
 
 interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
@@ -147,7 +148,6 @@ const Page = () => {
   const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
   const [systemInstructions, setSystemInstructions] = useState<string>('');
   const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
-  const [maxRecordLimit, setMaxRecordLimit] = useState<string>('20');
 
   useEffect(() => {
     const fetchConfig = async () => {
@@ -210,8 +210,6 @@ const Page = () => {
 
         setSystemInstructions(localStorage.getItem('systemInstructions')!);
 
-        setMaxRecordLimit(localStorage.getItem('maxRecordLimit') || data.maxRecordLimit || '20');
-
         setIsLoading(false);
       };
 
@@ -370,15 +368,6 @@ const Page = () => {
         localStorage.setItem('embeddingModel', value);
       } else if (key === 'systemInstructions') {
         localStorage.setItem('systemInstructions', value);
-      } else if (key === 'maxRecordLimit') {
-        let valueInt = parseInt(value, 10) || 20;
-        if (valueInt < 1) {
-          valueInt = 1;
-        } else if (valueInt > 100) {
-          valueInt = 100;
-        }
-        setMaxRecordLimit(valueInt.toString());
-        localStorage.setItem('maxRecordLimit', valueInt.toString());
       }
     } catch (err) {
       console.error('Failed to save:', err);
@@ -561,6 +550,7 @@ const Page = () => {
                   (provider) => ({
                     value: provider,
                     label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
                       provider.charAt(0).toUpperCase() +
                       provider.slice(1),
                   }),
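The added lookup prefers a curated display name and only then falls back to capitalizing the raw provider key. A standalone sketch of the same fallback; `providerLabel` is an illustrative name, not a helper from the repo:

```ts
import { PROVIDER_METADATA } from '@/lib/providers';

// Prefer the curated displayName (e.g. 'lmstudio' -> 'LM Studio');
// otherwise capitalize the raw key (e.g. 'foo' -> 'Foo').
const providerLabel = (provider: string): string =>
  (PROVIDER_METADATA as any)[provider]?.displayName ||
  provider.charAt(0).toUpperCase() + provider.slice(1);
```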
@@ -703,6 +693,7 @@ const Page = () => {
                   (provider) => ({
                     value: provider,
                     label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
                       provider.charAt(0).toUpperCase() +
                       provider.slice(1),
                   }),
@@ -871,36 +862,24 @@ const Page = () => {
                 onSave={(value) => saveConfig('deepseekApiKey', value)}
               />
             </div>
-          </div>
-        </SettingsSection>
-
-        <SettingsSection title="Chat History">
-          <div className="flex flex-col space-y-4">
             <div className="flex flex-col space-y-1">
               <p className="text-black/70 dark:text-white/70 text-sm">
-                Maximum Chat History Records
+                LM Studio API URL
               </p>
-              <div className="flex items-center space-x-2">
               <Input
-                type="number"
-                min="1"
-                max="100"
-                pattern="[0-9]*"
-                inputMode="numeric"
-                value={maxRecordLimit}
-                isSaving={savingStates['maxRecordLimit']}
+                type="text"
+                placeholder="LM Studio API URL"
+                value={config.lmStudioApiUrl}
+                isSaving={savingStates['lmStudioApiUrl']}
                 onChange={(e) => {
-                  setMaxRecordLimit(e.target.value);
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lmStudioApiUrl: e.target.value,
+                  }));
                 }}
-                onSave={(value) => saveConfig('maxRecordLimit', value)}
+                onSave={(value) => saveConfig('lmStudioApiUrl', value)}
               />
-              <span className="text-black/60 dark:text-white/60 text-sm">
-                records
-              </span>
-              </div>
-              <p className="text-xs text-black/60 dark:text-white/60 mt-1">
-                Maximum number of chat records to keep in history. Older records will be automatically deleted.
-              </p>
             </div>
           </div>
         </SettingsSection>
@@ -95,18 +95,6 @@ const checkConfig = async (
     if (!embeddingModel || !embeddingModelProvider) {
       const embeddingModelProviders = providers.embeddingModelProviders;
 
-      let userSessionId = localStorage.getItem('userSessionId');
-      if (!userSessionId) {
-        userSessionId = crypto.randomBytes(20).toString('hex');
-        localStorage.setItem('userSessionId', userSessionId!)
-      }
-
-      let maxRecordLimit = localStorage.getItem('maxRecordLimit');
-      if (!maxRecordLimit) {
-        maxRecordLimit = '20';
-        localStorage.setItem('maxRecordLimit', maxRecordLimit);
-      }
-
       if (
         !embeddingModelProviders ||
         Object.keys(embeddingModelProviders).length === 0
@@ -354,7 +342,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
     let added = false;
 
     messageId = messageId ?? crypto.randomBytes(7).toString('hex');
-    let userSessionId = localStorage.getItem('userSessionId');
 
     setMessages((prevMessages) => [
       ...prevMessages,
@@ -479,7 +466,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
         messageId: messageId,
         chatId: chatId!,
         content: message,
-        userSessionId: userSessionId,
       },
       chatId: chatId!,
       files: fileIds,
@@ -97,6 +97,7 @@ const MessageBox = ({
         },
       ),
     );
+    setSpeechMessage(message.content.replace(regex, ''));
     return;
   }
 
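The added line strips citation markers from the message before it is handed to text-to-speech, so bracketed reference numbers are never read aloud. A sketch of the idea; the actual `regex` is defined earlier in `MessageBox`, and `/\[\d+\]/g` below is an assumption for illustration:

```ts
// Assumed citation-marker pattern; MessageBox defines its own `regex`.
const citation = /\[\d+\]/g;

// Remove "[1]"-style markers so speech synthesis reads clean prose.
const toSpeech = (content: string): string => content.replace(citation, '');

console.log(toSpeech('Paris is the capital of France[1][2].'));
// => 'Paris is the capital of France.'
```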
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
 
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
+
 const configFileName = 'config.toml';
 
 interface Config {
@@ -28,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  LM_STUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
       fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
     ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
+  // Server-side only
+  if (typeof window === 'undefined') {
     const currentConfig = loadConfig();
     const mergedConfig = mergeConfigs(currentConfig, config);
     fs.writeFileSync(
       path.join(path.join(process.cwd(), `${configFileName}`)),
       toml.stringify(mergedConfig),
     );
+  }
 };
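Both `loadConfig` and `updateConfig` now gate filesystem access behind the same check: `typeof window === 'undefined'` holds in Node but not in a browser bundle, so `require('fs')` is never evaluated client-side. A minimal sketch of the pattern in isolation; `readServerFile` is an illustrative name, not a function from the repo:

```ts
// Guarded access to Node-only APIs in code that may also be bundled for
// the browser. `window` exists only in browsers, so the require() call
// is never evaluated client-side.
let fs: any;
if (typeof window === 'undefined') {
  fs = require('fs');
}

// Returns file contents on the server, null in the browser.
export const readServerFile = (p: string): string | null =>
  typeof window === 'undefined' ? fs.readFileSync(p, 'utf-8') : null;
```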
@@ -25,6 +25,4 @@ export const chats = sqliteTable('chats', {
   files: text('files', { mode: 'json' })
     .$type<File[]>()
     .default(sql`'[]'`),
-  userSessionId: text('userSessionId'),
-  timestamp: text('timestamp'),
 });
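For context, the surviving `files` column uses Drizzle's JSON mode: a plain text column whose values are (de)serialized as JSON and typed via `.$type<...>()`. The trimmed table below is an illustration of that pattern under assumed column names, not the project's full schema:

```ts
import { sql } from 'drizzle-orm';
import { sqliteTable, text } from 'drizzle-orm/sqlite-core';

// Illustrative table: a JSON-mode text column with a typed default.
// Drizzle stores the value as a JSON string but exposes it as File[].
export const chats = sqliteTable('chats', {
  id: text('id').primaryKey(),
  files: text('files', { mode: 'json' })
    .$type<File[]>()
    .default(sql`'[]'`),
});
```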
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI',
+};
+
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
@@ -1,6 +1,11 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [
@@ -1,18 +1,60 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI',
+  },
+};
 
 export interface ChatModel {
   displayName: string;
@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -44,6 +87,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
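Each registry maps a provider key to an async loader that returns a record of models, which is why LM Studio support is a one-line registration per map. A sketch of how such a registry can be consumed; `loadAll` is an illustrative helper, and the repo's real `getAvailableChatModelProviders` additionally folds in the custom OpenAI settings imported above:

```ts
// Illustrative consumer of a loader registry: run every loader and keep
// only the providers that actually returned at least one model.
type Loader<M> = () => Promise<Record<string, M>>;

const loadAll = async <M>(registry: Record<string, Loader<M>>) => {
  const out: Record<string, Record<string, M>> = {};
  for (const [key, load] of Object.entries(registry)) {
    const models = await load();
    if (Object.keys(models).length > 0) out[key] = models;
  }
  return out;
};
```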
src/lib/providers/lmstudio.ts (new file, 100 lines)

@@ -0,0 +1,100 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lmstudio',
+  displayName: 'LM Studio',
+};
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+  id: string;
+  name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+  try {
+    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const loadLMStudioChatModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      chatModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+          temperature: 0.7,
+          streaming: true,
+          maxRetries: 3,
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      embeddingsModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+        }) as unknown as Embeddings,
+      };
+    });
+
+    return embeddingsModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio embeddings model: ${err}`);
+    return {};
+  }
+};
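LM Studio exposes an OpenAI-compatible server, so the loader only needs the `/v1` base URL and a reachable `/v1/models` route. A usage sketch for the new loader, assuming an LM Studio server is running at the endpoint configured in `config.toml` (e.g. http://host.docker.internal:1234); `demo` is an illustrative name, and `.invoke` is the standard LangChain chat-model call:

```ts
import { loadLMStudioChatModels } from '@/lib/providers/lmstudio';

// List whatever models the local LM Studio server exposes and invoke
// the first one through its OpenAI-compatible API.
const demo = async () => {
  const models = await loadLMStudioChatModels();
  const entry = Object.values(models)[0];
  if (!entry) {
    console.log('LM Studio unreachable or no models loaded');
    return;
  }
  const reply = await entry.model.invoke('Say hello in five words.');
  console.log(reply.content);
};

demo();
```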
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama',
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
 
@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
+  {
+    displayName: 'GPT 4.1 nano',
+    key: 'gpt-4.1-nano',
+  },
+  {
+    displayName: 'GPT 4.1 mini',
+    key: 'gpt-4.1-mini',
+  },
+  {
+    displayName: 'GPT 4.1',
+    key: 'gpt-4.1',
+  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [
@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face',
+};
+
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {
@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
     const splittedText = await splitter.splitText(parsedText);
     const title = res.data
       .toString('utf8')
-      .match(/<title>(.*?)<\/title>/)?.[1];
+      .match(/<title.*>(.*?)<\/title>/)?.[1];
 
     const linkDocs = splittedText.map((text) => {
       return new Document({
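The old pattern only matched a bare `<title>` tag, so pages whose title tag carries attributes (e.g. `<title data-rh="true">`) yielded no title at all. The added `.*` tolerates attributes. A quick check; `titleOf` is an illustrative wrapper around the same regex. One caveat: `.*` is greedy, so on a page containing several title tags the pattern would capture the last one rather than the first:

```ts
// Extract the <title> text, tolerating attributes on the opening tag.
const titleOf = (html: string): string | undefined =>
  html.match(/<title.*>(.*?)<\/title>/)?.[1];

console.log(titleOf('<title>Plain</title>')); // 'Plain'
console.log(titleOf('<title data-rh="true">With attrs</title>')); // 'With attrs'
```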