Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-06-16 14:58:29 +00:00)

Compare commits: feat/deeps...4e7072349e (25 commits)
Commits:

4e7072349e
701819d018
68e151b2bd
06ff272541
4154d5e4b1
64136b8410
1862491496
073b5e897c
9a332e79e4
72450b9217
7e1dc33a08
aa240009ab
41b258e4d8
da1123d84b
627775c430
245573efca
28b9cca413
a85f762c58
3ddcceda0a
e226645bc7
5447530ece
ed6d46a440
bf705afc21
2e4433a6b3
8aaee2c40c
README.md (17 lines changed)
```diff
@@ -84,16 +84,18 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 3. After cloning, navigate to the directory containing the project files.
 
-4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
+4. Update environment variables in `docker-compose.yml` file to configure `config.toml`.
 
-   - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
-   - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
-   - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
-   - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
+   Example:
 
-   **Note**: You can change these after starting Perplexica from the settings dialog.
+   Below section in `config.toml` can be configured using variables `MODELS_CUSTOM_OPENAI_API_KEY="sk-123456"`, `MODELS_CUSTOM_OPENAI_API_URL="http://localopenai:11134"` and `MODELS_CUSTOM_OPENAI_MODEL_NAME="meta-llama/llama-4"`
 
-   - `SIMILARITY_MEASURE`: The similarity measure to use (This is filled by default; you can leave it as is if you are unsure about it.)
+   ```toml
+   [MODELS.CUSTOM_OPENAI]
+   API_KEY = "sk-123456"
+   API_URL = "http://localopenai:11134"
+   MODEL_NAME = "meta-llama/llama-4"
+   ```
 
 5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute:
```
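The `MODELS_CUSTOM_OPENAI_*` variables referenced above are read at container start by `container_entrypoint.sh` (added later in this diff) and written into `config.toml`. A minimal sketch of the corresponding compose fragment; the service name `app` is an assumption, and the TOML quotes stay inside the values:

```yaml
# Sketch only: these environment entries override [MODELS.CUSTOM_OPENAI]
# in config.toml when the container starts.
services:
  app: # service name assumed
    environment:
      - MODELS_CUSTOM_OPENAI_API_KEY="sk-123456"
      - MODELS_CUSTOM_OPENAI_API_URL="http://localopenai:11134"
      - MODELS_CUSTOM_OPENAI_MODEL_NAME="meta-llama/llama-4"
```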
```diff
@@ -159,6 +161,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
 [](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [](https://repocloud.io/details/?app_id=267)
+[](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
 
 ## Upcoming Features
```
app.dockerfile

```diff
@@ -21,7 +21,9 @@ COPY --from=builder /home/perplexica/.next/static ./public/_next/static
 COPY --from=builder /home/perplexica/.next/standalone ./
 COPY --from=builder /home/perplexica/data ./data
+COPY sample.config.toml /home/perplexica/config.toml
+COPY container_entrypoint.sh /home/perplexica/container_entrypoint.sh
 
 RUN mkdir /home/perplexica/uploads
 
-CMD ["node", "server.js"]
+CMD ["bash", "/home/perplexica/container_entrypoint.sh"]
```
container_entrypoint.sh (new file, 49 lines)

```bash
#!/usr/bin/env bash

CONFIG_TOML_FILE=/home/perplexica/config.toml

TMP_FILE=${CONFIG_TOML_FILE}.tmp
touch $TMP_FILE

while IFS= read -r line; do
  # Check if line is a section header (e.g., "[GENERAL]")
  if [[ "$line" =~ ^\[([^]]+)\] ]]; then
    current_section="${BASH_REMATCH[1]}"
    echo "$line" >> "$TMP_FILE"
    continue
  fi

  # Skip empty lines and comments
  if [[ -z "$line" || "$line" =~ ^[[:space:]]*\# ]]; then
    echo "$line" >> "$TMP_FILE"
    continue
  fi

  # Extract key and value (handling quoted values)
  key=$(echo "$line" | cut -d '=' -f 1 | xargs)
  value=$(echo "$line" | cut -d '=' -f 2- | xargs)

  # Construct the environment variable name in form of SECTION_KEY (e.g., GENERAL_SIMILARITY_MEASURE, MODELS_GEMINI_API_KEY)
  current_section=$(echo "$current_section" | sed 's/\./_/')
  env_var_name="${current_section}_${key}"

  # Check if the environment variable exists
  env_var_value=$(echo "${!env_var_name}")
  if [ -n "$env_var_value" ]; then
    new_value="$env_var_value"
    echo "$key = $new_value" >> "$TMP_FILE"
  else
    # Keep original line if no env var exists
    echo "$line" >> "$TMP_FILE"
  fi

done < "$CONFIG_TOML_FILE"

# Replace the original file
mv "$TMP_FILE" "$CONFIG_TOML_FILE"

echo "Config file updated successfully."

# Start server
node server.js
```
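The script maps `SECTION_KEY` environment variables onto TOML entries: `[GENERAL] SIMILARITY_MEASURE` becomes `GENERAL_SIMILARITY_MEASURE`, and `[MODELS.GEMINI] API_KEY` becomes `MODELS_GEMINI_API_KEY` (dots replaced by underscores). Because the value is echoed into `config.toml` verbatim, TOML string values must carry their quotes inside the environment value, which is why the compose diff below sets entries like `MODELS_GEMINI_API_KEY=""`. An illustrative one-off run (the image name is a placeholder, not from this diff):

```bash
# Sketch: override two config.toml entries at container start.
# The quotes are part of the value so the rewritten TOML stays valid.
docker run \
  -e GENERAL_SIMILARITY_MEASURE='"cosine"' \
  -e MODELS_GEMINI_API_KEY='"your-key-here"' \
  perplexica-app # placeholder image name
```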
docker-compose.yaml

```diff
@@ -16,6 +16,19 @@ services:
       dockerfile: app.dockerfile
     environment:
       - SEARXNG_API_URL=http://searxng:8080
+      - GENERAL_SIMILARITY_MEASURE="cosine" # "cosine" or "dot"
+      - GENERAL_KEEP_ALIVE="5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
+      - MODELS_OPENAI_API_KEY=""
+      - MODELS_GROQ_API_KEY=""
+      - MODELS_ANTHROPIC_API_KEY=""
+      - MODELS_GEMINI_API_KEY=""
+      - MODELS_CUSTOM_OPENAI_API_KEY=""
+      - MODELS_CUSTOM_OPENAI_API_URL=""
+      - MODELS_CUSTOM_OPENAI_MODEL_NAME=""
+      - MODELS_OLLAMA_API_KEY="" # Ollama API URL - http://host.docker.internal:11434
+      - MODELS_DEEPSEEK_API_KEY=""
+      - MODELS_LM_STUDIO_API_KEY="" # LM Studio API URL - http://host.docker.internal:1234
+      - API_ENDPOINTS_SEARXNG="" # SearxNG API URL - http://localhost:32768
     ports:
       - 3000:3000
     networks:
@@ -23,7 +36,6 @@ services:
     volumes:
       - backend-dbstore:/home/perplexica/data
       - uploads:/home/perplexica/uploads
-      - ./config.toml:/home/perplexica/config.toml
     restart: unless-stopped
 
 networks:
```
package.json

```diff
@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.10.1",
+  "version": "1.10.2",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
```
sample.config.toml

```diff
@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
```
src/app/api/config/route.ts

```diff
@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {
 
     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
+    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
         DEEPSEEK: {
           API_KEY: config.deepseekApiKey,
         },
+        LM_STUDIO: {
+          API_URL: config.lmStudioApiUrl,
+        },
         CUSTOM_OPENAI: {
           API_URL: config.customOpenaiApiUrl,
           API_KEY: config.customOpenaiApiKey,
```
src/app/settings/page.tsx

```diff
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {
@@ -20,6 +21,7 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
+  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -548,8 +550,9 @@ const Page = () => {
                   (provider) => ({
                     value: provider,
                     label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
                       provider.charAt(0).toUpperCase() +
                       provider.slice(1),
                   }),
                 )}
               />
@@ -690,8 +693,9 @@ const Page = () => {
                   (provider) => ({
                     value: provider,
                     label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
                       provider.charAt(0).toUpperCase() +
                       provider.slice(1),
                   }),
                 )}
               />
@@ -858,6 +862,25 @@ const Page = () => {
                 onSave={(value) => saveConfig('deepseekApiKey', value)}
               />
             </div>
+
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                LM Studio API URL
+              </p>
+              <Input
+                type="text"
+                placeholder="LM Studio API URL"
+                value={config.lmStudioApiUrl}
+                isSaving={savingStates['lmStudioApiUrl']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lmStudioApiUrl: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+              />
+            </div>
           </div>
         </SettingsSection>
       </div>
```
src/components/MessageBox.tsx

```diff
@@ -48,6 +48,7 @@ const MessageBox = ({
   const [speechMessage, setSpeechMessage] = useState(message.content);
 
   useEffect(() => {
+    const citationRegex = /\[([^\]]+)\]/g;
     const regex = /\[(\d+)\]/g;
     let processedMessage = message.content;
 
@@ -67,13 +68,36 @@
     ) {
       setParsedMessage(
         processedMessage.replace(
-          regex,
-          (_, number) =>
-            `<a href="${
-              message.sources?.[number - 1]?.metadata?.url
-            }" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+          citationRegex,
+          (_, capturedContent: string) => {
+            const numbers = capturedContent
+              .split(',')
+              .map((numStr) => numStr.trim());
+
+            const linksHtml = numbers
+              .map((numStr) => {
+                const number = parseInt(numStr);
+
+                if (isNaN(number) || number <= 0) {
+                  return `[${numStr}]`;
+                }
+
+                const source = message.sources?.[number - 1];
+                const url = source?.metadata?.url;
+
+                if (url) {
+                  return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
+                } else {
+                  return `[${numStr}]`;
+                }
+              })
+              .join('');
+
+            return linksHtml;
+          },
         ),
       );
+      setSpeechMessage(message.content.replace(regex, ''));
       return;
     }
 
```
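For illustration, a small sketch of what the new callback does with a grouped citation such as `[1,2]`; the sources and URLs here are invented:

```ts
// citationRegex matches any bracketed token; the callback splits on commas,
// so "[1,2]" becomes two adjacent links and non-numeric tokens stay literal.
const citationRegex = /\[([^\]]+)\]/g;
const sources = [
  { metadata: { url: 'https://example.com/a' } }, // invented
  { metadata: { url: 'https://example.com/b' } }, // invented
];

const out = 'See [1,2] and [foo].'.replace(citationRegex, (_, captured: string) =>
  captured
    .split(',')
    .map((numStr) => {
      const n = parseInt(numStr.trim());
      const url = sources[n - 1]?.metadata?.url;
      return !isNaN(n) && url
        ? `<a href="${url}">${numStr.trim()}</a>`
        : `[${numStr.trim()}]`;
    })
    .join(''),
);
// 'See <a href="https://example.com/a">1</a><a href="https://example.com/b">2</a> and [foo].'
```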
src/lib/config.ts

```diff
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
+
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
 
 const configFileName = 'config.toml';
 
 interface Config {
@@ -28,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  LM_STUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
-    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-  ) as any as Config;
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
+      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+    ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  const currentConfig = loadConfig();
-  const mergedConfig = mergeConfigs(currentConfig, config);
-  fs.writeFileSync(
-    path.join(path.join(process.cwd(), `${configFileName}`)),
-    toml.stringify(mergedConfig),
-  );
+  // Server-side only
+  if (typeof window === 'undefined') {
+    const currentConfig = loadConfig();
+    const mergedConfig = mergeConfigs(currentConfig, config);
+    fs.writeFileSync(
+      path.join(path.join(process.cwd(), `${configFileName}`)),
+      toml.stringify(mergedConfig),
+    );
+  }
 };
```
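With this guard, disk access happens only on the server: in the browser `loadConfig()` returns an empty object, so nested getters such as `getSimilarityMeasure()` are server-only and the client fetches values through the config API route instead. A hypothetical helper (not in the commit) showing the split:

```ts
// Hypothetical sketch: resolve a config value on either side of the boundary.
const getSimilarityMeasureUniversal = async (): Promise<string> => {
  if (typeof window === 'undefined') {
    return getSimilarityMeasure(); // server: reads config.toml directly
  }
  const res = await fetch('/api/config'); // client: ask the API route
  return (await res.json()).similarityMeasure; // response field name assumed
};
```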
src/lib/providers/anthropic.ts

```diff
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
```
src/lib/providers/deepseek.ts

```diff
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI',
+};
+
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
```
src/lib/providers/gemini.ts

```diff
@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
@@ -40,8 +45,12 @@ const geminiChatModels: Record<string, string>[] = [
 
 const geminiEmbeddingModels: Record<string, string>[] = [
   {
-    displayName: 'Gemini Embedding',
-    key: 'gemini-embedding-exp',
+    displayName: 'Text Embedding 004',
+    key: 'models/text-embedding-004',
+  },
+  {
+    displayName: 'Embedding 001',
+    key: 'models/embedding-001',
   },
 ];
 
```
@ -1,6 +1,11 @@
|
|||||||
import { ChatOpenAI } from '@langchain/openai';
|
import { ChatOpenAI } from '@langchain/openai';
|
||||||
import { getGroqApiKey } from '../config';
|
import { getGroqApiKey } from '../config';
|
||||||
import { ChatModel } from '.';
|
import { ChatModel } from '.';
|
||||||
|
|
||||||
|
export const PROVIDER_INFO = {
|
||||||
|
key: 'groq',
|
||||||
|
displayName: 'Groq',
|
||||||
|
};
|
||||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||||
|
|
||||||
const groqChatModels: Record<string, string>[] = [
|
const groqChatModels: Record<string, string>[] = [
|
||||||
@ -72,6 +77,14 @@ const groqChatModels: Record<string, string>[] = [
|
|||||||
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
|
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
|
||||||
key: 'llama-3.2-90b-vision-preview',
|
key: 'llama-3.2-90b-vision-preview',
|
||||||
},
|
},
|
||||||
|
/* {
|
||||||
|
displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
|
||||||
|
key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
|
||||||
|
}, */
|
||||||
|
{
|
||||||
|
displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
|
||||||
|
key: 'meta-llama/llama-4-scout-17b-16e-instruct',
|
||||||
|
},
|
||||||
];
|
];
|
||||||
|
|
||||||
export const loadGroqChatModels = async () => {
|
export const loadGroqChatModels = async () => {
|
||||||
|
src/lib/providers/index.ts

```diff
@@ -1,18 +1,60 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI',
+  },
+};
 
 export interface ChatModel {
   displayName: string;
@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -44,6 +87,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
```
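The settings page shown earlier consumes this map for its dropdown labels; a minimal sketch of that fallback (the provider keys are examples):

```ts
import { PROVIDER_METADATA } from '@/lib/providers';

// Use the provider's registered displayName, else capitalize the raw key.
const labelFor = (provider: string) =>
  (PROVIDER_METADATA as any)[provider]?.displayName ||
  provider.charAt(0).toUpperCase() + provider.slice(1);

labelFor('lmstudio'); // 'LM Studio'
labelFor('unknown'); // 'Unknown'
```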
src/lib/providers/lmstudio.ts (new file, 100 lines)

```ts
import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
import axios from 'axios';
import { ChatModel, EmbeddingModel } from '.';

export const PROVIDER_INFO = {
  key: 'lmstudio',
  displayName: 'LM Studio',
};
import { ChatOpenAI } from '@langchain/openai';
import { OpenAIEmbeddings } from '@langchain/openai';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';

interface LMStudioModel {
  id: string;
  name?: string;
}

const ensureV1Endpoint = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
  try {
    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      headers: { 'Content-Type': 'application/json' },
    });
    return true;
  } catch {
    return false;
  }
};

export const loadLMStudioChatModels = async () => {
  const endpoint = getLMStudioApiEndpoint();

  if (!endpoint) return {};
  if (!(await checkServerAvailability(endpoint))) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      headers: { 'Content-Type': 'application/json' },
    });

    const chatModels: Record<string, ChatModel> = {};

    response.data.data.forEach((model: LMStudioModel) => {
      chatModels[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
          modelName: model.id,
          temperature: 0.7,
          streaming: true,
          maxRetries: 3,
        }) as unknown as BaseChatModel,
      };
    });

    return chatModels;
  } catch (err) {
    console.error(`Error loading LM Studio models: ${err}`);
    return {};
  }
};

export const loadLMStudioEmbeddingsModels = async () => {
  const endpoint = getLMStudioApiEndpoint();

  if (!endpoint) return {};
  if (!(await checkServerAvailability(endpoint))) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      headers: { 'Content-Type': 'application/json' },
    });

    const embeddingsModels: Record<string, EmbeddingModel> = {};

    response.data.data.forEach((model: LMStudioModel) => {
      embeddingsModels[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
          modelName: model.id,
        }) as unknown as Embeddings,
      };
    });

    return embeddingsModels;
  } catch (err) {
    console.error(`Error loading LM Studio embeddings model: ${err}`);
    return {};
  }
};
```
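LM Studio exposes an OpenAI-compatible server, which is why both loaders wrap it in `ChatOpenAI`/`OpenAIEmbeddings` with the dummy key `lm-studio`. How `ensureV1Endpoint` normalizes the configured URL (values illustrative):

```ts
ensureV1Endpoint('http://host.docker.internal:1234');    // 'http://host.docker.internal:1234/v1'
ensureV1Endpoint('http://host.docker.internal:1234/v1'); // unchanged
```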
src/lib/providers/ollama.ts

```diff
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama',
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
 
```
src/lib/providers/openai.ts

```diff
@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
+  {
+    displayName: 'GPT 4.1 nano',
+    key: 'gpt-4.1-nano',
+  },
+  {
+    displayName: 'GPT 4.1 mini',
+    key: 'gpt-4.1-mini',
+  },
+  {
+    displayName: 'GPT 4.1',
+    key: 'gpt-4.1',
+  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [
```
src/lib/providers/transformers.ts

```diff
@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face',
+};
+
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {
```
src/lib/utils/documents.ts

```diff
@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
       const splittedText = await splitter.splitText(parsedText);
       const title = res.data
         .toString('utf8')
-        .match(/<title>(.*?)<\/title>/)?.[1];
+        .match(/<title.*>(.*?)<\/title>/)?.[1];
 
       const linkDocs = splittedText.map((text) => {
         return new Document({
```