Compare commits


20 Commits

Author SHA1 Message Date
Lars Erhardt
438e14cfb6 Merge 590a52d38c into 701819d018 2025-05-14 22:59:09 +03:00
ItzCrazyKns
701819d018 Revert "Update README.md"
This reverts commit 68e151b2bd.
2025-05-13 20:14:08 +05:30
ItzCrazyKns
68e151b2bd Update README.md 2025-04-29 17:13:30 +05:30
ItzCrazyKns
06ff272541 feat(openai): add GPT 4.1 models 2025-04-29 13:10:14 +05:30
ItzCrazyKns
4154d5e4b1 Merge branch 'pr/629' 2025-04-23 20:35:52 +05:30
ItzCrazyKns
1862491496 feat(settings): add LM Studio API URL 2025-04-12 11:59:05 +05:30
ItzCrazyKns
073b5e897c feat(app): lint & beautify 2025-04-12 11:58:52 +05:30
Rami
9a332e79e4 Merge branch 'ItzCrazyKns:master' into feature/lm-studio-provider 2025-04-11 20:07:58 +04:00
ItzCrazyKns
72450b9217 Merge pull request #731 from ClawCloud-Ron/master
docs: add ClawCloud Run button
2025-04-11 21:20:44 +05:30
haddadrm
7e1dc33a08 Implement provider formatting improvements and fix client-side compatibility
- Add PROVIDER_INFO metadata to each provider file with proper display names
- Create centralized PROVIDER_METADATA in index.ts for consistent reference
- Update settings UI to use provider metadata for display names
- Fix client/server compatibility for Node.js modules in config.ts
2025-04-11 19:18:19 +04:00
haddadrm
aa240009ab Feature: Add LM Studio provider integration - Added LM Studio provider to support OpenAI compatible API - Implemented chat and embeddings model loading - Updated config to include LM Studio API endpoint 2025-04-11 19:18:19 +04:00
sjiampojamarn
41b258e4d8 Set speech message before return 2025-04-08 23:17:52 -07:00
ItzCrazyKns
da1123d84b feat(groq): update model name 2025-04-07 23:30:51 +05:30
ItzCrazyKns
627775c430 feat(groq): remove maverick (not being run yet) 2025-04-07 23:29:51 +05:30
ItzCrazyKns
245573efca feat(groq): update model list 2025-04-07 23:23:18 +05:30
ClawCloud-Ron
28b9cca413 docs: add ClawCloud Run button 2025-04-07 16:49:59 +08:00
wellCh4n
8aaee2c40c feat(app): support complex title 2025-02-15 16:48:21 +08:00
Lars Erhardt
590a52d38c Redict config isnt really nearcessary 2025-02-10 17:43:32 +01:00
Lars Erhardt
ca3fad6632 Searx Settings modified after mistake 2025-02-10 17:18:59 +01:00
Lars Erhardt
e0d5787c5d Add redis support and disabled qwant by default.
Larger instances will benefit from this change massively.
Also QWant was spamming the logs with some chaptcha problem so best to disable it for now.
2025-02-10 16:36:54 +01:00
18 changed files with 295 additions and 23 deletions

View File

@@ -159,6 +159,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
 [![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
+[![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
 
 ## Upcoming Features

View File

@@ -26,9 +26,21 @@ services:
       - ./config.toml:/home/perplexica/config.toml
     restart: unless-stopped
+  redict:
+    image: registry.redict.io/redict:latest
+    container_name: perplexica-redict
+    ports:
+      - "6379:6379"
+    volumes:
+      - redict_data:/data
+    networks:
+      - perplexica-network
+    restart: unless-stopped
 
 networks:
   perplexica-network:
 
 volumes:
   backend-dbstore:
   uploads:
+  redict_data:

View File

@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768

View File

@@ -12,6 +12,11 @@ search:
 server:
   secret_key: 'a2fb23f1b02e6ee83875b09826990de0f6bd908b6638e8c10277d415f6ab852b' # Is overwritten by ${SEARXNG_SECRET}
+redis:
+  url: redis://redict:6379/0
 
 engines:
   - name: wolframalpha
     disabled: false
+  - name: qwant
+    disabled: true

View File

@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {
   config['openaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
+  config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
   config['anthropicApiKey'] = getAnthropicApiKey();
   config['groqApiKey'] = getGroqApiKey();
   config['geminiApiKey'] = getGeminiApiKey();
@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      LM_STUDIO: {
+        API_URL: config.lmStudioApiUrl,
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,

View File

@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {
@@ -20,6 +21,7 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
+  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -548,8 +550,9 @@ const Page = () => {
                 (provider) => ({
                   value: provider,
                   label:
+                    (PROVIDER_METADATA as any)[provider]?.displayName ||
                     provider.charAt(0).toUpperCase() +
                       provider.slice(1),
                 }),
               )}
             />
@@ -690,8 +693,9 @@ const Page = () => {
                 (provider) => ({
                   value: provider,
                   label:
+                    (PROVIDER_METADATA as any)[provider]?.displayName ||
                     provider.charAt(0).toUpperCase() +
                       provider.slice(1),
                 }),
               )}
             />
@@ -858,6 +862,25 @@ const Page = () => {
                   onSave={(value) => saveConfig('deepseekApiKey', value)}
                 />
               </div>
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  LM Studio API URL
+                </p>
+                <Input
+                  type="text"
+                  placeholder="LM Studio API URL"
+                  value={config.lmStudioApiUrl}
+                  isSaving={savingStates['lmStudioApiUrl']}
+                  onChange={(e) => {
+                    setConfig((prev) => ({
+                      ...prev!,
+                      lmStudioApiUrl: e.target.value,
+                    }));
+                  }}
+                  onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+                />
+              </div>
             </div>
           </SettingsSection>
         </div>

View File

@@ -97,6 +97,7 @@ const MessageBox = ({
         },
       ),
     );
+    setSpeechMessage(message.content.replace(regex, ''));
     return;
   }

View File

@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
+
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
 
 const configFileName = 'config.toml';
 
 interface Config {
@@ -28,6 +35,9 @@ interface Config {
     DEEPSEEK: {
       API_KEY: string;
     };
+    LM_STUDIO: {
+      API_URL: string;
+    };
     CUSTOM_OPENAI: {
       API_URL: string;
       API_KEY: string;
@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
-    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-  ) as any as Config;
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
+      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+    ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  const currentConfig = loadConfig();
-  const mergedConfig = mergeConfigs(currentConfig, config);
-  fs.writeFileSync(
-    path.join(path.join(process.cwd(), `${configFileName}`)),
-    toml.stringify(mergedConfig),
-  );
+  // Server-side only
+  if (typeof window === 'undefined') {
+    const currentConfig = loadConfig();
+    const mergedConfig = mergeConfigs(currentConfig, config);
+    fs.writeFileSync(
+      path.join(path.join(process.cwd(), `${configFileName}`)),
+      toml.stringify(mergedConfig),
+    );
+  }
 };
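A note on the guard above: in the browser bundle, loadConfig() now returns an empty object, so client code is expected to read settings through the config API route shown earlier instead. A minimal sketch of that client-side read, assuming the GET handler is mounted at /api/config (the path is an assumption from Next.js route conventions, not shown in this diff):

// Hypothetical client-side usage: fetch settings from the API route rather
// than touching fs/path, which only exist on the server.
async function fetchSettings(): Promise<Record<string, any>> {
  const res = await fetch('/api/config'); // assumed route path
  if (!res.ok) throw new Error(`Failed to load config: ${res.status}`);
  return res.json(); // e.g. { lmStudioApiUrl: '...', ollamaApiUrl: '...', ... }
}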

View File

@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [

View File

@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI',
+};
+
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',

View File

@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';

View File

@@ -1,6 +1,11 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [
@@ -72,6 +77,14 @@ const groqChatModels: Record<string, string>[] = [
     displayName: 'Llama 3.2 90B Vision Preview (Preview)',
     key: 'llama-3.2-90b-vision-preview',
   },
+  /* {
+    displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
+    key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
+  }, */
+  {
+    displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
+    key: 'meta-llama/llama-4-scout-17b-16e-instruct',
+  },
 ];
 
 export const loadGroqChatModels = async () => {

View File

@@ -1,18 +1,60 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI',
+  },
+};
 
 export interface ChatModel {
   displayName: string;
@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -44,6 +87,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {

View File

@@ -0,0 +1,100 @@
import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
import axios from 'axios';
import { ChatModel, EmbeddingModel } from '.';

export const PROVIDER_INFO = {
  key: 'lmstudio',
  displayName: 'LM Studio',
};
import { ChatOpenAI } from '@langchain/openai';
import { OpenAIEmbeddings } from '@langchain/openai';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';

interface LMStudioModel {
  id: string;
  name?: string;
}

const ensureV1Endpoint = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
  try {
    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      headers: { 'Content-Type': 'application/json' },
    });
    return true;
  } catch {
    return false;
  }
};

export const loadLMStudioChatModels = async () => {
  const endpoint = getLMStudioApiEndpoint();

  if (!endpoint) return {};
  if (!(await checkServerAvailability(endpoint))) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      headers: { 'Content-Type': 'application/json' },
    });

    const chatModels: Record<string, ChatModel> = {};

    response.data.data.forEach((model: LMStudioModel) => {
      chatModels[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
          modelName: model.id,
          temperature: 0.7,
          streaming: true,
          maxRetries: 3,
        }) as unknown as BaseChatModel,
      };
    });

    return chatModels;
  } catch (err) {
    console.error(`Error loading LM Studio models: ${err}`);
    return {};
  }
};

export const loadLMStudioEmbeddingsModels = async () => {
  const endpoint = getLMStudioApiEndpoint();

  if (!endpoint) return {};
  if (!(await checkServerAvailability(endpoint))) return {};

  try {
    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      headers: { 'Content-Type': 'application/json' },
    });

    const embeddingsModels: Record<string, EmbeddingModel> = {};

    response.data.data.forEach((model: LMStudioModel) => {
      embeddingsModels[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
          modelName: model.id,
        }) as unknown as Embeddings,
      };
    });

    return embeddingsModels;
  } catch (err) {
    console.error(`Error loading LM Studio embeddings model: ${err}`);
    return {};
  }
};
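Both loaders above key off the OpenAI-compatible GET /v1/models endpoint that LM Studio exposes. A minimal sketch of that call in isolation (the localhost:1234 default is an assumption; the response shape follows the OpenAI models-list format implied by response.data.data):

import axios from 'axios';

// List model IDs from a running LM Studio server (default port assumed).
async function listLMStudioModels(endpoint = 'http://localhost:1234') {
  const base = endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
  const res = await axios.get(`${base}/models`);
  // OpenAI-compatible shape: { data: [{ id: 'model-id', ... }, ...] }
  return res.data.data.map((m: { id: string }) => m.id);
}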

View File

@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama',
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';

View File

@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
+  {
+    displayName: 'GPT 4.1 nano',
+    key: 'gpt-4.1-nano',
+  },
+  {
+    displayName: 'GPT 4.1 mini',
+    key: 'gpt-4.1-mini',
+  },
+  {
+    displayName: 'GPT 4.1',
+    key: 'gpt-4.1',
+  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [

View File

@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
+
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face',
+};
 
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {

View File

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
   const splittedText = await splitter.splitText(parsedText);
 
   const title = res.data
     .toString('utf8')
-    .match(/<title>(.*?)<\/title>/)?.[1];
+    .match(/<title.*>(.*?)<\/title>/)?.[1];
 
   const linkDocs = splittedText.map((text) => {
     return new Document({
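This hunk likely implements the "feat(app): support complex title" commit from the list above: the old pattern only matched a bare <title> tag, so pages emitting attributes on <title> lost their titles. A small sketch with an assumed sample input:

// Assumed sample: a <title> tag carrying attributes, as some frameworks emit.
const html = '<title data-rh="true">Example Page</title>';

html.match(/<title>(.*?)<\/title>/)?.[1]; // undefined - old pattern misses it
html.match(/<title.*>(.*?)<\/title>/)?.[1]; // 'Example Page' - new pattern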