Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-11-03 20:28:14 +00:00)

Compare commits: da1123d84b...feat/model

13 Commits:

| SHA1 |
|---|
| 701819d018 |
| 68e151b2bd |
| 06ff272541 |
| 4154d5e4b1 |
| 1862491496 |
| 073b5e897c |
| 9a332e79e4 |
| 72450b9217 |
| 7e1dc33a08 |
| aa240009ab |
| 41b258e4d8 |
| 28b9cca413 |
| 8aaee2c40c |
README.md
@@ -159,6 +159,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on

 [](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [](https://repocloud.io/details/?app_id=267)
+[](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)

 ## Upcoming Features

sample.config.toml
@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""

+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
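Reading the new key back is a plain TOML lookup; a minimal sketch, assuming a `config.toml` shaped like the sample above sits in the working directory (this mirrors what the `getLMStudioApiEndpoint` helper added to `src/lib/config.ts` later in this diff does):

```ts
// Sketch: parse config.toml and read the LM Studio endpoint.
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';

const config = toml.parse(
  fs.readFileSync(path.join(process.cwd(), 'config.toml'), 'utf-8'),
) as any;

// An empty string means the provider is not configured; the LM Studio
// loaders added in this diff return {} in that case.
const lmStudioUrl: string = config.MODELS?.LM_STUDIO?.API_URL ?? '';
console.log(lmStudioUrl || 'LM Studio not configured');
```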
src/app/api/config/route.ts
@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {

     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
+    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
         DEEPSEEK: {
           API_KEY: config.deepseekApiKey,
         },
+        LM_STUDIO: {
+          API_URL: config.lmStudioApiUrl,
+        },
         CUSTOM_OPENAI: {
           API_URL: config.customOpenaiApiUrl,
           API_KEY: config.customOpenaiApiKey,
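With the GET handler above exposing the field, a client can read it with a plain fetch. A minimal sketch, assuming the route file lives at `src/app/api/config/route.ts` and is therefore served at `/api/config` (standard Next.js App Router convention):

```ts
// Sketch: read the settings payload produced by the GET handler above.
// The property name mirrors the config['lmStudioApiUrl'] key it sets.
async function fetchLmStudioUrl(): Promise<string> {
  const res = await fetch('/api/config');
  if (!res.ok) throw new Error(`config request failed: ${res.status}`);
  const config = await res.json();
  return config.lmStudioApiUrl ?? '';
}
```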
src/app/settings/page.tsx
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';

 interface SettingsType {
   chatModelProviders: {
@@ -20,6 +21,7 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
+  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -548,6 +550,7 @@ const Page = () => {
                         (provider) => ({
                           value: provider,
                           label:
+                            (PROVIDER_METADATA as any)[provider]?.displayName ||
                             provider.charAt(0).toUpperCase() +
                               provider.slice(1),
                         }),
@@ -690,6 +693,7 @@ const Page = () => {
                         (provider) => ({
                           value: provider,
                           label:
+                            (PROVIDER_METADATA as any)[provider]?.displayName ||
                             provider.charAt(0).toUpperCase() +
                               provider.slice(1),
                         }),
@@ -858,6 +862,25 @@ const Page = () => {
                     onSave={(value) => saveConfig('deepseekApiKey', value)}
                   />
                 </div>
+
+                <div className="flex flex-col space-y-1">
+                  <p className="text-black/70 dark:text-white/70 text-sm">
+                    LM Studio API URL
+                  </p>
+                  <Input
+                    type="text"
+                    placeholder="LM Studio API URL"
+                    value={config.lmStudioApiUrl}
+                    isSaving={savingStates['lmStudioApiUrl']}
+                    onChange={(e) => {
+                      setConfig((prev) => ({
+                        ...prev!,
+                        lmStudioApiUrl: e.target.value,
+                      }));
+                    }}
+                    onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+                  />
+                </div>
               </div>
             </SettingsSection>
           </div>
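`saveConfig('lmStudioApiUrl', value)` itself is outside this diff; a plausible sketch of the round trip it performs, assuming the POST handler above and noting that `mergeConfigs` in `src/lib/config.ts` skips `undefined` fields, so a partial update is safe:

```ts
// Hypothetical helper; the real saveConfig in page.tsx is not shown in this diff.
async function saveConfig(key: string, value: string): Promise<void> {
  const res = await fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ [key]: value }),
  });
  if (!res.ok) throw new Error(`failed to save ${key}: ${res.status}`);
}
```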
src/components/MessageBox.tsx
@@ -97,6 +97,7 @@ const MessageBox = ({
           },
         ),
       );
       setSpeechMessage(message.content.replace(regex, ''));
       return;
     }
src/lib/config.ts
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
+
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}

 const configFileName = 'config.toml';

 interface Config {
@@ -28,6 +35,9 @@ interface Config {
     DEEPSEEK: {
       API_KEY: string;
     };
+    LM_STUDIO: {
+      API_URL: string;
+    };
     CUSTOM_OPENAI: {
       API_URL: string;
       API_KEY: string;
@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };

-const loadConfig = () =>
-  toml.parse(
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
       fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
     ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};

 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;

+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };

 export const updateConfig = (config: RecursivePartial<Config>) => {
+  // Server-side only
+  if (typeof window === 'undefined') {
     const currentConfig = loadConfig();
     const mergedConfig = mergeConfigs(currentConfig, config);
     fs.writeFileSync(
       path.join(path.join(process.cwd(), `${configFileName}`)),
       toml.stringify(mergedConfig),
     );
+  }
 };
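Both `loadConfig` and `updateConfig` now use the same guard: `typeof window === 'undefined'` is true only on the server, so Node-only APIs never run in the browser bundle. A standalone sketch of the pattern:

```ts
// Sketch: gate Node-only APIs behind a runtime check so the module can be
// imported from client components without crashing at import time.
function readConfigFile(fileName: string): string | null {
  if (typeof window !== 'undefined') {
    // Browser: no fs here; callers fall back to the /api/config route.
    return null;
  }
  // Server: require() resolves at runtime, so client bundles never pull in 'fs'.
  const fs = require('fs');
  const path = require('path');
  return fs.readFileSync(path.join(process.cwd(), fileName), 'utf-8');
}
```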
src/lib/providers/anthropic.ts
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const anthropicChatModels: Record<string, string>[] = [
src/lib/providers/deepseek.ts
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI',
+};
+
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
src/lib/providers/gemini.ts
@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
src/lib/providers/groq.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const groqChatModels: Record<string, string>[] = [
src/lib/providers/index.ts
@@ -1,18 +1,60 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI',
+  },
+};

 export interface ChatModel {
   displayName: string;
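`PROVIDER_METADATA` is what the settings page hunks above consult for dropdown labels. A minimal sketch of the lookup, including the capitalize-the-key fallback used when a provider has no metadata entry:

```ts
import { PROVIDER_METADATA } from '@/lib/providers';

// Sketch: resolve a display label for a provider key.
function providerLabel(provider: string): string {
  return (
    (PROVIDER_METADATA as any)[provider]?.displayName ??
    provider.charAt(0).toUpperCase() + provider.slice(1)
  );
}

// providerLabel('lmstudio') -> 'LM Studio'
// providerLabel('somenewprovider') -> 'Somenewprovider' (fallback)
```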
@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };

 export const embeddingModelProviders: Record<
@@ -44,6 +87,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };

 export const getAvailableChatModelProviders = async () => {
src/lib/providers/lmstudio.ts (new file, 100 lines)
@@ -0,0 +1,100 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lmstudio',
+  displayName: 'LM Studio',
+};
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+  id: string;
+  name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+  try {
+    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const loadLMStudioChatModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      chatModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+          temperature: 0.7,
+          streaming: true,
+          maxRetries: 3,
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      embeddingsModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+        }) as unknown as Embeddings,
+      };
+    });
+
+    return embeddingsModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio embeddings model: ${err}`);
+    return {};
+  }
+};
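A quick usage sketch for the new loader, assuming an LM Studio instance serving its OpenAI-compatible API locally; note `ensureV1Endpoint` means the configured URL works with or without a trailing `/v1`:

```ts
import { loadLMStudioChatModels } from '@/lib/providers/lmstudio';

// Sketch: list the chat models an LM Studio server exposes. With the sample
// config above, getLMStudioApiEndpoint() returns e.g.
// 'http://host.docker.internal:1234', normalized to '.../v1' internally.
async function listLmStudioModels(): Promise<void> {
  const models = await loadLMStudioChatModels();
  for (const [id, { displayName }] of Object.entries(models)) {
    console.log(`${id}: ${displayName}`);
  }
}

listLmStudioModels();
```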
src/lib/providers/ollama.ts
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama',
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
src/lib/providers/openai.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
+  {
+    displayName: 'GPT 4.1 nano',
+    key: 'gpt-4.1-nano',
+  },
+  {
+    displayName: 'GPT 4.1 mini',
+    key: 'gpt-4.1-mini',
+  },
+  {
+    displayName: 'GPT 4.1',
+    key: 'gpt-4.1',
+  },
 ];

 const openaiEmbeddingModels: Record<string, string>[] = [
src/lib/providers/transformers.ts
@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';

+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face',
+};
+
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {
src/lib/utils/documents.ts
@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
         const splittedText = await splitter.splitText(parsedText);
         const title = res.data
           .toString('utf8')
-          .match(/<title>(.*?)<\/title>/)?.[1];
+          .match(/<title.*>(.*?)<\/title>/)?.[1];

         const linkDocs = splittedText.map((text) => {
           return new Document({
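The one-character regex change matters because real pages often put attributes on the tag (e.g. `<title data-rh="true">`), which the old pattern silently missed. A minimal sketch of the difference:

```ts
// Sketch: the old pattern only matches a bare <title>, not one with attributes.
const html = '<head><title data-rh="true">Example Domain</title></head>';

const oldTitle = html.match(/<title>(.*?)<\/title>/)?.[1];
const newTitle = html.match(/<title.*>(.*?)<\/title>/)?.[1];

console.log(oldTitle); // undefined
console.log(newTitle); // 'Example Domain'
```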