mirror of https://github.com/ItzCrazyKns/Perplexica.git
synced 2025-11-03 20:28:14 +00:00

Compare commits (1 commit)

feat/model ... 53b49845b2
	
@@ -25,8 +25,5 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
-[MODELS.LM_STUDIO]
-API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
-
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
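
Each key in this sample config is read back by a getter in the config module. By analogy with the getters visible later in this diff (this exact line is not shown, so treat it as an assumption), the DeepSeek key is presumably read as:

  // Assumed getter, following the getCustomOpenaiModelName pattern shown below:
  export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
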

@@ -8,7 +8,6 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
-  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -52,7 +51,6 @@ export const GET = async (req: Request) => {
 
     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
-    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
@@ -95,9 +93,6 @@ export const POST = async (req: Request) => {
         DEEPSEEK: {
           API_KEY: config.deepseekApiKey,
         },
-        LM_STUDIO: {
-          API_URL: config.lmStudioApiUrl,
-        },
         CUSTOM_OPENAI: {
           API_URL: config.customOpenaiApiUrl,
           API_KEY: config.customOpenaiApiKey,

@@ -7,7 +7,6 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
-import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {
@@ -21,7 +20,6 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
-  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -550,9 +548,8 @@ const Page = () => {
                         (provider) => ({
                           value: provider,
                           label:
-                            (PROVIDER_METADATA as any)[provider]?.displayName ||
                             provider.charAt(0).toUpperCase() +
-                              provider.slice(1),
+                            provider.slice(1),
                         }),
                       )}
                     />
@@ -693,9 +690,8 @@ const Page = () => {
                         (provider) => ({
                           value: provider,
                           label:
-                            (PROVIDER_METADATA as any)[provider]?.displayName ||
                             provider.charAt(0).toUpperCase() +
-                              provider.slice(1),
+                            provider.slice(1),
                         }),
                       )}
                     />
@@ -862,25 +858,6 @@ const Page = () => {
                     onSave={(value) => saveConfig('deepseekApiKey', value)}
                   />
                 </div>
-
-                <div className="flex flex-col space-y-1">
-                  <p className="text-black/70 dark:text-white/70 text-sm">
-                    LM Studio API URL
-                  </p>
-                  <Input
-                    type="text"
-                    placeholder="LM Studio API URL"
-                    value={config.lmStudioApiUrl}
-                    isSaving={savingStates['lmStudioApiUrl']}
-                    onChange={(e) => {
-                      setConfig((prev) => ({
-                        ...prev!,
-                        lmStudioApiUrl: e.target.value,
-                      }));
-                    }}
-                    onSave={(value) => saveConfig('lmStudioApiUrl', value)}
-                  />
-                </div>
               </div>
             </SettingsSection>
           </div>
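
With the PROVIDER_METADATA lookup removed, the settings dropdowns fall back to plain capitalization of the provider key. A quick illustration (the displayName values quoted in the comments come from the PROVIDER_INFO blocks removed later in this diff):

  const label = (provider: string) =>
    provider.charAt(0).toUpperCase() + provider.slice(1);
  label('openai');   // 'Openai' (the metadata displayName was 'OpenAI')
  label('lmstudio'); // 'Lmstudio' (was 'LM Studio')
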

@@ -1,14 +1,7 @@
+import fs from 'fs';
+import path from 'path';
 import toml from '@iarna/toml';
 
-// Use dynamic imports for Node.js modules to prevent client-side errors
-let fs: any;
-let path: any;
-if (typeof window === 'undefined') {
-  // We're on the server
-  fs = require('fs');
-  path = require('path');
-}
-
 const configFileName = 'config.toml';
 
 interface Config {
@@ -35,9 +28,6 @@ interface Config {
     DEEPSEEK: {
       API_KEY: string;
     };
-    LM_STUDIO: {
-      API_URL: string;
-    };
     CUSTOM_OPENAI: {
       API_URL: string;
       API_KEY: string;
@@ -53,17 +43,10 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    return toml.parse(
-      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-    ) as any as Config;
-  }
-
-  // Client-side fallback - settings will be loaded via API
-  return {} as Config;
-};
+const loadConfig = () =>
+  toml.parse(
+    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+  ) as any as Config;
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -94,9 +77,6 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
-export const getLMStudioApiEndpoint = () =>
-  loadConfig().MODELS.LM_STUDIO.API_URL;
-
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -129,13 +109,10 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    const currentConfig = loadConfig();
-    const mergedConfig = mergeConfigs(currentConfig, config);
-    fs.writeFileSync(
-      path.join(path.join(process.cwd(), `${configFileName}`)),
-      toml.stringify(mergedConfig),
-    );
-  }
+  const currentConfig = loadConfig();
+  const mergedConfig = mergeConfigs(currentConfig, config);
+  fs.writeFileSync(
+    path.join(path.join(process.cwd(), `${configFileName}`)),
+    toml.stringify(mergedConfig),
+  );
 };
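
The typeof window guards become unnecessary once nothing in the client bundle imports this module: with the PROVIDER_METADATA import dropped from the settings page, config.ts is only evaluated on the server, so top-level Node imports are safe again. The import chain that previously pulled it toward the browser (inferred from the hunks above, not shown verbatim in this diff) was:

  // settings page (client) -> '@/lib/providers' -> '../config' -> fs/path
  // Removing the first edge keeps fs and path out of any client bundle.
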

@@ -1,11 +1,6 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
-
-export const PROVIDER_INFO = {
-  key: 'anthropic',
-  displayName: 'Anthropic',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
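
The same removal repeats in each provider module below (deepseek, gemini, groq, ollama, openai, and transformers): every one drops the small metadata object it exported for the PROVIDER_METADATA map in the providers index, all with this shape:

  export const PROVIDER_INFO = { key: 'anthropic', displayName: 'Anthropic' };
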

@@ -3,11 +3,6 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
-export const PROVIDER_INFO = {
-  key: 'deepseek',
-  displayName: 'Deepseek AI',
-};
-
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',

@@ -4,11 +4,6 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'gemini',
-  displayName: 'Google Gemini',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 

@@ -1,11 +1,6 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'groq',
-  displayName: 'Groq',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [

@@ -1,60 +1,18 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import {
-  loadOpenAIChatModels,
-  loadOpenAIEmbeddingModels,
-  PROVIDER_INFO as OpenAIInfo,
-  PROVIDER_INFO,
-} from './openai';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import {
-  loadOllamaChatModels,
-  loadOllamaEmbeddingModels,
-  PROVIDER_INFO as OllamaInfo,
-} from './ollama';
-import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
-import {
-  loadAnthropicChatModels,
-  PROVIDER_INFO as AnthropicInfo,
-} from './anthropic';
-import {
-  loadGeminiChatModels,
-  loadGeminiEmbeddingModels,
-  PROVIDER_INFO as GeminiInfo,
-} from './gemini';
-import {
-  loadTransformersEmbeddingsModels,
-  PROVIDER_INFO as TransformersInfo,
-} from './transformers';
-import {
-  loadDeepseekChatModels,
-  PROVIDER_INFO as DeepseekInfo,
-} from './deepseek';
-import {
-  loadLMStudioChatModels,
-  loadLMStudioEmbeddingsModels,
-  PROVIDER_INFO as LMStudioInfo,
-} from './lmstudio';
-
-export const PROVIDER_METADATA = {
-  openai: OpenAIInfo,
-  ollama: OllamaInfo,
-  groq: GroqInfo,
-  anthropic: AnthropicInfo,
-  gemini: GeminiInfo,
-  transformers: TransformersInfo,
-  deepseek: DeepseekInfo,
-  lmstudio: LMStudioInfo,
-  custom_openai: {
-    key: 'custom_openai',
-    displayName: 'Custom OpenAI',
-  },
-};
+import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
+import { loadGroqChatModels } from './groq';
+import { loadAnthropicChatModels } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
+import { loadTransformersEmbeddingsModels } from './transformers';
+import { loadDeepseekChatModels } from './deepseek';
 
 export interface ChatModel {
   displayName: string;
@@ -76,7 +34,6 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
-  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -87,7 +44,6 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
-  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
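
Both maps pair a provider key with an async loader that resolves to that provider's models (or an empty object when the provider is not configured). The exported helpers presumably iterate the maps along these lines (a sketch; the actual function bodies fall outside this hunk):

  export const getAvailableChatModelProviders = async () => {
    const providers: Record<string, Record<string, ChatModel>> = {};
    for (const [key, loadModels] of Object.entries(chatModelProviders)) {
      providers[key] = await loadModels(); // loaders return {} when unconfigured
    }
    return providers;
  };
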

@@ -1,100 +0,0 @@
-import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
-import axios from 'axios';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'lmstudio',
-  displayName: 'LM Studio',
-};
-import { ChatOpenAI } from '@langchain/openai';
-import { OpenAIEmbeddings } from '@langchain/openai';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-interface LMStudioModel {
-  id: string;
-  name?: string;
-}
-
-const ensureV1Endpoint = (endpoint: string): string =>
-  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
-
-const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
-  try {
-    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-    return true;
-  } catch {
-    return false;
-  }
-};
-
-export const loadLMStudioChatModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      chatModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new ChatOpenAI({
-          openAIApiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-          temperature: 0.7,
-          streaming: true,
-          maxRetries: 3,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio models: ${err}`);
-    return {};
-  }
-};
-
-export const loadLMStudioEmbeddingsModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const embeddingsModels: Record<string, EmbeddingModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      embeddingsModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new OpenAIEmbeddings({
-          openAIApiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingsModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio embeddings model: ${err}`);
-    return {};
-  }
-};
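
For reference, the deleted ensureV1Endpoint helper only normalized the endpoint before calling the OpenAI-compatible /models route:

  ensureV1Endpoint('http://host.docker.internal:1234');    // 'http://host.docker.internal:1234/v1'
  ensureV1Endpoint('http://host.docker.internal:1234/v1'); // unchanged
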

@@ -1,11 +1,6 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'ollama',
-  displayName: 'Ollama',
-};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
 

@@ -1,11 +1,6 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'openai',
-  displayName: 'OpenAI',
-};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
@@ -30,18 +25,6 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
-  {
-    displayName: 'GPT 4.1 nano',
-    key: 'gpt-4.1-nano',
-  },
-  {
-    displayName: 'GPT 4.1 mini',
-    key: 'gpt-4.1-mini',
-  },
-  {
-    displayName: 'GPT 4.1',
-    key: 'gpt-4.1',
-  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [

@@ -1,10 +1,5 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
-export const PROVIDER_INFO = {
-  key: 'transformers',
-  displayName: 'Hugging Face',
-};
-
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
         const splittedText = await splitter.splitText(parsedText);
         const title = res.data
           .toString('utf8')
-          .match(/<title.*>(.*?)<\/title>/)?.[1];
+          .match(/<title>(.*?)<\/title>/)?.[1];
 
         const linkDocs = splittedText.map((text) => {
           return new Document({
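
The two patterns differ on <title> tags that carry attributes. A quick check of the behavior:

  const html = '<head><title data-rh="true">Perplexica</title></head>';
  html.match(/<title.*>(.*?)<\/title>/)?.[1]; // 'Perplexica' (attributes tolerated)
  html.match(/<title>(.*?)<\/title>/)?.[1];   // undefined (requires a bare <title>)
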