Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-11-03 20:28:14 +00:00)

Compare commits

11 Commits: 53b49845b2...feat/model
| Author | SHA1 | Date |
|---|---|---|
|  | 701819d018 |  |
|  | 68e151b2bd |  |
|  | 06ff272541 |  |
|  | 4154d5e4b1 |  |
|  | 1862491496 |  |
|  | 073b5e897c |  |
|  | 9a332e79e4 |  |
|  | 72450b9217 |  |
|  | 7e1dc33a08 |  |
|  | aa240009ab |  |
|  | 8aaee2c40c |  |
sample.config.toml
@@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
src/app/api/config/route.ts
@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -51,6 +52,7 @@ export const GET = async (req: Request) => {
 
     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
+    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
@@ -93,6 +95,9 @@ export const POST = async (req: Request) => {
         DEEPSEEK: {
           API_KEY: config.deepseekApiKey,
         },
+        LM_STUDIO: {
+          API_URL: config.lmStudioApiUrl,
+        },
         CUSTOM_OPENAI: {
           API_URL: config.customOpenaiApiUrl,
           API_KEY: config.customOpenaiApiKey,
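Taken together, the GET and POST changes round-trip the new field. As a hedged illustration (the request shape is inferred from the fields the POST handler reads, not taken from this diff):

```ts
// Hypothetical client call persisting the LM Studio endpoint via the config route.
// The URL is an example value; the handler maps config.lmStudioApiUrl into
// MODELS.LM_STUDIO.API_URL through updateConfig().
await fetch('/api/config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    lmStudioApiUrl: 'http://host.docker.internal:1234',
    // ...plus the other keys the handler expects (openaiApiKey, ollamaApiUrl, ...)
  }),
});
```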
src/app/settings/page.tsx
@@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react';
 import ThemeSwitcher from '@/components/theme/Switcher';
 import { ImagesIcon, VideoIcon } from 'lucide-react';
 import Link from 'next/link';
+import { PROVIDER_METADATA } from '@/lib/providers';
 
 interface SettingsType {
   chatModelProviders: {
@@ -20,6 +21,7 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
+  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -548,6 +550,7 @@ const Page = () => {
                         (provider) => ({
                           value: provider,
                           label:
+                            (PROVIDER_METADATA as any)[provider]?.displayName ||
                             provider.charAt(0).toUpperCase() +
                               provider.slice(1),
                         }),
@@ -690,6 +693,7 @@ const Page = () => {
                         (provider) => ({
                           value: provider,
                           label:
+                            (PROVIDER_METADATA as any)[provider]?.displayName ||
                             provider.charAt(0).toUpperCase() +
                               provider.slice(1),
                         }),
@@ -858,6 +862,25 @@ const Page = () => {
                     onSave={(value) => saveConfig('deepseekApiKey', value)}
                   />
                 </div>
+
+                <div className="flex flex-col space-y-1">
+                  <p className="text-black/70 dark:text-white/70 text-sm">
+                    LM Studio API URL
+                  </p>
+                  <Input
+                    type="text"
+                    placeholder="LM Studio API URL"
+                    value={config.lmStudioApiUrl}
+                    isSaving={savingStates['lmStudioApiUrl']}
+                    onChange={(e) => {
+                      setConfig((prev) => ({
+                        ...prev!,
+                        lmStudioApiUrl: e.target.value,
+                      }));
+                    }}
+                    onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+                  />
+                </div>
               </div>
             </SettingsSection>
           </div>
src/lib/config.ts
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
 
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
+
 const configFileName = 'config.toml';
 
 interface Config {
@@ -28,6 +35,9 @@ interface Config {
     DEEPSEEK: {
       API_KEY: string;
     };
+    LM_STUDIO: {
+      API_URL: string;
+    };
     CUSTOM_OPENAI: {
       API_URL: string;
       API_KEY: string;
@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
-    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-  ) as any as Config;
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
+      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+    ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
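The client-side branch above returns an empty Config because, per its comment, browser code is expected to fetch settings from the config route instead. A minimal sketch of that path, assuming the GET handler shown earlier:

```ts
// In the browser, loadConfig() yields {}; real values come over HTTP.
const res = await fetch('/api/config');
const settings = await res.json();
console.log(settings.lmStudioApiUrl); // '' until an endpoint is saved
```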
@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
+  // Server-side only
+  if (typeof window === 'undefined') {
     const currentConfig = loadConfig();
     const mergedConfig = mergeConfigs(currentConfig, config);
     fs.writeFileSync(
       path.join(path.join(process.cwd(), `${configFileName}`)),
       toml.stringify(mergedConfig),
     );
+  }
 };
src/lib/providers/anthropic.ts
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
src/lib/providers/deepseek.ts
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI',
+};
 
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
src/lib/providers/gemini.ts
@@ -4,6 +4,11 @@ import {
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
src/lib/providers/groq.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const groqChatModels: Record<string, string>[] = [
src/lib/providers/index.ts
@@ -1,18 +1,60 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI',
+  },
+};
 
 export interface ChatModel {
   displayName: string;
@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -44,6 +87,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
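The new PROVIDER_METADATA map is what the settings page consults for human-readable labels, falling back to capitalizing the raw key for providers without an entry. A small sketch of that lookup:

```ts
import { PROVIDER_METADATA } from '@/lib/providers';

// Mirrors the settings page: prefer the registered displayName, else capitalize the key.
const providerLabel = (provider: string): string =>
  (PROVIDER_METADATA as any)[provider]?.displayName ||
  provider.charAt(0).toUpperCase() + provider.slice(1);

providerLabel('lmstudio'); // 'LM Studio'
providerLabel('foo'); // 'Foo'
```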
							
								
								
									
src/lib/providers/lmstudio.ts (new file, 100 lines)
@@ -0,0 +1,100 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lmstudio',
+  displayName: 'LM Studio',
+};
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+  id: string;
+  name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+  try {
+    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const loadLMStudioChatModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      chatModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+          temperature: 0.7,
+          streaming: true,
+          maxRetries: 3,
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      embeddingsModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+        }) as unknown as Embeddings,
+      };
+    });
+
+    return embeddingsModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio embeddings model: ${err}`);
+    return {};
+  }
+};
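A hedged usage sketch for the new loader: assuming config.toml points MODELS.LM_STUDIO.API_URL at a running LM Studio server, it resolves a map keyed by model id, each entry wrapping LM Studio's OpenAI-compatible /v1 API:

```ts
import { loadLMStudioChatModels } from '@/lib/providers/lmstudio';

// Resolves {} when no endpoint is configured or the server is unreachable.
const chatModels = await loadLMStudioChatModels();
console.log(Object.keys(chatModels)); // model ids reported by GET <endpoint>/v1/models
```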
src/lib/providers/ollama.ts
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama',
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
 
src/lib/providers/openai.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
+  {
+    displayName: 'GPT 4.1 nano',
+    key: 'gpt-4.1-nano',
+  },
+  {
+    displayName: 'GPT 4.1 mini',
+    key: 'gpt-4.1-mini',
+  },
+  {
+    displayName: 'GPT 4.1',
+    key: 'gpt-4.1',
+  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [
src/lib/providers/transformers.ts
@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
+
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face',
+};
 
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {
src/lib/utils/documents.ts
@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
         const splittedText = await splitter.splitText(parsedText);
         const title = res.data
           .toString('utf8')
-          .match(/<title>(.*?)<\/title>/)?.[1];
+          .match(/<title.*>(.*?)<\/title>/)?.[1];
 
         const linkDocs = splittedText.map((text) => {
           return new Document({
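The widened pattern matters because many pages emit a <title> tag carrying attributes, which the old regex could not match at all. A quick comparison:

```ts
const html = '<title data-rh="true">Example Domain</title>';

html.match(/<title>(.*?)<\/title>/)?.[1]; // undefined: the attribute breaks the old pattern
html.match(/<title.*>(.*?)<\/title>/)?.[1]; // 'Example Domain' with the new pattern
```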