From cc5eea17e4779c702cf23072e29e20bd61b257f3 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 15 Oct 2025 12:53:05 +0530
Subject: [PATCH] feat(app): remove old providers & registry

---
 src/lib/config.ts                 | 158 ----------------------------
 src/lib/providers/aimlapi.ts      |  94 -----------------
 src/lib/providers/anthropic.ts    |  78 --------------
 src/lib/providers/deepseek.ts     |  49 ---------
 src/lib/providers/gemini.ts       | 114 --------------------
 src/lib/providers/groq.ts         |  44 --------
 src/lib/providers/index.ts        | 170 ------------------------------
 src/lib/providers/lemonade.ts     |  94 -----------------
 src/lib/providers/lmstudio.ts     | 100 ------------------
 src/lib/providers/ollama.ts       |  86 ---------------
 src/lib/providers/openai.ts       | 159 ----------------------------
 src/lib/providers/transformers.ts |  36 -------
 12 files changed, 1182 deletions(-)
 delete mode 100644 src/lib/config.ts
 delete mode 100644 src/lib/providers/aimlapi.ts
 delete mode 100644 src/lib/providers/anthropic.ts
 delete mode 100644 src/lib/providers/deepseek.ts
 delete mode 100644 src/lib/providers/gemini.ts
 delete mode 100644 src/lib/providers/groq.ts
 delete mode 100644 src/lib/providers/index.ts
 delete mode 100644 src/lib/providers/lemonade.ts
 delete mode 100644 src/lib/providers/lmstudio.ts
 delete mode 100644 src/lib/providers/ollama.ts
 delete mode 100644 src/lib/providers/openai.ts
 delete mode 100644 src/lib/providers/transformers.ts

diff --git a/src/lib/config.ts b/src/lib/config.ts
deleted file mode 100644
index b79ec94..0000000
--- a/src/lib/config.ts
+++ /dev/null
@@ -1,158 +0,0 @@
-import toml from '@iarna/toml';
-
-// Use dynamic imports for Node.js modules to prevent client-side errors
-let fs: any;
-let path: any;
-if (typeof window === 'undefined') {
-  // We're on the server
-  fs = require('fs');
-  path = require('path');
-}
-
-const configFileName = 'config.toml';
-
-interface Config {
-  GENERAL: {
-    SIMILARITY_MEASURE: string;
-    KEEP_ALIVE: string;
-  };
-  MODELS: {
-    OPENAI: {
-      API_KEY: string;
-    };
-    GROQ: {
-      API_KEY: string;
-    };
-    ANTHROPIC: {
-      API_KEY: string;
-    };
-    GEMINI: {
-      API_KEY: string;
-    };
-    OLLAMA: {
-      API_URL: string;
-      API_KEY: string;
-    };
-    DEEPSEEK: {
-      API_KEY: string;
-    };
-    AIMLAPI: {
-      API_KEY: string;
-    };
-    LM_STUDIO: {
-      API_URL: string;
-    };
-    LEMONADE: {
-      API_URL: string;
-      API_KEY: string;
-    };
-    CUSTOM_OPENAI: {
-      API_URL: string;
-      API_KEY: string;
-      MODEL_NAME: string;
-    };
-  };
-  API_ENDPOINTS: {
-    SEARXNG: string;
-  };
-}
-
-type RecursivePartial<T> = {
-  [P in keyof T]?: RecursivePartial<T[P]>;
-};
-
-const loadConfig = () => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    return toml.parse(
-      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-    ) as any as Config;
-  }
-
-  // Client-side fallback - settings will be loaded via API
-  return {} as Config;
-};
-
-export const getSimilarityMeasure = () =>
-  loadConfig().GENERAL.SIMILARITY_MEASURE;
-
-export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
-
-export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
-
-export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;
-
-export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
-
-export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
-
-export const getSearxngApiEndpoint = () =>
-  process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
-
-export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
-
-export const getOllamaApiKey = () => loadConfig().MODELS.OLLAMA.API_KEY;
-
-export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
-
-export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
-
-export const getCustomOpenaiApiKey = () =>
-  loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
-
-export const getCustomOpenaiApiUrl = () =>
-  loadConfig().MODELS.CUSTOM_OPENAI.API_URL;
-
-export const getCustomOpenaiModelName = () =>
-  loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
-
-export const getLMStudioApiEndpoint = () =>
-  loadConfig().MODELS.LM_STUDIO.API_URL;
-
-export const getLemonadeApiEndpoint = () =>
-  loadConfig().MODELS.LEMONADE.API_URL;
-
-export const getLemonadeApiKey = () => loadConfig().MODELS.LEMONADE.API_KEY;
-
-const mergeConfigs = (current: any, update: any): any => {
-  if (update === null || update === undefined) {
-    return current;
-  }
-
-  if (typeof current !== 'object' || current === null) {
-    return update;
-  }
-
-  const result = { ...current };
-
-  for (const key in update) {
-    if (Object.prototype.hasOwnProperty.call(update, key)) {
-      const updateValue = update[key];
-
-      if (
-        typeof updateValue === 'object' &&
-        updateValue !== null &&
-        typeof result[key] === 'object' &&
-        result[key] !== null
-      ) {
-        result[key] = mergeConfigs(result[key], updateValue);
-      } else if (updateValue !== undefined) {
-        result[key] = updateValue;
-      }
-    }
-  }
-
-  return result;
-};
-
-export const updateConfig = (config: RecursivePartial<Config>) => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    const currentConfig = loadConfig();
-    const mergedConfig = mergeConfigs(currentConfig, config);
-    fs.writeFileSync(
-      path.join(path.join(process.cwd(), `${configFileName}`)),
-      toml.stringify(mergedConfig),
-    );
-  }
-};
diff --git a/src/lib/providers/aimlapi.ts b/src/lib/providers/aimlapi.ts
deleted file mode 100644
index 9c982fe..0000000
--- a/src/lib/providers/aimlapi.ts
+++ /dev/null
@@ -1,94 +0,0 @@
-import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
-import { getAimlApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-import axios from 'axios';
-
-export const PROVIDER_INFO = {
-  key: 'aimlapi',
-  displayName: 'AI/ML API',
-};
-
-interface AimlApiModel {
-  id: string;
-  name?: string;
-  type?: string;
-}
-
-const API_URL = 'https://api.aimlapi.com';
-
-export const loadAimlApiChatModels = async () => {
-  const apiKey = getAimlApiKey();
-
-  if (!apiKey) return {};
-
-  try {
-    const response = await axios.get(`${API_URL}/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${apiKey}`,
-      },
-    });
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    response.data.data.forEach((model: AimlApiModel) => {
-      if (model.type === 'chat-completion') {
-        chatModels[model.id] = {
-          displayName: model.name || model.id,
-          model: new ChatOpenAI({
-            apiKey: apiKey,
-            modelName: model.id,
-            temperature: 0.7,
-            configuration: {
-              baseURL: API_URL,
-            },
-          }) as unknown as BaseChatModel,
-        };
-      }
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading AI/ML API models: ${err}`);
-    return {};
-  }
-};
-
-export const loadAimlApiEmbeddingModels = async () => {
-  const apiKey = getAimlApiKey();
-
-  if (!apiKey) return {};
-
-  try {
-    const response = await axios.get(`${API_URL}/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${apiKey}`,
-      },
-    });
-
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    response.data.data.forEach((model: AimlApiModel) => {
-      if (model.type === 'embedding') {
-        embeddingModels[model.id] = {
-          displayName: model.name || model.id,
-          model: new OpenAIEmbeddings({
-            apiKey: apiKey,
-            modelName: model.id,
-            configuration: {
-              baseURL: API_URL,
-            },
-          }) as unknown as Embeddings,
-        };
-      }
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading AI/ML API embeddings models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts
deleted file mode 100644
index 6af2115..0000000
--- a/src/lib/providers/anthropic.ts
+++ /dev/null
@@ -1,78 +0,0 @@
-import { ChatAnthropic } from '@langchain/anthropic';
-import { ChatModel } from '.';
-import { getAnthropicApiKey } from '../config';
-
-export const PROVIDER_INFO = {
-  key: 'anthropic',
-  displayName: 'Anthropic',
-};
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-const anthropicChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Claude 4.1 Opus',
-    key: 'claude-opus-4-1-20250805',
-  },
-  {
-    displayName: 'Claude 4 Opus',
-    key: 'claude-opus-4-20250514',
-  },
-  {
-    displayName: 'Claude 4 Sonnet',
-    key: 'claude-sonnet-4-20250514',
-  },
-  {
-    displayName: 'Claude 3.7 Sonnet',
-    key: 'claude-3-7-sonnet-20250219',
-  },
-  {
-    displayName: 'Claude 3.5 Haiku',
-    key: 'claude-3-5-haiku-20241022',
-  },
-  {
-    displayName: 'Claude 3.5 Sonnet v2',
-    key: 'claude-3-5-sonnet-20241022',
-  },
-  {
-    displayName: 'Claude 3.5 Sonnet',
-    key: 'claude-3-5-sonnet-20240620',
-  },
-  {
-    displayName: 'Claude 3 Opus',
-    key: 'claude-3-opus-20240229',
-  },
-  {
-    displayName: 'Claude 3 Sonnet',
-    key: 'claude-3-sonnet-20240229',
-  },
-  {
-    displayName: 'Claude 3 Haiku',
-    key: 'claude-3-haiku-20240307',
-  },
-];
-
-export const loadAnthropicChatModels = async () => {
-  const anthropicApiKey = getAnthropicApiKey();
-
-  if (!anthropicApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    anthropicChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatAnthropic({
-          apiKey: anthropicApiKey,
-          modelName: model.key,
-          temperature: 0.7,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Anthropic models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/deepseek.ts b/src/lib/providers/deepseek.ts
deleted file mode 100644
index 9c9ef5a..0000000
--- a/src/lib/providers/deepseek.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-import { ChatOpenAI } from '@langchain/openai';
-import { getDeepseekApiKey } from '../config';
-import { ChatModel } from '.';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-export const PROVIDER_INFO = {
-  key: 'deepseek',
-  displayName: 'Deepseek AI',
-};
-
-const deepseekChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Deepseek Chat (Deepseek V3)',
-    key: 'deepseek-chat',
-  },
-  {
-    displayName: 'Deepseek Reasoner (Deepseek R1)',
-    key: 'deepseek-reasoner',
-  },
-];
-
-export const loadDeepseekChatModels = async () => {
-  const deepseekApiKey = getDeepseekApiKey();
-
-  if (!deepseekApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    deepseekChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatOpenAI({
-          apiKey: deepseekApiKey,
-          modelName: model.key,
-          temperature: 0.7,
-          configuration: {
-            baseURL: 'https://api.deepseek.com',
-          },
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Deepseek models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/gemini.ts b/src/lib/providers/gemini.ts
deleted file mode 100644
index 418e0a4..0000000
--- a/src/lib/providers/gemini.ts
+++ /dev/null
@@ -1,114 +0,0 @@
-import {
-  ChatGoogleGenerativeAI,
-  GoogleGenerativeAIEmbeddings,
-} from '@langchain/google-genai';
-import { getGeminiApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'gemini',
-  displayName: 'Google Gemini',
-};
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-const geminiChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Gemini 2.5 Flash',
-    key: 'gemini-2.5-flash',
-  },
-  {
-    displayName: 'Gemini 2.5 Flash-Lite',
-    key: 'gemini-2.5-flash-lite',
-  },
-  {
-    displayName: 'Gemini 2.5 Pro',
-    key: 'gemini-2.5-pro',
-  },
-  {
-    displayName: 'Gemini 2.0 Flash',
-    key: 'gemini-2.0-flash',
-  },
-  {
-    displayName: 'Gemini 2.0 Flash-Lite',
-    key: 'gemini-2.0-flash-lite',
-  },
-  {
-    displayName: 'Gemini 2.0 Flash Thinking Experimental',
-    key: 'gemini-2.0-flash-thinking-exp-01-21',
-  },
-  {
-    displayName: 'Gemini 1.5 Flash',
-    key: 'gemini-1.5-flash',
-  },
-  {
-    displayName: 'Gemini 1.5 Flash-8B',
-    key: 'gemini-1.5-flash-8b',
-  },
-  {
-    displayName: 'Gemini 1.5 Pro',
-    key: 'gemini-1.5-pro',
-  },
-];
-
-const geminiEmbeddingModels: Record<string, string>[] = [
-  {
-    displayName: 'Text Embedding 004',
-    key: 'models/text-embedding-004',
-  },
-  {
-    displayName: 'Embedding 001',
-    key: 'models/embedding-001',
-  },
-];
-
-export const loadGeminiChatModels = async () => {
-  const geminiApiKey = getGeminiApiKey();
-
-  if (!geminiApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    geminiChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatGoogleGenerativeAI({
-          apiKey: geminiApiKey,
-          model: model.key,
-          temperature: 0.7,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Gemini models: ${err}`);
-    return {};
-  }
-};
-
-export const loadGeminiEmbeddingModels = async () => {
-  const geminiApiKey = getGeminiApiKey();
-
-  if (!geminiApiKey) return {};
-
-  try {
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    geminiEmbeddingModels.forEach((model) => {
-      embeddingModels[model.key] = {
-        displayName: model.displayName,
-        model: new GoogleGenerativeAIEmbeddings({
-          apiKey: geminiApiKey,
-          modelName: model.key,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Gemini embeddings models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
deleted file mode 100644
index 4e7db51..0000000
--- a/src/lib/providers/groq.ts
+++ /dev/null
@@ -1,44 +0,0 @@
-import { ChatGroq } from '@langchain/groq';
-import { getGroqApiKey } from '../config';
-import { ChatModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'groq',
-  displayName: 'Groq',
-};
-
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-export const loadGroqChatModels = async () => {
-  const groqApiKey = getGroqApiKey();
-  if (!groqApiKey) return {};
-
-  try {
-    const res = await fetch('https://api.groq.com/openai/v1/models', {
-      method: 'GET',
-      headers: {
-        Authorization: `bearer ${groqApiKey}`,
-        'Content-Type': 'application/json',
-      },
-    });
-
-    const groqChatModels = (await res.json()).data;
-    const chatModels: Record<string, ChatModel> = {};
-
-    groqChatModels.forEach((model: any) => {
-      chatModels[model.id] = {
-        displayName: model.id,
-        model: new ChatGroq({
-          apiKey: groqApiKey,
-          model: model.id,
-          temperature: 0.7,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Groq models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
deleted file mode 100644
index d4e4248..0000000
--- a/src/lib/providers/index.ts
+++ /dev/null
@@ -1,170 +0,0 @@
-import { Embeddings } from '@langchain/core/embeddings';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import {
-  loadOpenAIChatModels,
-  loadOpenAIEmbeddingModels,
-  PROVIDER_INFO as OpenAIInfo,
-  PROVIDER_INFO,
-} from './openai';
-import {
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-} from '../config';
-import { ChatOpenAI } from '@langchain/openai';
-import {
-  loadOllamaChatModels,
-  loadOllamaEmbeddingModels,
-  PROVIDER_INFO as OllamaInfo,
-} from './ollama';
-import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
-import {
-  loadAnthropicChatModels,
-  PROVIDER_INFO as AnthropicInfo,
-} from './anthropic';
-import {
-  loadGeminiChatModels,
-  loadGeminiEmbeddingModels,
-  PROVIDER_INFO as GeminiInfo,
-} from './gemini';
-import {
-  loadTransformersEmbeddingsModels,
-  PROVIDER_INFO as TransformersInfo,
-} from './transformers';
-import {
-  loadDeepseekChatModels,
-  PROVIDER_INFO as DeepseekInfo,
-} from './deepseek';
-import {
-  loadAimlApiChatModels,
-  loadAimlApiEmbeddingModels,
-  PROVIDER_INFO as AimlApiInfo,
-} from './aimlapi';
-import {
-  loadLMStudioChatModels,
-  loadLMStudioEmbeddingsModels,
-  PROVIDER_INFO as LMStudioInfo,
-} from './lmstudio';
-import {
-  loadLemonadeChatModels,
-  loadLemonadeEmbeddingModels,
-  PROVIDER_INFO as LemonadeInfo,
-} from './lemonade';
-
-export const PROVIDER_METADATA = {
-  openai: OpenAIInfo,
-  ollama: OllamaInfo,
-  groq: GroqInfo,
-  anthropic: AnthropicInfo,
-  gemini: GeminiInfo,
-  transformers: TransformersInfo,
-  deepseek: DeepseekInfo,
-  aimlapi: AimlApiInfo,
-  lmstudio: LMStudioInfo,
-  lemonade: LemonadeInfo,
-  custom_openai: {
-    key: 'custom_openai',
-    displayName: 'Custom OpenAI',
-  },
-};
-
-export interface ChatModel {
-  displayName: string;
-  model: BaseChatModel;
-}
-
-export interface EmbeddingModel {
-  displayName: string;
-  model: Embeddings;
-}
-
-export const chatModelProviders: Record<
-  string,
-  () => Promise<Record<string, ChatModel>>
-> = {
-  openai: loadOpenAIChatModels,
-  ollama: loadOllamaChatModels,
-  groq: loadGroqChatModels,
-  anthropic: loadAnthropicChatModels,
-  gemini: loadGeminiChatModels,
-  deepseek: loadDeepseekChatModels,
-  aimlapi: loadAimlApiChatModels,
-  lmstudio: loadLMStudioChatModels,
-  lemonade: loadLemonadeChatModels,
-};
-
-export const embeddingModelProviders: Record<
-  string,
-  () => Promise<Record<string, EmbeddingModel>>
-> = {
-  openai: loadOpenAIEmbeddingModels,
-  ollama: loadOllamaEmbeddingModels,
-  gemini: loadGeminiEmbeddingModels,
-  transformers: loadTransformersEmbeddingsModels,
-  aimlapi: loadAimlApiEmbeddingModels,
-  lmstudio: loadLMStudioEmbeddingsModels,
-  lemonade: loadLemonadeEmbeddingModels,
-};
-
-export const getAvailableChatModelProviders = async () => {
-  const models: Record<string, Record<string, ChatModel>> = {};
-
-  for (const provider in chatModelProviders) {
-    const providerModels = await chatModelProviders[provider]();
-    if (Object.keys(providerModels).length > 0) {
-      models[provider] = providerModels;
-    }
-  }
-
-  const customOpenAiApiKey = getCustomOpenaiApiKey();
-  const customOpenAiApiUrl = getCustomOpenaiApiUrl();
-  const customOpenAiModelName = getCustomOpenaiModelName();
-
-  models['custom_openai'] = {
-    ...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName
-      ? {
-          [customOpenAiModelName]: {
-            displayName: customOpenAiModelName,
-            model: new ChatOpenAI({
-              apiKey: customOpenAiApiKey,
-              modelName: customOpenAiModelName,
-              ...(() => {
-                const temperatureRestrictedModels = [
-                  'gpt-5-nano',
-                  'gpt-5',
-                  'gpt-5-mini',
-                  'o1',
-                  'o3',
-                  'o3-mini',
-                  'o4-mini',
-                ];
-                const isTemperatureRestricted =
-                  temperatureRestrictedModels.some((restrictedModel) =>
-                    customOpenAiModelName.includes(restrictedModel),
-                  );
-                return isTemperatureRestricted ? {} : { temperature: 0.7 };
-              })(),
-              configuration: {
-                baseURL: customOpenAiApiUrl,
-              },
-            }) as unknown as BaseChatModel,
-          },
-        }
-      : {}),
-  };
-
-  return models;
-};
-
-export const getAvailableEmbeddingModelProviders = async () => {
-  const models: Record<string, Record<string, EmbeddingModel>> = {};
-
-  for (const provider in embeddingModelProviders) {
-    const providerModels = await embeddingModelProviders[provider]();
-    if (Object.keys(providerModels).length > 0) {
-      models[provider] = providerModels;
-    }
-  }
-
-  return models;
-};
diff --git a/src/lib/providers/lemonade.ts b/src/lib/providers/lemonade.ts
deleted file mode 100644
index d87e678..0000000
--- a/src/lib/providers/lemonade.ts
+++ /dev/null
@@ -1,94 +0,0 @@
-import axios from 'axios';
-import { getLemonadeApiEndpoint, getLemonadeApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'lemonade',
-  displayName: 'Lemonade',
-};
-
-import { ChatOpenAI } from '@langchain/openai';
-import { OpenAIEmbeddings } from '@langchain/openai';
-
-export const loadLemonadeChatModels = async () => {
-  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
-  const lemonadeApiKey = getLemonadeApiKey();
-
-  if (!lemonadeApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        ...(lemonadeApiKey
-          ? { Authorization: `Bearer ${lemonadeApiKey}` }
-          : {}),
-      },
-    });
-
-    const { data: models } = res.data;
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    models.forEach((model: any) => {
-      chatModels[model.id] = {
-        displayName: model.id,
-        model: new ChatOpenAI({
-          apiKey: lemonadeApiKey || 'lemonade-key',
-          modelName: model.id,
-          temperature: 0.7,
-          configuration: {
-            baseURL: `${lemonadeApiEndpoint}/api/v1`,
-          },
-        }),
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Lemonade models: ${err}`);
-    return {};
-  }
-};
-
-export const loadLemonadeEmbeddingModels = async () => {
-  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
-  const lemonadeApiKey = getLemonadeApiKey();
-
-  if (!lemonadeApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        ...(lemonadeApiKey
-          ? { Authorization: `Bearer ${lemonadeApiKey}` }
-          : {}),
-      },
-    });
-
-    const { data: models } = res.data;
-
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    // Filter models that support embeddings (if Lemonade provides this info)
-    // For now, we'll assume all models can be used for embeddings
-    models.forEach((model: any) => {
-      embeddingModels[model.id] = {
-        displayName: model.id,
-        model: new OpenAIEmbeddings({
-          apiKey: lemonadeApiKey || 'lemonade-key',
-          modelName: model.id,
-          configuration: {
-            baseURL: `${lemonadeApiEndpoint}/api/v1`,
-          },
-        }),
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Lemonade embedding models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
deleted file mode 100644
index f79c0aa..0000000
--- a/src/lib/providers/lmstudio.ts
+++ /dev/null
@@ -1,100 +0,0 @@
-import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
-import axios from 'axios';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'lmstudio',
-  displayName: 'LM Studio',
-};
-import { ChatOpenAI } from '@langchain/openai';
-import { OpenAIEmbeddings } from '@langchain/openai';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-interface LMStudioModel {
-  id: string;
-  name?: string;
-}
-
-const ensureV1Endpoint = (endpoint: string): string =>
-  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
-
-const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
-  try {
-    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-    return true;
-  } catch {
-    return false;
-  }
-};
-
-export const loadLMStudioChatModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      chatModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new ChatOpenAI({
-          apiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-          temperature: 0.7,
-          streaming: true,
-          maxRetries: 3,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio models: ${err}`);
-    return {};
-  }
-};
-
-export const loadLMStudioEmbeddingsModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const embeddingsModels: Record<string, EmbeddingModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      embeddingsModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new OpenAIEmbeddings({
-          apiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingsModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio embeddings model: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
deleted file mode 100644
index cb0b848..0000000
--- a/src/lib/providers/ollama.ts
+++ /dev/null
@@ -1,86 +0,0 @@
-import axios from 'axios';
-import { getKeepAlive, getOllamaApiEndpoint, getOllamaApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'ollama',
-  displayName: 'Ollama',
-};
-import { ChatOllama } from '@langchain/ollama';
-import { OllamaEmbeddings } from '@langchain/ollama';
-
-export const loadOllamaChatModels = async () => {
-  const ollamaApiEndpoint = getOllamaApiEndpoint();
-  const ollamaApiKey = getOllamaApiKey();
-
-  if (!ollamaApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
-      headers: {
-        'Content-Type': 'application/json',
-      },
-    });
-
-    const { models } = res.data;
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    models.forEach((model: any) => {
-      chatModels[model.model] = {
-        displayName: model.name,
-        model: new ChatOllama({
-          baseUrl: ollamaApiEndpoint,
-          model: model.model,
-          temperature: 0.7,
-          keepAlive: getKeepAlive(),
-          ...(ollamaApiKey
-            ? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
-            : {}),
-        }),
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Ollama models: ${err}`);
-    return {};
-  }
-};
-
-export const loadOllamaEmbeddingModels = async () => {
-  const ollamaApiEndpoint = getOllamaApiEndpoint();
-  const ollamaApiKey = getOllamaApiKey();
-
-  if (!ollamaApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
-      headers: {
-        'Content-Type': 'application/json',
-      },
-    });
-
-    const { models } = res.data;
-
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    models.forEach((model: any) => {
-      embeddingModels[model.model] = {
-        displayName: model.name,
-        model: new OllamaEmbeddings({
-          baseUrl: ollamaApiEndpoint,
-          model: model.model,
-          ...(ollamaApiKey
-            ? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
-            : {}),
-        }),
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Ollama embeddings models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts
deleted file mode 100644
index 0c5379a..0000000
--- a/src/lib/providers/openai.ts
+++ /dev/null
@@ -1,159 +0,0 @@
-import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
-import { getOpenaiApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'openai',
-  displayName: 'OpenAI',
-};
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-const openaiChatModels: Record<string, string>[] = [
-  {
-    displayName: 'GPT-3.5 Turbo',
-    key: 'gpt-3.5-turbo',
-  },
-  {
-    displayName: 'GPT-4',
-    key: 'gpt-4',
-  },
-  {
-    displayName: 'GPT-4 turbo',
-    key: 'gpt-4-turbo',
-  },
-  {
-    displayName: 'GPT-4 omni',
-    key: 'gpt-4o',
-  },
-  {
-    displayName: 'GPT-4o (2024-05-13)',
-    key: 'gpt-4o-2024-05-13',
-  },
-  {
-    displayName: 'GPT-4 omni mini',
-    key: 'gpt-4o-mini',
-  },
-  {
-    displayName: 'GPT 4.1 nano',
-    key: 'gpt-4.1-nano',
-  },
-  {
-    displayName: 'GPT 4.1 mini',
-    key: 'gpt-4.1-mini',
-  },
-  {
-    displayName: 'GPT 4.1',
-    key: 'gpt-4.1',
-  },
-  {
-    displayName: 'GPT 5 nano',
-    key: 'gpt-5-nano',
-  },
-  {
-    displayName: 'GPT 5',
-    key: 'gpt-5',
-  },
-  {
-    displayName: 'GPT 5 Mini',
-    key: 'gpt-5-mini',
-  },
-  {
-    displayName: 'o1',
-    key: 'o1',
-  },
-  {
-    displayName: 'o3',
-    key: 'o3',
-  },
-  {
-    displayName: 'o3 Mini',
-    key: 'o3-mini',
-  },
-  {
-    displayName: 'o4 Mini',
-    key: 'o4-mini',
-  },
-];
-
-const openaiEmbeddingModels: Record<string, string>[] = [
-  {
-    displayName: 'Text Embedding 3 Small',
-    key: 'text-embedding-3-small',
-  },
-  {
-    displayName: 'Text Embedding 3 Large',
-    key: 'text-embedding-3-large',
-  },
-];
-
-export const loadOpenAIChatModels = async () => {
-  const openaiApiKey = getOpenaiApiKey();
-
-  if (!openaiApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    openaiChatModels.forEach((model) => {
-      // Models that only support temperature = 1
-      const temperatureRestrictedModels = [
-        'gpt-5-nano',
-        'gpt-5',
-        'gpt-5-mini',
-        'o1',
-        'o3',
-        'o3-mini',
-        'o4-mini',
-      ];
-      const isTemperatureRestricted = temperatureRestrictedModels.some(
-        (restrictedModel) => model.key.includes(restrictedModel),
-      );
-
-      const modelConfig: any = {
-        apiKey: openaiApiKey,
-        modelName: model.key,
-      };
-
-      // Only add temperature if the model supports it
-      if (!isTemperatureRestricted) {
-        modelConfig.temperature = 0.7;
-      }
-
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading OpenAI models: ${err}`);
-    return {};
-  }
-};
-
-export const loadOpenAIEmbeddingModels = async () => {
-  const openaiApiKey = getOpenaiApiKey();
-
-  if (!openaiApiKey) return {};
-
-  try {
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    openaiEmbeddingModels.forEach((model) => {
-      embeddingModels[model.key] = {
-        displayName: model.displayName,
-        model: new OpenAIEmbeddings({
-          apiKey: openaiApiKey,
-          modelName: model.key,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading OpenAI embeddings models: ${err}`);
-    return {};
-  }
-};
diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts
deleted file mode 100644
index 3098d9f..0000000
--- a/src/lib/providers/transformers.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
-
-export const PROVIDER_INFO = {
-  key: 'transformers',
-  displayName: 'Hugging Face',
-};
-
-export const loadTransformersEmbeddingsModels = async () => {
-  try {
-    const embeddingModels = {
-      'xenova-bge-small-en-v1.5': {
-        displayName: 'BGE Small',
-        model: new HuggingFaceTransformersEmbeddings({
-          modelName: 'Xenova/bge-small-en-v1.5',
-        }),
-      },
-      'xenova-gte-small': {
-        displayName: 'GTE Small',
-        model: new HuggingFaceTransformersEmbeddings({
-          modelName: 'Xenova/gte-small',
-        }),
-      },
-      'xenova-bert-base-multilingual-uncased': {
-        displayName: 'Bert Multilingual',
-        model: new HuggingFaceTransformersEmbeddings({
-          modelName: 'Xenova/bert-base-multilingual-uncased',
-        }),
-      },
-    };
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Transformers embeddings model: ${err}`);
-    return {};
-  }
-};