From 042ce33cf45462993c76ce5d85a24b1fa7d4175f Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Mon, 20 Oct 2025 16:35:44 +0530
Subject: [PATCH] feat(providers): add rest of the providers

---
 src/lib/models/providers/aiml.ts      | 152 +++++++++++++++++++++++++
 src/lib/models/providers/anthropic.ts | 115 +++++++++++++++++++
 src/lib/models/providers/deepseek.ts  | 107 +++++++++++++++++
 src/lib/models/providers/gemini.ts    | 140 +++++++++++++++++++++++
 src/lib/models/providers/groq.ts      | 118 +++++++++++++++++++
 src/lib/models/providers/index.ts     |  14 +++
 src/lib/models/providers/lemonade.ts  | 158 ++++++++++++++++++++++++++
 src/lib/models/providers/lmstudio.ts  | 148 ++++++++++++++++++++++++
 8 files changed, 952 insertions(+)
 create mode 100644 src/lib/models/providers/aiml.ts
 create mode 100644 src/lib/models/providers/anthropic.ts
 create mode 100644 src/lib/models/providers/deepseek.ts
 create mode 100644 src/lib/models/providers/gemini.ts
 create mode 100644 src/lib/models/providers/groq.ts
 create mode 100644 src/lib/models/providers/lemonade.ts
 create mode 100644 src/lib/models/providers/lmstudio.ts

diff --git a/src/lib/models/providers/aiml.ts b/src/lib/models/providers/aiml.ts
new file mode 100644
index 0000000..35ccf79
--- /dev/null
+++ b/src/lib/models/providers/aiml.ts
@@ -0,0 +1,152 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Model, ModelList, ProviderMetadata } from '../types';
+import BaseModelProvider from './baseProvider';
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { Embeddings } from '@langchain/core/embeddings';
+import { UIConfigField } from '@/lib/config/types';
+import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+
+interface AimlConfig {
+  apiKey: string;
+}
+
+const providerConfigFields: UIConfigField[] = [
+  {
+    type: 'password',
+    name: 'API Key',
+    key: 'apiKey',
+    description: 'Your AI/ML API key',
+    required: true,
+    placeholder: 'AI/ML API Key',
+    env: 'AIML_API_KEY',
+    scope: 'server',
+  },
+];
+
+class AimlProvider extends BaseModelProvider<AimlConfig> {
+  constructor(id: string, name: string, config: AimlConfig) {
+    super(id, name, config);
+  }
+
+  async getDefaultModels(): Promise<ModelList> {
+    try {
+      const res = await fetch('https://api.aimlapi.com/models', {
+        method: 'GET',
+        headers: {
+          'Content-Type': 'application/json',
+          Authorization: `Bearer ${this.config.apiKey}`,
+        },
+      });
+
+      const data = await res.json();
+
+      const chatModels: Model[] = data.data
+        .filter((m: any) => m.type === 'chat-completion')
+        .map((m: any) => {
+          return {
+            name: m.id,
+            key: m.id,
+          };
+        });
+
+      const embeddingModels: Model[] = data.data
+        .filter((m: any) => m.type === 'embedding')
+        .map((m: any) => {
+          return {
+            name: m.id,
+            key: m.id,
+          };
+        });
+
+      return {
+        embedding: embeddingModels,
+        chat: chatModels,
+      };
+    } catch (err) {
+      if (err instanceof TypeError) {
+        throw new Error(
+          'Error connecting to AI/ML API. Please ensure your API key is correct and the service is available.',
+        );
+      }
+
+      throw err;
+    }
+  }
+
+  async getModelList(): Promise<ModelList> {
+    const defaultModels = await this.getDefaultModels();
+    const configProvider = getConfiguredModelProviderById(this.id)!;
+
+    return {
+      embedding: [
+        ...defaultModels.embedding,
+        ...configProvider.embeddingModels,
+      ],
+      chat: [...defaultModels.chat, ...configProvider.chatModels],
+    };
+  }
+
+  async loadChatModel(key: string): Promise<BaseChatModel> {
+    const modelList = await this.getModelList();
+
+    const exists = modelList.chat.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading AI/ML API Chat Model. Invalid Model Selected',
+      );
+    }
+
+    return new ChatOpenAI({
+      apiKey: this.config.apiKey,
+      temperature: 0.7,
+      model: key,
+      configuration: {
+        baseURL: 'https://api.aimlapi.com',
+      },
+    });
+  }
+
+  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+    const modelList = await this.getModelList();
+    const exists = modelList.embedding.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading AI/ML API Embedding Model. Invalid Model Selected.',
+      );
+    }
+
+    return new OpenAIEmbeddings({
+      apiKey: this.config.apiKey,
+      model: key,
+      configuration: {
+        baseURL: 'https://api.aimlapi.com',
+      },
+    });
+  }
+
+  static parseAndValidate(raw: any): AimlConfig {
+    if (!raw || typeof raw !== 'object')
+      throw new Error('Invalid config provided. Expected object');
+    if (!raw.apiKey)
+      throw new Error('Invalid config provided. API key must be provided');
+
+    return {
+      apiKey: String(raw.apiKey),
+    };
+  }
+
+  static getProviderConfigFields(): UIConfigField[] {
+    return providerConfigFields;
+  }
+
+  static getProviderMetadata(): ProviderMetadata {
+    return {
+      key: 'aiml',
+      name: 'AI/ML API',
+    };
+  }
+}
+
+export default AimlProvider;
diff --git a/src/lib/models/providers/anthropic.ts b/src/lib/models/providers/anthropic.ts
new file mode 100644
index 0000000..e071159
--- /dev/null
+++ b/src/lib/models/providers/anthropic.ts
@@ -0,0 +1,115 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Model, ModelList, ProviderMetadata } from '../types';
+import BaseModelProvider from './baseProvider';
+import { ChatAnthropic } from '@langchain/anthropic';
+import { Embeddings } from '@langchain/core/embeddings';
+import { UIConfigField } from '@/lib/config/types';
+import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+
+interface AnthropicConfig {
+  apiKey: string;
+}
+
+const providerConfigFields: UIConfigField[] = [
+  {
+    type: 'password',
+    name: 'API Key',
+    key: 'apiKey',
+    description: 'Your Anthropic API key',
+    required: true,
+    placeholder: 'Anthropic API Key',
+    env: 'ANTHROPIC_API_KEY',
+    scope: 'server',
+  },
+];
+
+class AnthropicProvider extends BaseModelProvider<AnthropicConfig> {
+  constructor(id: string, name: string, config: AnthropicConfig) {
+    super(id, name, config);
+  }
+
+  async getDefaultModels(): Promise<ModelList> {
+    const res = await fetch('https://api.anthropic.com/v1/models?limit=999', {
+      method: 'GET',
+      headers: {
+        'x-api-key': this.config.apiKey,
+        'anthropic-version': '2023-06-01',
+        'Content-type': 'application/json',
+      },
+    });
+
+    if (!res.ok) {
+      throw new Error(`Failed to fetch Anthropic models: ${res.statusText}`);
+    }
+
+    const data = (await res.json()).data;
+
+    const models: Model[] = data.map((m: any) => {
+      return {
+        key: m.id,
+        name: m.display_name,
+      };
+    });
+
+    return {
+      embedding: [],
+      chat: models,
+    };
+  }
+
+  async getModelList(): Promise<ModelList> {
+    const defaultModels = await this.getDefaultModels();
+    const configProvider = getConfiguredModelProviderById(this.id)!;
+
+    return {
+      embedding: [],
+      chat: [...defaultModels.chat, ...configProvider.chatModels],
+    };
+  }
+
+  async loadChatModel(key: string): Promise<BaseChatModel> {
+    const modelList = await this.getModelList();
+
+    const exists = modelList.chat.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading Anthropic Chat Model. Invalid Model Selected',
+      );
+    }
+
+    return new ChatAnthropic({
+      apiKey: this.config.apiKey,
+      temperature: 0.7,
+      model: key,
+    });
+  }
+
+  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+    throw new Error('Anthropic provider does not support embedding models.');
+  }
+
+  static parseAndValidate(raw: any): AnthropicConfig {
+    if (!raw || typeof raw !== 'object')
+      throw new Error('Invalid config provided. Expected object');
+    if (!raw.apiKey)
+      throw new Error('Invalid config provided. API key must be provided');
+
+    return {
+      apiKey: String(raw.apiKey),
+    };
+  }
+
+  static getProviderConfigFields(): UIConfigField[] {
+    return providerConfigFields;
+  }
+
+  static getProviderMetadata(): ProviderMetadata {
+    return {
+      key: 'anthropic',
+      name: 'Anthropic',
+    };
+  }
+}
+
+export default AnthropicProvider;
diff --git a/src/lib/models/providers/deepseek.ts b/src/lib/models/providers/deepseek.ts
new file mode 100644
index 0000000..9b29d83
--- /dev/null
+++ b/src/lib/models/providers/deepseek.ts
@@ -0,0 +1,107 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Model, ModelList, ProviderMetadata } from '../types';
+import BaseModelProvider from './baseProvider';
+import { ChatOpenAI } from '@langchain/openai';
+import { Embeddings } from '@langchain/core/embeddings';
+import { UIConfigField } from '@/lib/config/types';
+import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+
+interface DeepSeekConfig {
+  apiKey: string;
+}
+
+const defaultChatModels: Model[] = [
+  {
+    name: 'Deepseek Chat / DeepSeek V3.2 Exp',
+    key: 'deepseek-chat',
+  },
+  {
+    name: 'Deepseek Reasoner / DeepSeek V3.2 Exp',
+    key: 'deepseek-reasoner',
+  },
+];
+
+const providerConfigFields: UIConfigField[] = [
+  {
+    type: 'password',
+    name: 'API Key',
+    key: 'apiKey',
+    description: 'Your DeepSeek API key',
+    required: true,
+    placeholder: 'DeepSeek API Key',
+    env: 'DEEPSEEK_API_KEY',
+    scope: 'server',
+  },
+];
+
+class DeepSeekProvider extends BaseModelProvider<DeepSeekConfig> {
+  constructor(id: string, name: string, config: DeepSeekConfig) {
+    super(id, name, config);
+  }
+
+  async getDefaultModels(): Promise<ModelList> {
+    return {
+      embedding: [],
+      chat: defaultChatModels,
+    };
+  }
+
+  async getModelList(): Promise<ModelList> {
+    const defaultModels = await this.getDefaultModels();
+    const configProvider = getConfiguredModelProviderById(this.id)!;
+
+    return {
+      embedding: [],
+      chat: [...defaultModels.chat, ...configProvider.chatModels],
+    };
+  }
+
+  async loadChatModel(key: string): Promise<BaseChatModel> {
+    const modelList = await this.getModelList();
+
+    const exists = modelList.chat.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading DeepSeek Chat Model. Invalid Model Selected',
+      );
+    }
+
+    return new ChatOpenAI({
+      apiKey: this.config.apiKey,
+      temperature: 0.7,
+      model: key,
+      configuration: {
+        baseURL: 'https://api.deepseek.com',
+      },
+    });
+  }
+
+  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+    throw new Error('DeepSeek provider does not support embedding models.');
+  }
+
+  static parseAndValidate(raw: any): DeepSeekConfig {
+    if (!raw || typeof raw !== 'object')
+      throw new Error('Invalid config provided. Expected object');
+    if (!raw.apiKey)
+      throw new Error('Invalid config provided. API key must be provided');
+
+    return {
+      apiKey: String(raw.apiKey),
+    };
+  }
+
+  static getProviderConfigFields(): UIConfigField[] {
+    return providerConfigFields;
+  }
+
+  static getProviderMetadata(): ProviderMetadata {
+    return {
+      key: 'deepseek',
+      name: 'Deepseek AI',
+    };
+  }
+}
+
+export default DeepSeekProvider;
diff --git a/src/lib/models/providers/gemini.ts b/src/lib/models/providers/gemini.ts
new file mode 100644
index 0000000..6cf3584
--- /dev/null
+++ b/src/lib/models/providers/gemini.ts
@@ -0,0 +1,140 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Model, ModelList, ProviderMetadata } from '../types';
+import BaseModelProvider from './baseProvider';
+import {
+  ChatGoogleGenerativeAI,
+  GoogleGenerativeAIEmbeddings,
+} from '@langchain/google-genai';
+import { Embeddings } from '@langchain/core/embeddings';
+import { UIConfigField } from '@/lib/config/types';
+import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+
+interface GeminiConfig {
+  apiKey: string;
+}
+
+const providerConfigFields: UIConfigField[] = [
+  {
+    type: 'password',
+    name: 'API Key',
+    key: 'apiKey',
+    description: 'Your Google Gemini API key',
+    required: true,
+    placeholder: 'Google Gemini API Key',
+    env: 'GEMINI_API_KEY',
+    scope: 'server',
+  },
+];
+
+class GeminiProvider extends BaseModelProvider<GeminiConfig> {
+  constructor(id: string, name: string, config: GeminiConfig) {
+    super(id, name, config);
+  }
+
+  async getDefaultModels(): Promise<ModelList> {
+    const res = await fetch(
+      `https://generativelanguage.googleapis.com/v1beta/models?key=${this.config.apiKey}`,
+      {
+        method: 'GET',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+      },
+    );
+
+    const data = await res.json();
+
+    let defaultEmbeddingModels: Model[] = [];
+    let defaultChatModels: Model[] = [];
+
+    data.models.forEach((m: any) => {
+      if (m.supportedGenerationMethods.includes('embedText')) {
+        defaultEmbeddingModels.push({
+          key: m.name,
+          name: m.displayName,
+        });
+      } else if (m.supportedGenerationMethods.includes('generateContent')) {
+        defaultChatModels.push({
+          key: m.name,
+          name: m.displayName,
+        });
+      }
+    });
+
+    return {
+      embedding: defaultEmbeddingModels,
+      chat: defaultChatModels,
+    };
+  }
+
+  async getModelList(): Promise<ModelList> {
+    const defaultModels = await this.getDefaultModels();
+    const configProvider = getConfiguredModelProviderById(this.id)!;
+
+    return {
+      embedding: [
+        ...defaultModels.embedding,
+        ...configProvider.embeddingModels,
+      ],
+      chat: [...defaultModels.chat, ...configProvider.chatModels],
+    };
+  }
+
+  async loadChatModel(key: string): Promise<BaseChatModel> {
+    const modelList = await this.getModelList();
+
+    const exists = modelList.chat.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading Gemini Chat Model. Invalid Model Selected',
+      );
+    }
+
+    return new ChatGoogleGenerativeAI({
+      apiKey: this.config.apiKey,
+      temperature: 0.7,
+      model: key,
+    });
+  }
+
+  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+    const modelList = await this.getModelList();
+    const exists = modelList.embedding.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading Gemini Embedding Model. Invalid Model Selected.',
+      );
+    }
+
+    return new GoogleGenerativeAIEmbeddings({
+      apiKey: this.config.apiKey,
+      model: key,
+    });
+  }
+
+  static parseAndValidate(raw: any): GeminiConfig {
+    if (!raw || typeof raw !== 'object')
+      throw new Error('Invalid config provided. Expected object');
+    if (!raw.apiKey)
+      throw new Error('Invalid config provided. API key must be provided');
+
+    return {
+      apiKey: String(raw.apiKey),
+    };
+  }
+
+  static getProviderConfigFields(): UIConfigField[] {
+    return providerConfigFields;
+  }
+
+  static getProviderMetadata(): ProviderMetadata {
+    return {
+      key: 'gemini',
+      name: 'Google Gemini',
+    };
+  }
+}
+
+export default GeminiProvider;
diff --git a/src/lib/models/providers/groq.ts b/src/lib/models/providers/groq.ts
new file mode 100644
index 0000000..a87ea88
--- /dev/null
+++ b/src/lib/models/providers/groq.ts
@@ -0,0 +1,118 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Model, ModelList, ProviderMetadata } from '../types';
+import BaseModelProvider from './baseProvider';
+import { ChatGroq } from '@langchain/groq';
+import { Embeddings } from '@langchain/core/embeddings';
+import { UIConfigField } from '@/lib/config/types';
+import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+
+interface GroqConfig {
+  apiKey: string;
+}
+
+const providerConfigFields: UIConfigField[] = [
+  {
+    type: 'password',
+    name: 'API Key',
+    key: 'apiKey',
+    description: 'Your Groq API key',
+    required: true,
+    placeholder: 'Groq API Key',
+    env: 'GROQ_API_KEY',
+    scope: 'server',
+  },
+];
+
+class GroqProvider extends BaseModelProvider<GroqConfig> {
+  constructor(id: string, name: string, config: GroqConfig) {
+    super(id, name, config);
+  }
+
+  async getDefaultModels(): Promise<ModelList> {
+    try {
+      const res = await fetch('https://api.groq.com/openai/v1/models', {
+        method: 'GET',
+        headers: {
+          'Content-Type': 'application/json',
+          Authorization: `Bearer ${this.config.apiKey}`,
+        },
+      });
+
+      const data = await res.json();
+
+      const models: Model[] = data.data.map((m: any) => {
+        return {
+          name: m.id,
+          key: m.id,
+        };
+      });
+
+      return {
+        embedding: [],
+        chat: models,
+      };
+    } catch (err) {
+      if (err instanceof TypeError) {
+        throw new Error(
+          'Error connecting to Groq API. Please ensure your API key is correct and the Groq service is available.',
+        );
+      }
+
+      throw err;
+    }
+  }
+
+  async getModelList(): Promise<ModelList> {
+    const defaultModels = await this.getDefaultModels();
+    const configProvider = getConfiguredModelProviderById(this.id)!;
+
+    return {
+      embedding: [],
+      chat: [...defaultModels.chat, ...configProvider.chatModels],
+    };
+  }
+
+  async loadChatModel(key: string): Promise<BaseChatModel> {
+    const modelList = await this.getModelList();
+
+    const exists = modelList.chat.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error('Error Loading Groq Chat Model. Invalid Model Selected');
+    }
+
+    return new ChatGroq({
+      apiKey: this.config.apiKey,
+      temperature: 0.7,
+      model: key,
+    });
+  }
+
+  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+    throw new Error('Groq provider does not support embedding models.');
+  }
+
+  static parseAndValidate(raw: any): GroqConfig {
+    if (!raw || typeof raw !== 'object')
+      throw new Error('Invalid config provided. Expected object');
+    if (!raw.apiKey)
+      throw new Error('Invalid config provided. API key must be provided');
+
+    return {
+      apiKey: String(raw.apiKey),
+    };
+  }
+
+  static getProviderConfigFields(): UIConfigField[] {
+    return providerConfigFields;
+  }
+
+  static getProviderMetadata(): ProviderMetadata {
+    return {
+      key: 'groq',
+      name: 'Groq',
+    };
+  }
+}
+
+export default GroqProvider;
diff --git a/src/lib/models/providers/index.ts b/src/lib/models/providers/index.ts
index e6b9b9b..addca61 100644
--- a/src/lib/models/providers/index.ts
+++ b/src/lib/models/providers/index.ts
@@ -3,11 +3,25 @@ import { ProviderConstructor } from './baseProvider';
 import OpenAIProvider from './openai';
 import OllamaProvider from './ollama';
 import TransformersProvider from './transformers';
+import AnthropicProvider from './anthropic';
+import GeminiProvider from './gemini';
+import GroqProvider from './groq';
+import DeepSeekProvider from './deepseek';
+import LMStudioProvider from './lmstudio';
+import LemonadeProvider from './lemonade';
+import AimlProvider from '@/lib/models/providers/aiml';
 
 export const providers: Record<string, ProviderConstructor<any>> = {
   openai: OpenAIProvider,
   ollama: OllamaProvider,
   transformers: TransformersProvider,
+  anthropic: AnthropicProvider,
+  gemini: GeminiProvider,
+  groq: GroqProvider,
+  deepseek: DeepSeekProvider,
+  aiml: AimlProvider,
+  lmstudio: LMStudioProvider,
+  lemonade: LemonadeProvider,
 };
 
 export const getModelProvidersUIConfigSection =
diff --git a/src/lib/models/providers/lemonade.ts b/src/lib/models/providers/lemonade.ts
new file mode 100644
index 0000000..20680a8
--- /dev/null
+++ b/src/lib/models/providers/lemonade.ts
@@ -0,0 +1,158 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Model, ModelList, ProviderMetadata } from '../types';
+import BaseModelProvider from './baseProvider';
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { Embeddings } from '@langchain/core/embeddings';
+import { UIConfigField } from '@/lib/config/types';
+import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+
+interface LemonadeConfig {
+  baseURL: string;
+  apiKey?: string;
+}
+
+const providerConfigFields: UIConfigField[] = [
+  {
+    type: 'string',
+    name: 'Base URL',
+    key: 'baseURL',
+    description: 'The base URL for Lemonade API',
+    required: true,
+    placeholder: 'https://api.lemonade.ai/v1',
+    env: 'LEMONADE_BASE_URL',
+    scope: 'server',
+  },
+  {
+    type: 'password',
+    name: 'API Key',
+    key: 'apiKey',
+    description: 'Your Lemonade API key (optional)',
+    required: false,
+    placeholder: 'Lemonade API Key',
+    env: 'LEMONADE_API_KEY',
+    scope: 'server',
+  },
+];
+
+class LemonadeProvider extends BaseModelProvider<LemonadeConfig> {
+  constructor(id: string, name: string, config: LemonadeConfig) {
+    super(id, name, config);
+  }
+
+  async getDefaultModels(): Promise<ModelList> {
+    try {
+      const headers: Record<string, string> = {
+        'Content-Type': 'application/json',
+      };
+
+      if (this.config.apiKey) {
+        headers['Authorization'] = `Bearer ${this.config.apiKey}`;
+      }
+
+      const res = await fetch(`${this.config.baseURL}/models`, {
+        method: 'GET',
+        headers,
+      });
+
+      const data = await res.json();
+
+      const models: Model[] = data.data.map((m: any) => {
+        return {
+          name: m.id,
+          key: m.id,
+        };
+      });
+
+      return {
+        embedding: models,
+        chat: models,
+      };
+    } catch (err) {
+      if (err instanceof TypeError) {
+        throw new Error(
+          'Error connecting to Lemonade API. Please ensure the base URL is correct and the service is available.',
+        );
+      }
+
+      throw err;
+    }
+  }
+
+  async getModelList(): Promise<ModelList> {
+    const defaultModels = await this.getDefaultModels();
+    const configProvider = getConfiguredModelProviderById(this.id)!;
+
+    return {
+      embedding: [
+        ...defaultModels.embedding,
+        ...configProvider.embeddingModels,
+      ],
+      chat: [...defaultModels.chat, ...configProvider.chatModels],
+    };
+  }
+
+  async loadChatModel(key: string): Promise<BaseChatModel> {
+    const modelList = await this.getModelList();
+
+    const exists = modelList.chat.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading Lemonade Chat Model. Invalid Model Selected',
+      );
+    }
+
+    return new ChatOpenAI({
+      apiKey: this.config.apiKey || 'not-needed',
+      temperature: 0.7,
+      model: key,
+      configuration: {
+        baseURL: this.config.baseURL,
+      },
+    });
+  }
+
+  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+    const modelList = await this.getModelList();
+    const exists = modelList.embedding.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading Lemonade Embedding Model. Invalid Model Selected.',
+      );
+    }
+
+    return new OpenAIEmbeddings({
+      apiKey: this.config.apiKey || 'not-needed',
+      model: key,
+      configuration: {
+        baseURL: this.config.baseURL,
+      },
+    });
+  }
+
+  static parseAndValidate(raw: any): LemonadeConfig {
+    if (!raw || typeof raw !== 'object')
+      throw new Error('Invalid config provided. Expected object');
+    if (!raw.baseURL)
+      throw new Error('Invalid config provided. Base URL must be provided');
+
+    return {
+      baseURL: String(raw.baseURL),
+      apiKey: raw.apiKey ? String(raw.apiKey) : undefined,
+    };
+  }
+
+  static getProviderConfigFields(): UIConfigField[] {
+    return providerConfigFields;
+  }
+
+  static getProviderMetadata(): ProviderMetadata {
+    return {
+      key: 'lemonade',
+      name: 'Lemonade',
+    };
+  }
+}
+
+export default LemonadeProvider;
diff --git a/src/lib/models/providers/lmstudio.ts b/src/lib/models/providers/lmstudio.ts
new file mode 100644
index 0000000..3a73a34
--- /dev/null
+++ b/src/lib/models/providers/lmstudio.ts
@@ -0,0 +1,148 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Model, ModelList, ProviderMetadata } from '../types';
+import BaseModelProvider from './baseProvider';
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { Embeddings } from '@langchain/core/embeddings';
+import { UIConfigField } from '@/lib/config/types';
+import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+
+interface LMStudioConfig {
+  baseURL: string;
+}
+
+const providerConfigFields: UIConfigField[] = [
+  {
+    type: 'string',
+    name: 'Base URL',
+    key: 'baseURL',
+    description: 'The base URL for LM Studio server',
+    required: true,
+    placeholder: 'http://localhost:1234',
+    env: 'LM_STUDIO_BASE_URL',
+    scope: 'server',
+  },
+];
+
+class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
+  constructor(id: string, name: string, config: LMStudioConfig) {
+    super(id, name, config);
+  }
+
+  private normalizeBaseURL(url: string): string {
+    const trimmed = url.trim().replace(/\/+$/, '');
+    return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
+  }
+
+  async getDefaultModels(): Promise<ModelList> {
+    try {
+      const baseURL = this.normalizeBaseURL(this.config.baseURL);
+
+      const res = await fetch(`${baseURL}/models`, {
+        method: 'GET',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+      });
+
+      const data = await res.json();
+
+      const models: Model[] = data.data.map((m: any) => {
+        return {
+          name: m.id,
+          key: m.id,
+        };
+      });
+
+      return {
+        embedding: models,
+        chat: models,
+      };
+    } catch (err) {
+      if (err instanceof TypeError) {
+        throw new Error(
+          'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
+        );
+      }
+
+      throw err;
+    }
+  }
+
+  async getModelList(): Promise<ModelList> {
+    const defaultModels = await this.getDefaultModels();
+    const configProvider = getConfiguredModelProviderById(this.id)!;
+
+    return {
+      embedding: [
+        ...defaultModels.embedding,
+        ...configProvider.embeddingModels,
+      ],
+      chat: [...defaultModels.chat, ...configProvider.chatModels],
+    };
+  }
+
+  async loadChatModel(key: string): Promise<BaseChatModel> {
+    const modelList = await this.getModelList();
+
+    const exists = modelList.chat.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading LM Studio Chat Model. Invalid Model Selected',
+      );
+    }
+
+    return new ChatOpenAI({
+      apiKey: 'lm-studio',
+      temperature: 0.7,
+      model: key,
+      streaming: true,
+      configuration: {
+        baseURL: this.normalizeBaseURL(this.config.baseURL),
+      },
+    });
+  }
+
+  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+    const modelList = await this.getModelList();
+    const exists = modelList.embedding.find((m) => m.key === key);
+
+    if (!exists) {
+      throw new Error(
+        'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
+      );
+    }
+
+    return new OpenAIEmbeddings({
+      apiKey: 'lm-studio',
+      model: key,
+      configuration: {
+        baseURL: this.normalizeBaseURL(this.config.baseURL),
+      },
+    });
+  }
+
+  static parseAndValidate(raw: any): LMStudioConfig {
+    if (!raw || typeof raw !== 'object')
+      throw new Error('Invalid config provided. Expected object');
+    if (!raw.baseURL)
+      throw new Error('Invalid config provided. Base URL must be provided');
+
+    return {
+      baseURL: String(raw.baseURL),
+    };
+  }
+
+  static getProviderConfigFields(): UIConfigField[] {
+    return providerConfigFields;
+  }
+
+  static getProviderMetadata(): ProviderMetadata {
+    return {
+      key: 'lmstudio',
+      name: 'LM Studio',
+    };
+  }
+}
+
+export default LMStudioProvider;