diff --git a/ui/lib/providers/anthropic.ts b/ui/lib/providers/anthropic.ts
new file mode 100644
index 0000000..894fda5
--- /dev/null
+++ b/ui/lib/providers/anthropic.ts
@@ -0,0 +1,63 @@
+import { ChatOpenAI } from '@langchain/openai';
+import { ChatModel } from '.';
+import { getAnthropicApiKey } from '../config';
+
+const anthropicChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Claude 3.7 Sonnet',
+    key: 'claude-3-7-sonnet-20250219',
+  },
+  {
+    displayName: 'Claude 3.5 Haiku',
+    key: 'claude-3-5-haiku-20241022',
+  },
+  {
+    displayName: 'Claude 3.5 Sonnet v2',
+    key: 'claude-3-5-sonnet-20241022',
+  },
+  {
+    displayName: 'Claude 3.5 Sonnet',
+    key: 'claude-3-5-sonnet-20240620',
+  },
+  {
+    displayName: 'Claude 3 Opus',
+    key: 'claude-3-opus-20240229',
+  },
+  {
+    displayName: 'Claude 3 Sonnet',
+    key: 'claude-3-sonnet-20240229',
+  },
+  {
+    displayName: 'Claude 3 Haiku',
+    key: 'claude-3-haiku-20240307',
+  },
+];
+
+export const loadAnthropicChatModels = async () => {
+  const anthropicApiKey = getAnthropicApiKey();
+
+  if (!anthropicApiKey) return {};
+
+  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    anthropicChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: anthropicApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.anthropic.com/v1/',
+          },
+        }),
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Anthropic models: ${err}`);
+    return {};
+  }
+};
diff --git a/ui/lib/providers/gemini.ts b/ui/lib/providers/gemini.ts
new file mode 100644
index 0000000..28a31cf
--- /dev/null
+++ b/ui/lib/providers/gemini.ts
@@ -0,0 +1,94 @@
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { getGeminiApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+
+const geminiChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemini 2.0 Flash',
+    key: 'gemini-2.0-flash',
+  },
+  {
+    displayName: 'Gemini 2.0 Flash-Lite',
+    key: 'gemini-2.0-flash-lite',
+  },
+  {
+    displayName: 'Gemini 2.0 Pro Experimental',
+    key: 'gemini-2.0-pro-exp-02-05',
+  },
+  {
+    displayName: 'Gemini 1.5 Flash',
+    key: 'gemini-1.5-flash',
+  },
+  {
+    displayName: 'Gemini 1.5 Flash-8B',
+    key: 'gemini-1.5-flash-8b',
+  },
+  {
+    displayName: 'Gemini 1.5 Pro',
+    key: 'gemini-1.5-pro',
+  },
+];
+
+const geminiEmbeddingModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemini Embedding',
+    key: 'gemini-embedding-exp',
+  },
+];
+
+export const loadGeminiChatModels = async () => {
+  const geminiApiKey = getGeminiApiKey();
+
+  if (!geminiApiKey) return {};
+
+  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    geminiChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: geminiApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
+          },
+        }),
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Gemini models: ${err}`);
+    return {};
+  }
+};
+
+export const loadGeminiEmbeddingModels = async () => {
+  const geminiApiKey = getGeminiApiKey();
+
+  if (!geminiApiKey) return {};
+
+  try {
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    geminiEmbeddingModels.forEach((model) => {
+      embeddingModels[model.key] = {
+        displayName: model.displayName,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: geminiApiKey,
+          modelName: model.key,
+          configuration: {
+            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
+          },
+        }),
+      };
+    });
+
+    return embeddingModels;
+  } catch (err) {
+    console.error(`Error loading Gemini embeddings models: ${err}`);
+    return {};
+  }
+};
diff --git a/ui/lib/providers/groq.ts b/ui/lib/providers/groq.ts
new file mode 100644
index 0000000..05947fb
--- /dev/null
+++ b/ui/lib/providers/groq.ts
@@ -0,0 +1,107 @@
+import { ChatOpenAI } from '@langchain/openai';
+import { getGroqApiKey } from '../config';
+import { ChatModel } from '.';
+
+const groqChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemma2 9B IT',
+    key: 'gemma2-9b-it',
+  },
+  {
+    displayName: 'Llama 3.3 70B Versatile',
+    key: 'llama-3.3-70b-versatile',
+  },
+  {
+    displayName: 'Llama 3.1 8B Instant',
+    key: 'llama-3.1-8b-instant',
+  },
+  {
+    displayName: 'Llama3 70B 8192',
+    key: 'llama3-70b-8192',
+  },
+  {
+    displayName: 'Llama3 8B 8192',
+    key: 'llama3-8b-8192',
+  },
+  {
+    displayName: 'Mixtral 8x7B 32768',
+    key: 'mixtral-8x7b-32768',
+  },
+  {
+    displayName: 'Qwen QWQ 32B (Preview)',
+    key: 'qwen-qwq-32b',
+  },
+  {
+    displayName: 'Mistral Saba 24B (Preview)',
+    key: 'mistral-saba-24b',
+  },
+  {
+    displayName: 'Qwen 2.5 Coder 32B (Preview)',
+    key: 'qwen-2.5-coder-32b',
+  },
+  {
+    displayName: 'Qwen 2.5 32B (Preview)',
+    key: 'qwen-2.5-32b',
+  },
+  {
+    displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
+    key: 'deepseek-r1-distill-qwen-32b',
+  },
+  {
+    displayName: 'DeepSeek R1 Distill Llama 70B SpecDec (Preview)',
+    key: 'deepseek-r1-distill-llama-70b-specdec',
+  },
+  {
+    displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
+    key: 'deepseek-r1-distill-llama-70b',
+  },
+  {
+    displayName: 'Llama 3.3 70B SpecDec (Preview)',
+    key: 'llama-3.3-70b-specdec',
+  },
+  {
+    displayName: 'Llama 3.2 1B Preview (Preview)',
+    key: 'llama-3.2-1b-preview',
+  },
+  {
+    displayName: 'Llama 3.2 3B Preview (Preview)',
+    key: 'llama-3.2-3b-preview',
+  },
+  {
+    displayName: 'Llama 3.2 11B Vision Preview (Preview)',
+    key: 'llama-3.2-11b-vision-preview',
+  },
+  {
+    displayName: 'Llama 3.2 90B Vision Preview (Preview)',
+    key: 'llama-3.2-90b-vision-preview',
+  },
+];
+
+export const loadGroqChatModels = async () => {
+  const groqApiKey = getGroqApiKey();
+
+  if (!groqApiKey) return {};
+
+  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    groqChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: groqApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        }),
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Groq models: ${err}`);
+    return {};
+  }
+};
diff --git a/ui/lib/providers/index.ts b/ui/lib/providers/index.ts
index cbb1677..caa8074 100644
--- a/ui/lib/providers/index.ts
+++ b/ui/lib/providers/index.ts
@@ -1,72 +1,91 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
-import { getCustomOpenaiApiKey, getCustomOpenaiApiUrl, getCustomOpenaiModelName } from '../config';
+import {
+  getCustomOpenaiApiKey,
+  getCustomOpenaiApiUrl,
+  getCustomOpenaiModelName,
+} from '../config';
 import { ChatOpenAI } from '@langchain/openai';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
+import { loadGroqChatModels } from './groq';
+import { loadAnthropicChatModels } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
 
-export interface ChatModelProvider {
-  displayName: string
-  model: BaseChatModel
+export interface ChatModel {
+  displayName: string;
+  model: BaseChatModel;
 }
 
-export interface EmbeddingModelProvider {
-  displayName: string
-  model: Embeddings
+export interface EmbeddingModel {
+  displayName: string;
+  model: Embeddings;
 }
 
-const chatModelProviders: Record<string, () => Promise<Record<string, ChatModelProvider>>> = {
-  openai: loadOpenAIChatModels
-}
+const chatModelProviders: Record<
+  string,
+  () => Promise<Record<string, ChatModel>>
+> = {
+  openai: loadOpenAIChatModels,
+  ollama: loadOllamaChatModels,
+  groq: loadGroqChatModels,
+  anthropic: loadAnthropicChatModels,
+  gemini: loadGeminiChatModels
+};
 
-const embeddingModelProviders: Record<string, () => Promise<Record<string, EmbeddingModelProvider>>> = {
-  openai: loadOpenAIEmbeddingModels
-}
+const embeddingModelProviders: Record<
+  string,
+  () => Promise<Record<string, EmbeddingModel>>
+> = {
+  openai: loadOpenAIEmbeddingModels,
+  ollama: loadOllamaEmbeddingModels,
+  gemini: loadGeminiEmbeddingModels
+};
 
 export const getAvailableChatModelProviders = async () => {
-    const models: Record<string, Record<string, ChatModelProvider>> = {};
-
-    for (const provider in chatModelProviders) {
-      const providerModels = await chatModelProviders[provider]();
-      if (Object.keys(providerModels).length > 0) {
-        models[provider] = providerModels;
-      }
+  const models: Record<string, Record<string, ChatModel>> = {};
+
+  for (const provider in chatModelProviders) {
+    const providerModels = await chatModelProviders[provider]();
+    if (Object.keys(providerModels).length > 0) {
+      models[provider] = providerModels;
     }
-
-    const customOpenAiApiKey = getCustomOpenaiApiKey();
-    const customOpenAiApiUrl = getCustomOpenaiApiUrl();
-    const customOpenAiModelName = getCustomOpenaiModelName();
-
-    models['custom_openai'] = {
-      ...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName
-        ? {
-            [customOpenAiModelName]: {
-              displayName: customOpenAiModelName,
-              model: new ChatOpenAI({
-                openAIApiKey: customOpenAiApiKey,
-                modelName: customOpenAiModelName,
-                temperature: 0.7,
-                configuration: {
-                  baseURL: customOpenAiApiUrl,
-                },
-              }),
-            },
-          }
-        : {}),
-    };
-
-    return models;
+  }
+
+  const customOpenAiApiKey = getCustomOpenaiApiKey();
+  const customOpenAiApiUrl = getCustomOpenaiApiUrl();
+  const customOpenAiModelName = getCustomOpenaiModelName();
+
+  models['custom_openai'] = {
+    ...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName
+      ? {
+          [customOpenAiModelName]: {
+            displayName: customOpenAiModelName,
+            model: new ChatOpenAI({
+              openAIApiKey: customOpenAiApiKey,
+              modelName: customOpenAiModelName,
+              temperature: 0.7,
+              configuration: {
+                baseURL: customOpenAiApiUrl,
+              },
+            }),
+          },
+        }
+      : {}),
+  };
+
+  return models;
 };
 
 export const getAvailableEmbeddingModelProviders = async () => {
-    const models: Record<string, Record<string, EmbeddingModelProvider>> = {};
-
-    for (const provider in embeddingModelProviders) {
-      const providerModels = await embeddingModelProviders[provider]();
-      if (Object.keys(providerModels).length > 0) {
-        models[provider] = providerModels;
-      }
+  const models: Record<string, Record<string, EmbeddingModel>> = {};
+
+  for (const provider in embeddingModelProviders) {
+    const providerModels = await embeddingModelProviders[provider]();
+    if (Object.keys(providerModels).length > 0) {
+      models[provider] = providerModels;
     }
-
-    return models;
-  };
-  
\ No newline at end of file
+  }
+
+  return models;
+};
diff --git a/ui/lib/providers/ollama.ts b/ui/lib/providers/ollama.ts
new file mode 100644
index 0000000..92e98e4
--- /dev/null
+++ b/ui/lib/providers/ollama.ts
@@ -0,0 +1,73 @@
+import axios from 'axios';
+import { getKeepAlive, getOllamaApiEndpoint } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
+import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
+
+export const loadOllamaChatModels = async () => {
+  const ollamaApiEndpoint = getOllamaApiEndpoint();
+
+  if (!ollamaApiEndpoint) return {};
+
+  try {
+    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    });
+
+    const { models } = res.data;
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    models.forEach((model: any) => {
+      chatModels[model.model] = {
+        displayName: model.name,
+        model: new ChatOllama({
+          baseUrl: ollamaApiEndpoint,
+          model: model.model,
+          temperature: 0.7,
+          keepAlive: getKeepAlive(),
+        }),
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Ollama models: ${err}`);
+    return {};
+  }
+};
+
+export const loadOllamaEmbeddingModels = async () => {
+  const ollamaApiEndpoint = getOllamaApiEndpoint();
+
+  if (!ollamaApiEndpoint) return {};
+
+  try {
+    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    });
+
+    const { models } = res.data;
+
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    models.forEach((model: any) => {
+      embeddingModels[model.model] = {
+        displayName: model.name,
+        model: new OllamaEmbeddings({
+          baseUrl: ollamaApiEndpoint,
+          model: model.model,
+        }),
+      };
+    });
+
+    return embeddingModels;
+  } catch (err) {
+    console.error(`Error loading Ollama embeddings models: ${err}`);
+    return {};
+  }
+};
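
Reviewer note: the diff itself ships no caller, so below is a minimal smoke-test sketch of how the new registries might be exercised. The file name, its assumed location under ui/lib (so that './providers' resolves to the index.ts modified above), and the 'groq' provider pick are illustrative assumptions, not part of this change; any provider whose key or endpoint is configured would appear in the returned records.

// providers-smoke-test.ts — hypothetical file, assumed to sit in ui/lib/
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from './providers';

const main = async () => {
  // Each loader returns {} when its API key/endpoint is unset, and such
  // providers are filtered out, so only configured providers show up here.
  const chatProviders = await getAvailableChatModelProviders();
  console.log('Chat providers:', Object.keys(chatProviders));

  // 'groq' is an illustrative pick; any entry in the record works the same.
  const groq = chatProviders['groq'];
  if (groq) {
    const [key, { displayName, model }] = Object.entries(groq)[0];
    console.log(`Invoking ${displayName} (${key})`);
    // BaseChatModel accepts a plain string prompt via invoke().
    const res = await model.invoke('Reply with one word: ping');
    console.log(res.content);
  }

  const embeddingProviders = await getAvailableEmbeddingModelProviders();
  console.log('Embedding providers:', Object.keys(embeddingProviders));
};

main().catch(console.error);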