Compare commits

...

1 Commit

Author SHA1 Message Date
ItzCrazyKns
463c8692da feat(providers): add models.json for models list 2025-04-08 16:00:45 +05:30
9 changed files with 326 additions and 278 deletions

1
data/.gitignore vendored
View File

@@ -1,2 +1,3 @@
* *
!models.json
!.gitignore !.gitignore

157
data/models.json Normal file
View File

@@ -0,0 +1,157 @@
{
"_comment": "Ollama models are fetched from the Ollama API, so they are not included here.",
"chatModels": {
"openai": [
{
"displayName": "GPT-3.5 Turbo",
"key": "gpt-3.5-turbo"
},
{
"displayName": "GPT-4",
"key": "gpt-4"
},
{
"displayName": "GPT-4 Turbo",
"key": "gpt-4-turbo"
},
{
"displayName": "GPT-4 Omni",
"key": "gpt-4o"
},
{
"displayName": "GPT-4 Omni Mini",
"key": "gpt-4o-mini"
}
],
"groq": [
{
"displayName": "Gemma2 9B IT",
"key": "gemma2-9b-it"
},
{
"displayName": "Llama 3.3 70B Versatile",
"key": "llama-3.3-70b-versatile"
},
{
"displayName": "Llama 3.1 8B Instant",
"key": "llama-3.1-8b-instant"
},
{
"displayName": "Llama3 70B 8192",
"key": "llama3-70b-8192"
},
{
"displayName": "Llama3 8B 8192",
"key": "llama3-8b-8192"
},
{
"displayName": "Mixtral 8x7B 32768",
"key": "mixtral-8x7b-32768"
},
{
"displayName": "Qwen QWQ 32B (Preview)",
"key": "qwen-qwq-32b"
},
{
"displayName": "Mistral Saba 24B (Preview)",
"key": "mistral-saba-24b"
},
{
"displayName": "DeepSeek R1 Distill Llama 70B (Preview)",
"key": "deepseek-r1-distill-llama-70b"
}
],
"gemini": [
{
"displayName": "Gemini 2.5 Pro Experimental",
"key": "gemini-2.5-pro-exp-03-25"
},
{
"displayName": "Gemini 2.0 Flash",
"key": "gemini-2.0-flash"
},
{
"displayName": "Gemini 2.0 Flash-Lite",
"key": "gemini-2.0-flash-lite"
},
{
"displayName": "Gemini 2.0 Flash Thinking Experimental",
"key": "gemini-2.0-flash-thinking-exp-01-21"
},
{
"displayName": "Gemini 1.5 Flash",
"key": "gemini-1.5-flash"
},
{
"displayName": "Gemini 1.5 Flash-8B",
"key": "gemini-1.5-flash-8b"
},
{
"displayName": "Gemini 1.5 Pro",
"key": "gemini-1.5-pro"
}
],
"anthropic": [
{
"displayName": "Claude 3.7 Sonnet",
"key": "claude-3-7-sonnet-20250219"
},
{
"displayName": "Claude 3.5 Haiku",
"key": "claude-3-5-haiku-20241022"
},
{
"displayName": "Claude 3.5 Sonnet v2",
"key": "claude-3-5-sonnet-20241022"
},
{
"displayName": "Claude 3.5 Sonnet",
"key": "claude-3-5-sonnet-20240620"
},
{
"displayName": "Claude 3 Opus",
"key": "claude-3-opus-20240229"
},
{
"displayName": "Claude 3 Sonnet",
"key": "claude-3-sonnet-20240229"
},
{
"displayName": "Claude 3 Haiku",
"key": "claude-3-haiku-20240307"
}
]
},
"embeddingModels": {
"openai": [
{
"displayName": "Text Embedding 3 Large",
"key": "text-embedding-3-large"
},
{
"displayName": "Text Embedding 3 Small",
"key": "text-embedding-3-small"
}
],
"gemini": [
{
"displayName": "Gemini Embedding",
"key": "gemini-embedding-exp"
}
],
"transformers": [
{
"displayName": "BGE Small",
"key": "xenova-bge-small-en-v1.5"
},
{
"displayName": "GTE Small",
"key": "xenova-gte-small"
},
{
"displayName": "Bert Multilingual",
"key": "xenova-bert-base-multilingual-uncased"
}
]
}
}

View File

@@ -1,48 +1,22 @@
import { ChatAnthropic } from '@langchain/anthropic'; import { ChatAnthropic } from '@langchain/anthropic';
import { ChatModel } from '.'; import { ChatModel, getModelsList, RawModel } from '.';
import { getAnthropicApiKey } from '../config'; import { getAnthropicApiKey } from '../config';
import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [ const loadModels = () => {
{ return getModelsList()?.['chatModels']['anthropic'] as unknown as RawModel[]
displayName: 'Claude 3.7 Sonnet', }
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
export const loadAnthropicChatModels = async () => { export const loadAnthropicChatModels = async () => {
const anthropicApiKey = getAnthropicApiKey(); const anthropicApiKey = getAnthropicApiKey();
if (!anthropicApiKey) return {}; if (!anthropicApiKey) return {};
const models = loadModels()
try { try {
const chatModels: Record<string, ChatModel> = {}; const chatModels: Record<string, ChatModel> = {};
anthropicChatModels.forEach((model) => { models.forEach((model) => {
chatModels[model.key] = { chatModels[model.key] = {
displayName: model.displayName, displayName: model.displayName,
model: new ChatAnthropic({ model: new ChatAnthropic({

View File

@@ -3,57 +3,24 @@ import {
GoogleGenerativeAIEmbeddings, GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai'; } from '@langchain/google-genai';
import { getGeminiApiKey } from '../config'; import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.'; import { ChatModel, EmbeddingModel, getModelsList, RawModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings'; import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [ const loadModels = (modelType: 'chat' | 'embedding') => {
{ return getModelsList()?.[modelType === 'chat' ? 'chatModels' : 'embeddingModels']['gemini'] as unknown as RawModel[]
displayName: 'Gemini 2.5 Pro Experimental', }
key: 'gemini-2.5-pro-exp-03-25',
},
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Flash Thinking Experimental',
key: 'gemini-2.0-flash-thinking-exp-01-21',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Gemini Embedding',
key: 'gemini-embedding-exp',
},
];
export const loadGeminiChatModels = async () => { export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey(); const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {}; if (!geminiApiKey) return {};
const models = loadModels('chat');
try { try {
const chatModels: Record<string, ChatModel> = {}; const chatModels: Record<string, ChatModel> = {};
geminiChatModels.forEach((model) => { models.forEach((model) => {
chatModels[model.key] = { chatModels[model.key] = {
displayName: model.displayName, displayName: model.displayName,
model: new ChatGoogleGenerativeAI({ model: new ChatGoogleGenerativeAI({
@@ -73,13 +40,14 @@ export const loadGeminiChatModels = async () => { export const loadGeminiChatModels = async () => {
export const loadGeminiEmbeddingModels = async () => { export const loadGeminiEmbeddingModels = async () => {
const geminiApiKey = getGeminiApiKey(); const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {}; if (!geminiApiKey) return {};
const models = loadModels('embedding');
try { try {
const embeddingModels: Record<string, EmbeddingModel> = {}; const embeddingModels: Record<string, EmbeddingModel> = {};
geminiEmbeddingModels.forEach((model) => { models.forEach((model) => {
embeddingModels[model.key] = { embeddingModels[model.key] = {
displayName: model.displayName, displayName: model.displayName,
model: new GoogleGenerativeAIEmbeddings({ model: new GoogleGenerativeAIEmbeddings({

View File

@@ -1,88 +1,22 @@
import { ChatOpenAI } from '@langchain/openai'; import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../config'; import { getGroqApiKey } from '../config';
import { ChatModel } from '.'; import { ChatModel, getModelsList, RawModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [ const loadModels = () => {
{ return getModelsList()?.chatModels['groq'] as unknown as RawModel[]
displayName: 'Gemma2 9B IT', }
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
];
export const loadGroqChatModels = async () => { export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey(); const groqApiKey = getGroqApiKey();
if (!groqApiKey) return {}; if (!groqApiKey) return {};
const models = loadModels()
try { try {
const chatModels: Record<string, ChatModel> = {}; const chatModels: Record<string, ChatModel> = {};
groqChatModels.forEach((model) => { models.forEach((model) => {
chatModels[model.key] = { chatModels[model.key] = {
displayName: model.displayName, displayName: model.displayName,
model: new ChatOpenAI({ model: new ChatOpenAI({

View File

@@ -1,26 +1,39 @@
import { Embeddings } from '@langchain/core/embeddings'; import { Embeddings } from '@langchain/core/embeddings'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai'; import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai'
import { import {
getCustomOpenaiApiKey, getCustomOpenaiApiKey,
getCustomOpenaiApiUrl, getCustomOpenaiApiUrl,
getCustomOpenaiModelName, getCustomOpenaiModelName,
} from '../config'; } from '../config'
import { ChatOpenAI } from '@langchain/openai'; import { ChatOpenAI } from '@langchain/openai'
import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama'; import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama'
import { loadGroqChatModels } from './groq'; import { loadGroqChatModels } from './groq'
import { loadAnthropicChatModels } from './anthropic'; import { loadAnthropicChatModels } from './anthropic'
import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini'; import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini'
import { loadTransformersEmbeddingsModels } from './transformers'; import { loadTransformersEmbeddingsModels } from './transformers'
import path from 'path'
import fs from 'fs'
export interface ChatModel { export interface ChatModel {
displayName: string; displayName: string
model: BaseChatModel; model: BaseChatModel
} }
export interface EmbeddingModel { export interface EmbeddingModel {
displayName: string; displayName: string
model: Embeddings; model: Embeddings
}
export type RawModel = {
displayName: string
key: string
}
type ModelsList = {
[key in "chatModels" | "embeddingModels"]: {
[key: string]: RawModel[]
}
} }
export const chatModelProviders: Record< export const chatModelProviders: Record<
@@ -32,7 +45,7 @@ export const chatModelProviders: Record< groq: loadGroqChatModels, groq: loadGroqChatModels,
groq: loadGroqChatModels, groq: loadGroqChatModels,
anthropic: loadAnthropicChatModels, anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels, gemini: loadGeminiChatModels,
}; }
export const embeddingModelProviders: Record< export const embeddingModelProviders: Record<
string, string,
@@ -42,21 +55,43 @@ export const embeddingModelProviders: Record< string, string,
ollama: loadOllamaEmbeddingModels, ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels, gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels, transformers: loadTransformersEmbeddingsModels,
}; }
export const getModelsList = (): ModelsList | null => {
const modelFile = path.join(process.cwd(), 'data/models.json')
try {
const content = fs.readFileSync(modelFile, 'utf-8')
return JSON.parse(content) as ModelsList
} catch (err) {
console.error(`Error reading models file: ${err}`)
return null
}
}
export const updateModelsList = (models: ModelsList) => {
try {
const modelFile = path.join(process.cwd(), 'data/models.json')
const content = JSON.stringify(models, null, 2)
fs.writeFileSync(modelFile, content, 'utf-8')
} catch(err) {
console.error(`Error updating models file: ${err}`)
}
}
export const getAvailableChatModelProviders = async () => { export const getAvailableChatModelProviders = async () => {
const models: Record<string, Record<string, ChatModel>> = {}; const models: Record<string, Record<string, ChatModel>> = {}
for (const provider in chatModelProviders) { for (const provider in chatModelProviders) {
const providerModels = await chatModelProviders[provider](); const providerModels = await chatModelProviders[provider]()
if (Object.keys(providerModels).length > 0) { if (Object.keys(providerModels).length > 0) {
models[provider] = providerModels; models[provider] = providerModels
} }
} }
const customOpenAiApiKey = getCustomOpenaiApiKey(); const customOpenAiApiKey = getCustomOpenaiApiKey()
const customOpenAiApiUrl = getCustomOpenaiApiUrl(); const customOpenAiApiUrl = getCustomOpenaiApiUrl()
const customOpenAiModelName = getCustomOpenaiModelName(); const customOpenAiModelName = getCustomOpenaiModelName()
models['custom_openai'] = { models['custom_openai'] = {
...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName ...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName
@@ -74,20 +109,20 @@ export const getAvailableChatModelProviders = async () => {
}, },
} }
: {}), : {}),
}; }
return models; return models
}; }
export const getAvailableEmbeddingModelProviders = async () => { export const getAvailableEmbeddingModelProviders = async () => {
const models: Record<string, Record<string, EmbeddingModel>> = {}; const models: Record<string, Record<string, EmbeddingModel>> = {}
for (const provider in embeddingModelProviders) { for (const provider in embeddingModelProviders) {
const providerModels = await embeddingModelProviders[provider](); const providerModels = await embeddingModelProviders[provider]()
if (Object.keys(providerModels).length > 0) { if (Object.keys(providerModels).length > 0) {
models[provider] = providerModels; models[provider] = providerModels
} }
} }
return models; return models
}; }

View File

@@ -1,24 +1,39 @@
import axios from 'axios'; import axios from 'axios'
import { getKeepAlive, getOllamaApiEndpoint } from '../config'; import { getKeepAlive, getOllamaApiEndpoint } from '../config'
import { ChatModel, EmbeddingModel } from '.'; import { ChatModel, EmbeddingModel } from '.'
import { ChatOllama } from '@langchain/community/chat_models/ollama'; import { ChatOllama } from '@langchain/community/chat_models/ollama'
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'; import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaApiEndpoint) return {};
const loadModels = async (apiURL: string) => {
try { try {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, { const res = await axios.get(`${apiURL}/api/tags`, {
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}, },
}); })
const { models } = res.data; if (res.status !== 200) {
console.error(`Failed to load Ollama models: ${res.data}`)
return []
}
const chatModels: Record<string, ChatModel> = {}; const { models } = res.data
return models
} catch (err) {
console.error(`Error loading Ollama models: ${err}`)
return []
}
}
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint()
if (!ollamaApiEndpoint) return {}
const models = await loadModels(ollamaApiEndpoint)
try {
const chatModels: Record<string, ChatModel> = {}
models.forEach((model: any) => { models.forEach((model: any) => {
chatModels[model.model] = { chatModels[model.model] = {
@@ -29,31 +44,24 @@ export const loadOllamaChatModels = async () => {
temperature: 0.7, temperature: 0.7,
keepAlive: getKeepAlive(), keepAlive: getKeepAlive(),
}), }),
};
});
return chatModels;
} catch (err) {
console.error(`Error loading Ollama models: ${err}`);
return {};
} }
}; })
return chatModels
} catch (err) {
console.error(`Error loading Ollama models: ${err}`)
return {}
}
}
export const loadOllamaEmbeddingModels = async () => { export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint(); const ollamaApiEndpoint = getOllamaApiEndpoint()
if (!ollamaApiEndpoint) return {}
if (!ollamaApiEndpoint) return {}; const models = await loadModels(ollamaApiEndpoint)
try { try {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, { const embeddingModels: Record<string, EmbeddingModel> = {}
headers: {
'Content-Type': 'application/json',
},
});
const { models } = res.data;
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model: any) => { models.forEach((model: any) => {
embeddingModels[model.model] = { embeddingModels[model.model] = {
@@ -62,12 +70,12 @@ export const loadOllamaEmbeddingModels = async () => {
baseUrl: ollamaApiEndpoint, baseUrl: ollamaApiEndpoint,
model: model.model, model: model.model,
}), }),
};
});
return embeddingModels;
} catch (err) {
console.error(`Error loading Ollama embeddings models: ${err}`);
return {};
} }
}; })
return embeddingModels
} catch (err) {
console.error(`Error loading Ollama embeddings models: ${err}`)
return {}
}
}

View File

@@ -1,52 +1,23 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../config'; import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.'; import { ChatModel, EmbeddingModel, getModelsList, RawModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings'; import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [ const loadModels = (modelType: 'chat' | 'embedding') => {
{ return getModelsList()?.[modelType === 'chat' ? 'chatModels' : 'embeddingModels']['openai'] as unknown as RawModel[]
displayName: 'GPT-3.5 Turbo', }
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
export const loadOpenAIChatModels = async () => { export const loadOpenAIChatModels = async () => {
const openaiApiKey = getOpenaiApiKey(); const openaiApiKey = getOpenaiApiKey();
const models = loadModels('chat');
if (!openaiApiKey) return {}; if (!openaiApiKey || !models) return {};
try { try {
const chatModels: Record<string, ChatModel> = {}; const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => { models.forEach((model) => {
chatModels[model.key] = { chatModels[model.key] = {
displayName: model.displayName, displayName: model.displayName,
model: new ChatOpenAI({ model: new ChatOpenAI({
@@ -66,13 +37,14 @@ export const loadOpenAIChatModels = async () => {
export const loadOpenAIEmbeddingModels = async () => { export const loadOpenAIEmbeddingModels = async () => {
const openaiApiKey = getOpenaiApiKey(); const openaiApiKey = getOpenaiApiKey();
const models = loadModels('embedding');
if (!openaiApiKey) return {}; if (!openaiApiKey || !models) return {};
try { try {
const embeddingModels: Record<string, EmbeddingModel> = {}; const embeddingModels: Record<string, EmbeddingModel> = {};
openaiEmbeddingModels.forEach((model) => { models.forEach((model) => {
embeddingModels[model.key] = { embeddingModels[model.key] = {
displayName: model.displayName, displayName: model.displayName,
model: new OpenAIEmbeddings({ model: new OpenAIEmbeddings({

View File

@@ -1,31 +1,30 @@
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer'; import { EmbeddingModel, getModelsList, RawModel } from '.'
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer'
const loadModels = () => {
return getModelsList()?.embeddingModels[
'transformers'
] as unknown as RawModel[]
}
export const loadTransformersEmbeddingsModels = async () => { export const loadTransformersEmbeddingsModels = async () => {
try { try {
const embeddingModels = { const models = loadModels()
'xenova-bge-small-en-v1.5': {
displayName: 'BGE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bge-small-en-v1.5',
}),
},
'xenova-gte-small': {
displayName: 'GTE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/gte-small',
}),
},
'xenova-bert-base-multilingual-uncased': {
displayName: 'Bert Multilingual',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bert-base-multilingual-uncased',
}),
},
};
return embeddingModels; const embeddingModels: Record<string, EmbeddingModel> = {}
} catch (err) {
console.error(`Error loading Transformers embeddings model: ${err}`); models.forEach(model => {
return {}; embeddingModels[model.key] = {
displayName: model.displayName,
model: new HuggingFaceTransformersEmbeddings({
modelName: model.key,
}),
} }
}; })
return embeddingModels
} catch (err) {
console.error(`Error loading Transformers embeddings model: ${err}`)
return {}
}
}