mirror of https://github.com/ItzCrazyKns/Perplexica.git
synced 2025-11-22 13:08:14 +00:00

Compare commits: 4ee3173368...b67ca79e2a (5 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | b67ca79e2a |  |
|  | 626cb646e2 |  |
|  | 410201b117 |  |
|  | 30fb1e312b |  |
|  | cc5eea17e4 |  |
@@ -1,47 +0,0 @@
-import {
-  getAvailableChatModelProviders,
-  getAvailableEmbeddingModelProviders,
-} from '@/lib/providers';
-
-export const GET = async (req: Request) => {
-  try {
-    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
-      getAvailableChatModelProviders(),
-      getAvailableEmbeddingModelProviders(),
-    ]);
-
-    Object.keys(chatModelProviders).forEach((provider) => {
-      Object.keys(chatModelProviders[provider]).forEach((model) => {
-        delete (chatModelProviders[provider][model] as { model?: unknown })
-          .model;
-      });
-    });
-
-    Object.keys(embeddingModelProviders).forEach((provider) => {
-      Object.keys(embeddingModelProviders[provider]).forEach((model) => {
-        delete (embeddingModelProviders[provider][model] as { model?: unknown })
-          .model;
-      });
-    });
-
-    return Response.json(
-      {
-        chatModelProviders,
-        embeddingModelProviders,
-      },
-      {
-        status: 200,
-      },
-    );
-  } catch (err) {
-    console.error('An error occurred while fetching models', err);
-    return Response.json(
-      {
-        message: 'An error has occurred.',
-      },
-      {
-        status: 500,
-      },
-    );
-  }
-};
src/app/api/providers/route.ts (new file, 28 lines)

@@ -0,0 +1,28 @@
+import ModelRegistry from '@/lib/models/registry';
+
+export const GET = async (req: Request) => {
+  try {
+    const registry = new ModelRegistry();
+
+    const activeProviders = await registry.getActiveProviders();
+
+    return Response.json(
+      {
+        providers: activeProviders,
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err) {
+    console.error('An error occurred while fetching providers', err);
+    return Response.json(
+      {
+        message: 'An error has occurred.',
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+};
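For orientation, here is a rough sketch of how a client might consume the new endpoint. It is not part of the diff: the `MinimalProvider` shape is the one added to `src/lib/models/types.ts` later in this compare, and the error message mirrors the one used in the rewritten `checkConfig` below.

```ts
// Hypothetical consumer of GET /api/providers (not part of this diff).
// MinimalProvider matches the type added in src/lib/models/types.ts.
import { MinimalProvider } from '@/lib/models/types';

const fetchProviders = async (): Promise<MinimalProvider[]> => {
  const res = await fetch('/api/providers', {
    headers: { 'Content-Type': 'application/json' },
  });

  if (!res.ok) {
    // Same failure mode the new checkConfig guards against.
    throw new Error(`Provider fetching failed with status code ${res.status}`);
  }

  const data = await res.json();
  return data.providers;
};
```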
@@ -1,158 +0,0 @@
-import toml from '@iarna/toml';
-
-// Use dynamic imports for Node.js modules to prevent client-side errors
-let fs: any;
-let path: any;
-if (typeof window === 'undefined') {
-  // We're on the server
-  fs = require('fs');
-  path = require('path');
-}
-
-const configFileName = 'config.toml';
-
-interface Config {
-  GENERAL: {
-    SIMILARITY_MEASURE: string;
-    KEEP_ALIVE: string;
-  };
-  MODELS: {
-    OPENAI: {
-      API_KEY: string;
-    };
-    GROQ: {
-      API_KEY: string;
-    };
-    ANTHROPIC: {
-      API_KEY: string;
-    };
-    GEMINI: {
-      API_KEY: string;
-    };
-    OLLAMA: {
-      API_URL: string;
-      API_KEY: string;
-    };
-    DEEPSEEK: {
-      API_KEY: string;
-    };
-    AIMLAPI: {
-      API_KEY: string;
-    };
-    LM_STUDIO: {
-      API_URL: string;
-    };
-    LEMONADE: {
-      API_URL: string;
-      API_KEY: string;
-    };
-    CUSTOM_OPENAI: {
-      API_URL: string;
-      API_KEY: string;
-      MODEL_NAME: string;
-    };
-  };
-  API_ENDPOINTS: {
-    SEARXNG: string;
-  };
-}
-
-type RecursivePartial<T> = {
-  [P in keyof T]?: RecursivePartial<T[P]>;
-};
-
-const loadConfig = () => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    return toml.parse(
-      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-    ) as any as Config;
-  }
-
-  // Client-side fallback - settings will be loaded via API
-  return {} as Config;
-};
-
-export const getSimilarityMeasure = () =>
-  loadConfig().GENERAL.SIMILARITY_MEASURE;
-
-export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
-
-export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
-
-export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;
-
-export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
-
-export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
-
-export const getSearxngApiEndpoint = () =>
-  process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
-
-export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
-
-export const getOllamaApiKey = () => loadConfig().MODELS.OLLAMA.API_KEY;
-
-export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
-
-export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
-
-export const getCustomOpenaiApiKey = () =>
-  loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
-
-export const getCustomOpenaiApiUrl = () =>
-  loadConfig().MODELS.CUSTOM_OPENAI.API_URL;
-
-export const getCustomOpenaiModelName = () =>
-  loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
-
-export const getLMStudioApiEndpoint = () =>
-  loadConfig().MODELS.LM_STUDIO.API_URL;
-
-export const getLemonadeApiEndpoint = () =>
-  loadConfig().MODELS.LEMONADE.API_URL;
-
-export const getLemonadeApiKey = () => loadConfig().MODELS.LEMONADE.API_KEY;
-
-const mergeConfigs = (current: any, update: any): any => {
-  if (update === null || update === undefined) {
-    return current;
-  }
-
-  if (typeof current !== 'object' || current === null) {
-    return update;
-  }
-
-  const result = { ...current };
-
-  for (const key in update) {
-    if (Object.prototype.hasOwnProperty.call(update, key)) {
-      const updateValue = update[key];
-
-      if (
-        typeof updateValue === 'object' &&
-        updateValue !== null &&
-        typeof result[key] === 'object' &&
-        result[key] !== null
-      ) {
-        result[key] = mergeConfigs(result[key], updateValue);
-      } else if (updateValue !== undefined) {
-        result[key] = updateValue;
-      }
-    }
-  }
-
-  return result;
-};
-
-export const updateConfig = (config: RecursivePartial<Config>) => {
-  // Server-side only
-  if (typeof window === 'undefined') {
-    const currentConfig = loadConfig();
-    const mergedConfig = mergeConfigs(currentConfig, config);
-    fs.writeFileSync(
-      path.join(path.join(process.cwd(), `${configFileName}`)),
-      toml.stringify(mergedConfig),
-    );
-  }
-};
@@ -10,3 +10,5 @@ export const getConfiguredModelProviderById = (
 ): ConfigModelProvider | undefined => {
   return getConfiguredModelProviders().find((p) => p.id === id) ?? undefined;
 };
+
+export const getSearxngURL = () => configManager.getConfig('search.searxngURL', '')
@@ -55,6 +55,9 @@ type Config = {
     [key: string]: any;
   };
   modelProviders: ConfigModelProvider[];
+  search: {
+    [key: string]: any
+  }
 };
 
 type EnvMap = {
@@ -73,6 +76,7 @@ type ModelProviderUISection = {
 type UIConfigSections = {
   general: UIConfigField[];
   modelProviders: ModelProviderUISection[];
+  search: UIConfigField[];
 };
 
 export type {
@@ -20,6 +20,7 @@ import crypto from 'crypto';
 import { useSearchParams } from 'next/navigation';
 import { toast } from 'sonner';
 import { getSuggestions } from '../actions';
+import { MinimalProvider } from '../models/types';
 
 export type Section = {
   userMessage: UserMessage;
@@ -66,13 +67,13 @@ export interface File {
 }
 
 interface ChatModelProvider {
   name: string;
   provider: string;
+  key: string;
+  providerId: string;
 }
 
 interface EmbeddingModelProvider {
   name: string;
   provider: string;
+  key: string;
+  providerId: string;
 }
 
 const checkConfig = async (
@@ -82,10 +83,12 @@ const checkConfig = async (
   setHasError: (hasError: boolean) => void,
 ) => {
   try {
-    let chatModel = localStorage.getItem('chatModel');
-    let chatModelProvider = localStorage.getItem('chatModelProvider');
-    let embeddingModel = localStorage.getItem('embeddingModel');
-    let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
+    let chatModelKey = localStorage.getItem('chatModelKey');
+    let chatModelProviderId = localStorage.getItem('chatModelProviderId');
+    let embeddingModelKey = localStorage.getItem('embeddingModelKey');
+    let embeddingModelProviderId = localStorage.getItem(
+      'embeddingModelProviderId',
+    );
 
     const autoImageSearch = localStorage.getItem('autoImageSearch');
     const autoVideoSearch = localStorage.getItem('autoVideoSearch');
@@ -98,145 +101,81 @@ const checkConfig = async (
       localStorage.setItem('autoVideoSearch', 'false');
     }
 
-    const providers = await fetch(`/api/models`, {
+    const res = await fetch(`/api/providers`, {
       headers: {
         'Content-Type': 'application/json',
       },
-    }).then(async (res) => {
-      if (!res.ok)
-        throw new Error(
-          `Failed to fetch models: ${res.status} ${res.statusText}`,
-        );
-      return res.json();
     });
 
-    if (
-      !chatModel ||
-      !chatModelProvider ||
-      !embeddingModel ||
-      !embeddingModelProvider
-    ) {
-      if (!chatModel || !chatModelProvider) {
-        const chatModelProviders = providers.chatModelProviders;
-        const chatModelProvidersKeys = Object.keys(chatModelProviders);
-
-        if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
-          return toast.error('No chat models available');
-        } else {
-          chatModelProvider =
-            chatModelProvidersKeys.find(
-              (provider) =>
-                Object.keys(chatModelProviders[provider]).length > 0,
-            ) || chatModelProvidersKeys[0];
-        }
-
-        if (
-          chatModelProvider === 'custom_openai' &&
-          Object.keys(chatModelProviders[chatModelProvider]).length === 0
-        ) {
-          toast.error(
-            "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
-          );
-          return setHasError(true);
-        }
-
-        chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
-      }
-
-      if (!embeddingModel || !embeddingModelProvider) {
-        const embeddingModelProviders = providers.embeddingModelProviders;
-
-        if (
-          !embeddingModelProviders ||
-          Object.keys(embeddingModelProviders).length === 0
-        )
-          return toast.error('No embedding models available');
-
-        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
-        embeddingModel = Object.keys(
-          embeddingModelProviders[embeddingModelProvider],
-        )[0];
-      }
-
-      localStorage.setItem('chatModel', chatModel!);
-      localStorage.setItem('chatModelProvider', chatModelProvider);
-      localStorage.setItem('embeddingModel', embeddingModel!);
-      localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
-    } else {
-      const chatModelProviders = providers.chatModelProviders;
-      const embeddingModelProviders = providers.embeddingModelProviders;
-
-      if (
-        Object.keys(chatModelProviders).length > 0 &&
-        (!chatModelProviders[chatModelProvider] ||
-          Object.keys(chatModelProviders[chatModelProvider]).length === 0)
-      ) {
-        const chatModelProvidersKeys = Object.keys(chatModelProviders);
-        chatModelProvider =
-          chatModelProvidersKeys.find(
-            (key) => Object.keys(chatModelProviders[key]).length > 0,
-          ) || chatModelProvidersKeys[0];
-
-        localStorage.setItem('chatModelProvider', chatModelProvider);
-      }
-
-      if (
-        chatModelProvider &&
-        !chatModelProviders[chatModelProvider][chatModel]
-      ) {
-        if (
-          chatModelProvider === 'custom_openai' &&
-          Object.keys(chatModelProviders[chatModelProvider]).length === 0
-        ) {
-          toast.error(
-            "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
-          );
-          return setHasError(true);
-        }
-
-        chatModel = Object.keys(
-          chatModelProviders[
-            Object.keys(chatModelProviders[chatModelProvider]).length > 0
-              ? chatModelProvider
-              : Object.keys(chatModelProviders)[0]
-          ],
-        )[0];
-
-        localStorage.setItem('chatModel', chatModel);
-      }
-
-      if (
-        Object.keys(embeddingModelProviders).length > 0 &&
-        !embeddingModelProviders[embeddingModelProvider]
-      ) {
-        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
-        localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
-      }
-
-      if (
-        embeddingModelProvider &&
-        !embeddingModelProviders[embeddingModelProvider][embeddingModel]
-      ) {
-        embeddingModel = Object.keys(
-          embeddingModelProviders[embeddingModelProvider],
-        )[0];
-        localStorage.setItem('embeddingModel', embeddingModel);
-      }
-    }
+    if (!res.ok) {
+      throw new Error(
+        `Provider fetching failed with status code ${res.status}`,
+      );
+    }
+
+    const data = await res.json();
+    const providers: MinimalProvider[] = data.providers;
+
+    if (providers.length === 0) {
+      throw new Error(
+        'No chat model providers found, please configure them in the settings page.',
+      );
+    }
+
+    const chatModelProvider =
+      providers.find((p) => p.id === chatModelProviderId) ??
+      providers.find((p) => p.chatModels.length > 0);
+
+    if (!chatModelProvider) {
+      throw new Error(
+        'No chat models found, please configure them in the settings page.',
+      );
+    }
+
+    chatModelProviderId = chatModelProvider.id;
+
+    const chatModel =
+      chatModelProvider.chatModels.find((m) => m.key === chatModelKey) ??
+      chatModelProvider.chatModels[0];
+    chatModelKey = chatModel.key;
+
+    const embeddingModelProvider =
+      providers.find((p) => p.id === embeddingModelProviderId) ??
+      providers.find((p) => p.embeddingModels.length > 0);
+
+    if (!embeddingModelProvider) {
+      throw new Error(
+        'No embedding models found, please configure them in the settings page.',
+      );
+    }
+
+    embeddingModelProviderId = embeddingModelProvider.id;
+
+    const embeddingModel =
+      embeddingModelProvider.embeddingModels.find(
+        (m) => m.key === embeddingModelKey,
+      ) ?? embeddingModelProvider.embeddingModels[0];
+    embeddingModelKey = embeddingModel.key;
+
+    localStorage.setItem('chatModelKey', chatModelKey);
+    localStorage.setItem('chatModelProviderId', chatModelProviderId);
+    localStorage.setItem('embeddingModelKey', embeddingModelKey);
+    localStorage.setItem('embeddingModelProviderId', embeddingModelProviderId);
 
     setChatModelProvider({
       name: chatModel!,
       provider: chatModelProvider,
+      key: chatModelKey,
+      providerId: chatModelProviderId,
     });
 
     setEmbeddingModelProvider({
       name: embeddingModel!,
       provider: embeddingModelProvider,
+      key: embeddingModelKey,
+      providerId: embeddingModelProviderId,
    });
 
     setIsConfigReady(true);
-  } catch (err) {
+  } catch (err: any) {
     console.error('An error occurred while checking the configuration:', err);
+    toast.error(err.message);
     setIsConfigReady(false);
     setHasError(true);
   }
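The rewritten `checkConfig` replaces the old nested provider/model existence checks with a uniform two-level fallback: take the provider whose id is saved in localStorage if it still exists, otherwise the first provider that actually exposes models; then take the saved model key within that provider, otherwise its first model. A minimal sketch of the pattern, with names taken from the hunk above:

```ts
// Minimal sketch of the fallback selection performed in checkConfig.
import { MinimalProvider, Model } from '@/lib/models/types';

const pickChatProvider = (
  providers: MinimalProvider[],
  savedId: string | null,
): MinimalProvider | undefined =>
  providers.find((p) => p.id === savedId) ??
  providers.find((p) => p.chatModels.length > 0);

const pickChatModel = (
  provider: MinimalProvider,
  savedKey: string | null,
): Model =>
  provider.chatModels.find((m) => m.key === savedKey) ??
  provider.chatModels[0];
```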
@@ -356,15 +295,15 @@ export const ChatProvider = ({
 
   const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
     {
       name: '',
       provider: '',
+      key: '',
+      providerId: '',
     },
   );
 
   const [embeddingModelProvider, setEmbeddingModelProvider] =
     useState<EmbeddingModelProvider>({
       name: '',
       provider: '',
+      key: '',
+      providerId: '',
     });
 
   const [isConfigReady, setIsConfigReady] = useState(false);
@@ -742,12 +681,12 @@ export const ChatProvider = ({
           ? chatHistory.slice(0, messageIndex === -1 ? undefined : messageIndex)
           : chatHistory,
         chatModel: {
           name: chatModelProvider.name,
           provider: chatModelProvider.provider,
+          key: chatModelProvider.key,
+          providerId: chatModelProvider.providerId,
         },
         embeddingModel: {
           name: embeddingModelProvider.name,
           provider: embeddingModelProvider.provider,
+          key: embeddingModelProvider.key,
+          providerId: embeddingModelProvider.providerId,
         },
         systemInstructions: localStorage.getItem('systemInstructions'),
       }),
@@ -4,7 +4,7 @@ import BaseModelProvider, {
 } from './providers/baseProvider';
 import { getConfiguredModelProviders } from '../config/serverRegistry';
 import { providers } from './providers';
-import { ModelList } from './types';
+import { MinimalProvider, Model } from './types';
 
 class ModelRegistry {
   activeProviders: (ConfigModelProvider & {
@@ -35,18 +35,23 @@ class ModelRegistry {
     });
   }
 
-  async getActiveModels() {
-    const models: ModelList[] = [];
+  async getActiveProviders() {
+    const providers: MinimalProvider[] = [];
 
     await Promise.all(
       this.activeProviders.map(async (p) => {
         const m = await p.provider.getModelList();
 
-        models.push(m);
+        providers.push({
+          id: p.id,
+          name: p.name,
+          chatModels: m.chat,
+          embeddingModels: m.embedding,
+        });
       }),
     );
 
-    return models;
+    return providers;
   }
 }
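In effect, `getActiveProviders()` flattens each provider's `getModelList()` result (`{ chat, embedding }`, as the hunk implies) into the wire-friendly `MinimalProvider` records served by the new route. A usage sketch mirroring the route added earlier:

```ts
// Sketch: how the new /api/providers route consumes the registry.
const registry = new ModelRegistry();
const providers = await registry.getActiveProviders();
// => [{ id, name, chatModels, embeddingModels }, ...]
```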
|
||||
@@ -13,4 +13,11 @@ type ProviderMetadata = {
|
||||
key: string;
|
||||
};
|
||||
|
||||
export type { Model, ModelList, ProviderMetadata };
|
||||
type MinimalProvider = {
|
||||
id: string;
|
||||
name: string;
|
||||
chatModels: Model[];
|
||||
embeddingModels: Model[];
|
||||
};
|
||||
|
||||
export type { Model, ModelList, ProviderMetadata, MinimalProvider };
|
||||
|
||||
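To make the new wire format concrete, a `/api/providers` payload built from `MinimalProvider` would look roughly like this. The id and the `Model` fields are invented for illustration (the diff only shows that `Model` carries at least a `key`); the model keys are borrowed from the provider lists deleted below.

```ts
// Illustrative payload only; ids and Model fields are assumptions.
const examplePayload = {
  providers: [
    {
      id: 'openai',
      name: 'OpenAI',
      chatModels: [{ key: 'gpt-4o', name: 'GPT-4 omni' }],
      embeddingModels: [
        { key: 'text-embedding-3-small', name: 'Text Embedding 3 Small' },
      ],
    },
  ],
};
```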
@@ -1,94 +0,0 @@
-import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
-import { getAimlApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-import axios from 'axios';
-
-export const PROVIDER_INFO = {
-  key: 'aimlapi',
-  displayName: 'AI/ML API',
-};
-
-interface AimlApiModel {
-  id: string;
-  name?: string;
-  type?: string;
-}
-
-const API_URL = 'https://api.aimlapi.com';
-
-export const loadAimlApiChatModels = async () => {
-  const apiKey = getAimlApiKey();
-
-  if (!apiKey) return {};
-
-  try {
-    const response = await axios.get(`${API_URL}/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${apiKey}`,
-      },
-    });
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    response.data.data.forEach((model: AimlApiModel) => {
-      if (model.type === 'chat-completion') {
-        chatModels[model.id] = {
-          displayName: model.name || model.id,
-          model: new ChatOpenAI({
-            apiKey: apiKey,
-            modelName: model.id,
-            temperature: 0.7,
-            configuration: {
-              baseURL: API_URL,
-            },
-          }) as unknown as BaseChatModel,
-        };
-      }
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading AI/ML API models: ${err}`);
-    return {};
-  }
-};
-
-export const loadAimlApiEmbeddingModels = async () => {
-  const apiKey = getAimlApiKey();
-
-  if (!apiKey) return {};
-
-  try {
-    const response = await axios.get(`${API_URL}/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${apiKey}`,
-      },
-    });
-
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    response.data.data.forEach((model: AimlApiModel) => {
-      if (model.type === 'embedding') {
-        embeddingModels[model.id] = {
-          displayName: model.name || model.id,
-          model: new OpenAIEmbeddings({
-            apiKey: apiKey,
-            modelName: model.id,
-            configuration: {
-              baseURL: API_URL,
-            },
-          }) as unknown as Embeddings,
-        };
-      }
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading AI/ML API embeddings models: ${err}`);
-    return {};
-  }
-};
@@ -1,78 +0,0 @@
-import { ChatAnthropic } from '@langchain/anthropic';
-import { ChatModel } from '.';
-import { getAnthropicApiKey } from '../config';
-
-export const PROVIDER_INFO = {
-  key: 'anthropic',
-  displayName: 'Anthropic',
-};
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-const anthropicChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Claude 4.1 Opus',
-    key: 'claude-opus-4-1-20250805',
-  },
-  {
-    displayName: 'Claude 4 Opus',
-    key: 'claude-opus-4-20250514',
-  },
-  {
-    displayName: 'Claude 4 Sonnet',
-    key: 'claude-sonnet-4-20250514',
-  },
-  {
-    displayName: 'Claude 3.7 Sonnet',
-    key: 'claude-3-7-sonnet-20250219',
-  },
-  {
-    displayName: 'Claude 3.5 Haiku',
-    key: 'claude-3-5-haiku-20241022',
-  },
-  {
-    displayName: 'Claude 3.5 Sonnet v2',
-    key: 'claude-3-5-sonnet-20241022',
-  },
-  {
-    displayName: 'Claude 3.5 Sonnet',
-    key: 'claude-3-5-sonnet-20240620',
-  },
-  {
-    displayName: 'Claude 3 Opus',
-    key: 'claude-3-opus-20240229',
-  },
-  {
-    displayName: 'Claude 3 Sonnet',
-    key: 'claude-3-sonnet-20240229',
-  },
-  {
-    displayName: 'Claude 3 Haiku',
-    key: 'claude-3-haiku-20240307',
-  },
-];
-
-export const loadAnthropicChatModels = async () => {
-  const anthropicApiKey = getAnthropicApiKey();
-
-  if (!anthropicApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    anthropicChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatAnthropic({
-          apiKey: anthropicApiKey,
-          modelName: model.key,
-          temperature: 0.7,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Anthropic models: ${err}`);
-    return {};
-  }
-};
@@ -1,49 +0,0 @@
-import { ChatOpenAI } from '@langchain/openai';
-import { getDeepseekApiKey } from '../config';
-import { ChatModel } from '.';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-export const PROVIDER_INFO = {
-  key: 'deepseek',
-  displayName: 'Deepseek AI',
-};
-
-const deepseekChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Deepseek Chat (Deepseek V3)',
-    key: 'deepseek-chat',
-  },
-  {
-    displayName: 'Deepseek Reasoner (Deepseek R1)',
-    key: 'deepseek-reasoner',
-  },
-];
-
-export const loadDeepseekChatModels = async () => {
-  const deepseekApiKey = getDeepseekApiKey();
-
-  if (!deepseekApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    deepseekChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatOpenAI({
-          apiKey: deepseekApiKey,
-          modelName: model.key,
-          temperature: 0.7,
-          configuration: {
-            baseURL: 'https://api.deepseek.com',
-          },
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Deepseek models: ${err}`);
-    return {};
-  }
-};
@@ -1,114 +0,0 @@
-import {
-  ChatGoogleGenerativeAI,
-  GoogleGenerativeAIEmbeddings,
-} from '@langchain/google-genai';
-import { getGeminiApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'gemini',
-  displayName: 'Google Gemini',
-};
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-const geminiChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Gemini 2.5 Flash',
-    key: 'gemini-2.5-flash',
-  },
-  {
-    displayName: 'Gemini 2.5 Flash-Lite',
-    key: 'gemini-2.5-flash-lite',
-  },
-  {
-    displayName: 'Gemini 2.5 Pro',
-    key: 'gemini-2.5-pro',
-  },
-  {
-    displayName: 'Gemini 2.0 Flash',
-    key: 'gemini-2.0-flash',
-  },
-  {
-    displayName: 'Gemini 2.0 Flash-Lite',
-    key: 'gemini-2.0-flash-lite',
-  },
-  {
-    displayName: 'Gemini 2.0 Flash Thinking Experimental',
-    key: 'gemini-2.0-flash-thinking-exp-01-21',
-  },
-  {
-    displayName: 'Gemini 1.5 Flash',
-    key: 'gemini-1.5-flash',
-  },
-  {
-    displayName: 'Gemini 1.5 Flash-8B',
-    key: 'gemini-1.5-flash-8b',
-  },
-  {
-    displayName: 'Gemini 1.5 Pro',
-    key: 'gemini-1.5-pro',
-  },
-];
-
-const geminiEmbeddingModels: Record<string, string>[] = [
-  {
-    displayName: 'Text Embedding 004',
-    key: 'models/text-embedding-004',
-  },
-  {
-    displayName: 'Embedding 001',
-    key: 'models/embedding-001',
-  },
-];
-
-export const loadGeminiChatModels = async () => {
-  const geminiApiKey = getGeminiApiKey();
-
-  if (!geminiApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    geminiChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatGoogleGenerativeAI({
-          apiKey: geminiApiKey,
-          model: model.key,
-          temperature: 0.7,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Gemini models: ${err}`);
-    return {};
-  }
-};
-
-export const loadGeminiEmbeddingModels = async () => {
-  const geminiApiKey = getGeminiApiKey();
-
-  if (!geminiApiKey) return {};
-
-  try {
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    geminiEmbeddingModels.forEach((model) => {
-      embeddingModels[model.key] = {
-        displayName: model.displayName,
-        model: new GoogleGenerativeAIEmbeddings({
-          apiKey: geminiApiKey,
-          modelName: model.key,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Gemini embeddings models: ${err}`);
-    return {};
-  }
-};
@@ -1,44 +0,0 @@
-import { ChatGroq } from '@langchain/groq';
-import { getGroqApiKey } from '../config';
-import { ChatModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'groq',
-  displayName: 'Groq',
-};
-
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-export const loadGroqChatModels = async () => {
-  const groqApiKey = getGroqApiKey();
-  if (!groqApiKey) return {};
-
-  try {
-    const res = await fetch('https://api.groq.com/openai/v1/models', {
-      method: 'GET',
-      headers: {
-        Authorization: `bearer ${groqApiKey}`,
-        'Content-Type': 'application/json',
-      },
-    });
-
-    const groqChatModels = (await res.json()).data;
-    const chatModels: Record<string, ChatModel> = {};
-
-    groqChatModels.forEach((model: any) => {
-      chatModels[model.id] = {
-        displayName: model.id,
-        model: new ChatGroq({
-          apiKey: groqApiKey,
-          model: model.id,
-          temperature: 0.7,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Groq models: ${err}`);
-    return {};
-  }
-};
@@ -1,170 +0,0 @@
-import { Embeddings } from '@langchain/core/embeddings';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import {
-  loadOpenAIChatModels,
-  loadOpenAIEmbeddingModels,
-  PROVIDER_INFO as OpenAIInfo,
-  PROVIDER_INFO,
-} from './openai';
-import {
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-} from '../config';
-import { ChatOpenAI } from '@langchain/openai';
-import {
-  loadOllamaChatModels,
-  loadOllamaEmbeddingModels,
-  PROVIDER_INFO as OllamaInfo,
-} from './ollama';
-import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
-import {
-  loadAnthropicChatModels,
-  PROVIDER_INFO as AnthropicInfo,
-} from './anthropic';
-import {
-  loadGeminiChatModels,
-  loadGeminiEmbeddingModels,
-  PROVIDER_INFO as GeminiInfo,
-} from './gemini';
-import {
-  loadTransformersEmbeddingsModels,
-  PROVIDER_INFO as TransformersInfo,
-} from './transformers';
-import {
-  loadDeepseekChatModels,
-  PROVIDER_INFO as DeepseekInfo,
-} from './deepseek';
-import {
-  loadAimlApiChatModels,
-  loadAimlApiEmbeddingModels,
-  PROVIDER_INFO as AimlApiInfo,
-} from './aimlapi';
-import {
-  loadLMStudioChatModels,
-  loadLMStudioEmbeddingsModels,
-  PROVIDER_INFO as LMStudioInfo,
-} from './lmstudio';
-import {
-  loadLemonadeChatModels,
-  loadLemonadeEmbeddingModels,
-  PROVIDER_INFO as LemonadeInfo,
-} from './lemonade';
-
-export const PROVIDER_METADATA = {
-  openai: OpenAIInfo,
-  ollama: OllamaInfo,
-  groq: GroqInfo,
-  anthropic: AnthropicInfo,
-  gemini: GeminiInfo,
-  transformers: TransformersInfo,
-  deepseek: DeepseekInfo,
-  aimlapi: AimlApiInfo,
-  lmstudio: LMStudioInfo,
-  lemonade: LemonadeInfo,
-  custom_openai: {
-    key: 'custom_openai',
-    displayName: 'Custom OpenAI',
-  },
-};
-
-export interface ChatModel {
-  displayName: string;
-  model: BaseChatModel;
-}
-
-export interface EmbeddingModel {
-  displayName: string;
-  model: Embeddings;
-}
-
-export const chatModelProviders: Record<
-  string,
-  () => Promise<Record<string, ChatModel>>
-> = {
-  openai: loadOpenAIChatModels,
-  ollama: loadOllamaChatModels,
-  groq: loadGroqChatModels,
-  anthropic: loadAnthropicChatModels,
-  gemini: loadGeminiChatModels,
-  deepseek: loadDeepseekChatModels,
-  aimlapi: loadAimlApiChatModels,
-  lmstudio: loadLMStudioChatModels,
-  lemonade: loadLemonadeChatModels,
-};
-
-export const embeddingModelProviders: Record<
-  string,
-  () => Promise<Record<string, EmbeddingModel>>
-> = {
-  openai: loadOpenAIEmbeddingModels,
-  ollama: loadOllamaEmbeddingModels,
-  gemini: loadGeminiEmbeddingModels,
-  transformers: loadTransformersEmbeddingsModels,
-  aimlapi: loadAimlApiEmbeddingModels,
-  lmstudio: loadLMStudioEmbeddingsModels,
-  lemonade: loadLemonadeEmbeddingModels,
-};
-
-export const getAvailableChatModelProviders = async () => {
-  const models: Record<string, Record<string, ChatModel>> = {};
-
-  for (const provider in chatModelProviders) {
-    const providerModels = await chatModelProviders[provider]();
-    if (Object.keys(providerModels).length > 0) {
-      models[provider] = providerModels;
-    }
-  }
-
-  const customOpenAiApiKey = getCustomOpenaiApiKey();
-  const customOpenAiApiUrl = getCustomOpenaiApiUrl();
-  const customOpenAiModelName = getCustomOpenaiModelName();
-
-  models['custom_openai'] = {
-    ...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName
-      ? {
-          [customOpenAiModelName]: {
-            displayName: customOpenAiModelName,
-            model: new ChatOpenAI({
-              apiKey: customOpenAiApiKey,
-              modelName: customOpenAiModelName,
-              ...(() => {
-                const temperatureRestrictedModels = [
-                  'gpt-5-nano',
-                  'gpt-5',
-                  'gpt-5-mini',
-                  'o1',
-                  'o3',
-                  'o3-mini',
-                  'o4-mini',
-                ];
-                const isTemperatureRestricted =
-                  temperatureRestrictedModels.some((restrictedModel) =>
-                    customOpenAiModelName.includes(restrictedModel),
-                  );
-                return isTemperatureRestricted ? {} : { temperature: 0.7 };
-              })(),
-              configuration: {
-                baseURL: customOpenAiApiUrl,
-              },
-            }) as unknown as BaseChatModel,
-          },
-        }
-      : {}),
-  };
-
-  return models;
-};
-
-export const getAvailableEmbeddingModelProviders = async () => {
-  const models: Record<string, Record<string, EmbeddingModel>> = {};
-
-  for (const provider in embeddingModelProviders) {
-    const providerModels = await embeddingModelProviders[provider]();
-    if (Object.keys(providerModels).length > 0) {
-      models[provider] = providerModels;
-    }
-  }
-
-  return models;
-};
@@ -1,94 +0,0 @@
-import axios from 'axios';
-import { getLemonadeApiEndpoint, getLemonadeApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'lemonade',
-  displayName: 'Lemonade',
-};
-
-import { ChatOpenAI } from '@langchain/openai';
-import { OpenAIEmbeddings } from '@langchain/openai';
-
-export const loadLemonadeChatModels = async () => {
-  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
-  const lemonadeApiKey = getLemonadeApiKey();
-
-  if (!lemonadeApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        ...(lemonadeApiKey
-          ? { Authorization: `Bearer ${lemonadeApiKey}` }
-          : {}),
-      },
-    });
-
-    const { data: models } = res.data;
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    models.forEach((model: any) => {
-      chatModels[model.id] = {
-        displayName: model.id,
-        model: new ChatOpenAI({
-          apiKey: lemonadeApiKey || 'lemonade-key',
-          modelName: model.id,
-          temperature: 0.7,
-          configuration: {
-            baseURL: `${lemonadeApiEndpoint}/api/v1`,
-          },
-        }),
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Lemonade models: ${err}`);
-    return {};
-  }
-};
-
-export const loadLemonadeEmbeddingModels = async () => {
-  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
-  const lemonadeApiKey = getLemonadeApiKey();
-
-  if (!lemonadeApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
-      headers: {
-        'Content-Type': 'application/json',
-        ...(lemonadeApiKey
-          ? { Authorization: `Bearer ${lemonadeApiKey}` }
-          : {}),
-      },
-    });
-
-    const { data: models } = res.data;
-
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    // Filter models that support embeddings (if Lemonade provides this info)
-    // For now, we'll assume all models can be used for embeddings
-    models.forEach((model: any) => {
-      embeddingModels[model.id] = {
-        displayName: model.id,
-        model: new OpenAIEmbeddings({
-          apiKey: lemonadeApiKey || 'lemonade-key',
-          modelName: model.id,
-          configuration: {
-            baseURL: `${lemonadeApiEndpoint}/api/v1`,
-          },
-        }),
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Lemonade embedding models: ${err}`);
-    return {};
-  }
-};
@@ -1,100 +0,0 @@
-import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
-import axios from 'axios';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'lmstudio',
-  displayName: 'LM Studio',
-};
-import { ChatOpenAI } from '@langchain/openai';
-import { OpenAIEmbeddings } from '@langchain/openai';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-interface LMStudioModel {
-  id: string;
-  name?: string;
-}
-
-const ensureV1Endpoint = (endpoint: string): string =>
-  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
-
-const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
-  try {
-    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-    return true;
-  } catch {
-    return false;
-  }
-};
-
-export const loadLMStudioChatModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      chatModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new ChatOpenAI({
-          apiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-          temperature: 0.7,
-          streaming: true,
-          maxRetries: 3,
-        }) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio models: ${err}`);
-    return {};
-  }
-};
-
-export const loadLMStudioEmbeddingsModels = async () => {
-  const endpoint = getLMStudioApiEndpoint();
-
-  if (!endpoint) return {};
-  if (!(await checkServerAvailability(endpoint))) return {};
-
-  try {
-    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
-      headers: { 'Content-Type': 'application/json' },
-    });
-
-    const embeddingsModels: Record<string, EmbeddingModel> = {};
-
-    response.data.data.forEach((model: LMStudioModel) => {
-      embeddingsModels[model.id] = {
-        displayName: model.name || model.id,
-        model: new OpenAIEmbeddings({
-          apiKey: 'lm-studio',
-          configuration: {
-            baseURL: ensureV1Endpoint(endpoint),
-          },
-          modelName: model.id,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingsModels;
-  } catch (err) {
-    console.error(`Error loading LM Studio embeddings model: ${err}`);
-    return {};
-  }
-};
@@ -1,86 +0,0 @@
-import axios from 'axios';
-import { getKeepAlive, getOllamaApiEndpoint, getOllamaApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'ollama',
-  displayName: 'Ollama',
-};
-import { ChatOllama } from '@langchain/ollama';
-import { OllamaEmbeddings } from '@langchain/ollama';
-
-export const loadOllamaChatModels = async () => {
-  const ollamaApiEndpoint = getOllamaApiEndpoint();
-  const ollamaApiKey = getOllamaApiKey();
-
-  if (!ollamaApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
-      headers: {
-        'Content-Type': 'application/json',
-      },
-    });
-
-    const { models } = res.data;
-
-    const chatModels: Record<string, ChatModel> = {};
-
-    models.forEach((model: any) => {
-      chatModels[model.model] = {
-        displayName: model.name,
-        model: new ChatOllama({
-          baseUrl: ollamaApiEndpoint,
-          model: model.model,
-          temperature: 0.7,
-          keepAlive: getKeepAlive(),
-          ...(ollamaApiKey
-            ? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
-            : {}),
-        }),
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading Ollama models: ${err}`);
-    return {};
-  }
-};
-
-export const loadOllamaEmbeddingModels = async () => {
-  const ollamaApiEndpoint = getOllamaApiEndpoint();
-  const ollamaApiKey = getOllamaApiKey();
-
-  if (!ollamaApiEndpoint) return {};
-
-  try {
-    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
-      headers: {
-        'Content-Type': 'application/json',
-      },
-    });
-
-    const { models } = res.data;
-
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    models.forEach((model: any) => {
-      embeddingModels[model.model] = {
-        displayName: model.name,
-        model: new OllamaEmbeddings({
-          baseUrl: ollamaApiEndpoint,
-          model: model.model,
-          ...(ollamaApiKey
-            ? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
-            : {}),
-        }),
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Ollama embeddings models: ${err}`);
-    return {};
-  }
-};
@@ -1,159 +0,0 @@
-import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
-import { getOpenaiApiKey } from '../config';
-import { ChatModel, EmbeddingModel } from '.';
-
-export const PROVIDER_INFO = {
-  key: 'openai',
-  displayName: 'OpenAI',
-};
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Embeddings } from '@langchain/core/embeddings';
-
-const openaiChatModels: Record<string, string>[] = [
-  {
-    displayName: 'GPT-3.5 Turbo',
-    key: 'gpt-3.5-turbo',
-  },
-  {
-    displayName: 'GPT-4',
-    key: 'gpt-4',
-  },
-  {
-    displayName: 'GPT-4 turbo',
-    key: 'gpt-4-turbo',
-  },
-  {
-    displayName: 'GPT-4 omni',
-    key: 'gpt-4o',
-  },
-  {
-    displayName: 'GPT-4o (2024-05-13)',
-    key: 'gpt-4o-2024-05-13',
-  },
-  {
-    displayName: 'GPT-4 omni mini',
-    key: 'gpt-4o-mini',
-  },
-  {
-    displayName: 'GPT 4.1 nano',
-    key: 'gpt-4.1-nano',
-  },
-  {
-    displayName: 'GPT 4.1 mini',
-    key: 'gpt-4.1-mini',
-  },
-  {
-    displayName: 'GPT 4.1',
-    key: 'gpt-4.1',
-  },
-  {
-    displayName: 'GPT 5 nano',
-    key: 'gpt-5-nano',
-  },
-  {
-    displayName: 'GPT 5',
-    key: 'gpt-5',
-  },
-  {
-    displayName: 'GPT 5 Mini',
-    key: 'gpt-5-mini',
-  },
-  {
-    displayName: 'o1',
-    key: 'o1',
-  },
-  {
-    displayName: 'o3',
-    key: 'o3',
-  },
-  {
-    displayName: 'o3 Mini',
-    key: 'o3-mini',
-  },
-  {
-    displayName: 'o4 Mini',
-    key: 'o4-mini',
-  },
-];
-
-const openaiEmbeddingModels: Record<string, string>[] = [
-  {
-    displayName: 'Text Embedding 3 Small',
-    key: 'text-embedding-3-small',
-  },
-  {
-    displayName: 'Text Embedding 3 Large',
-    key: 'text-embedding-3-large',
-  },
-];
-
-export const loadOpenAIChatModels = async () => {
-  const openaiApiKey = getOpenaiApiKey();
-
-  if (!openaiApiKey) return {};
-
-  try {
-    const chatModels: Record<string, ChatModel> = {};
-
-    openaiChatModels.forEach((model) => {
-      // Models that only support temperature = 1
-      const temperatureRestrictedModels = [
-        'gpt-5-nano',
-        'gpt-5',
-        'gpt-5-mini',
-        'o1',
-        'o3',
-        'o3-mini',
-        'o4-mini',
-      ];
-      const isTemperatureRestricted = temperatureRestrictedModels.some(
-        (restrictedModel) => model.key.includes(restrictedModel),
-      );
-
-      const modelConfig: any = {
-        apiKey: openaiApiKey,
-        modelName: model.key,
-      };
-
-      // Only add temperature if the model supports it
-      if (!isTemperatureRestricted) {
-        modelConfig.temperature = 0.7;
-      }
-
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel,
-      };
-    });
-
-    return chatModels;
-  } catch (err) {
-    console.error(`Error loading OpenAI models: ${err}`);
-    return {};
-  }
-};
-
-export const loadOpenAIEmbeddingModels = async () => {
-  const openaiApiKey = getOpenaiApiKey();
-
-  if (!openaiApiKey) return {};
-
-  try {
-    const embeddingModels: Record<string, EmbeddingModel> = {};
-
-    openaiEmbeddingModels.forEach((model) => {
-      embeddingModels[model.key] = {
-        displayName: model.displayName,
-        model: new OpenAIEmbeddings({
-          apiKey: openaiApiKey,
-          modelName: model.key,
-        }) as unknown as Embeddings,
-      };
-    });
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading OpenAI embeddings models: ${err}`);
-    return {};
-  }
-};
@@ -1,36 +0,0 @@
-import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
-
-export const PROVIDER_INFO = {
-  key: 'transformers',
-  displayName: 'Hugging Face',
-};
-
-export const loadTransformersEmbeddingsModels = async () => {
-  try {
-    const embeddingModels = {
-      'xenova-bge-small-en-v1.5': {
-        displayName: 'BGE Small',
-        model: new HuggingFaceTransformersEmbeddings({
-          modelName: 'Xenova/bge-small-en-v1.5',
-        }),
-      },
-      'xenova-gte-small': {
-        displayName: 'GTE Small',
-        model: new HuggingFaceTransformersEmbeddings({
-          modelName: 'Xenova/gte-small',
-        }),
-      },
-      'xenova-bert-base-multilingual-uncased': {
-        displayName: 'Bert Multilingual',
-        model: new HuggingFaceTransformersEmbeddings({
-          modelName: 'Xenova/bert-base-multilingual-uncased',
-        }),
-      },
-    };
-
-    return embeddingModels;
-  } catch (err) {
-    console.error(`Error loading Transformers embeddings model: ${err}`);
-    return {};
-  }
-};
@@ -1,5 +1,5 @@
 import axios from 'axios';
-import { getSearxngApiEndpoint } from './config';
+import { getSearxngURL } from './config/serverRegistry';
 
 interface SearxngSearchOptions {
   categories?: string[];
@@ -23,7 +23,7 @@ export const searchSearxng = async (
   query: string,
   opts?: SearxngSearchOptions,
 ) => {
-  const searxngURL = getSearxngApiEndpoint();
+  const searxngURL = getSearxngURL();
 
   const url = new URL(`${searxngURL}/search?format=json`);
   url.searchParams.append('q', query);
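One behavioural note on this swap: the deleted `getSearxngApiEndpoint()` preferred `process.env.SEARXNG_API_URL` over the TOML value, whereas `getSearxngURL()` reads `search.searxngURL` from the config manager with `''` as its default. If the key is unset, `searchSearxng` now builds a URL from an empty base, which throws a `TypeError` rather than falling back to the environment. A sketch of the difference, assuming `configManager.getConfig(key, fallback)` returns the fallback for unset keys:

```ts
// Old resolution (from the deleted config module):
//   process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG
// New resolution (from the serverRegistry hunk above):
const searxngURL = configManager.getConfig('search.searxngURL', '');
new URL(`${searxngURL}/search?format=json`); // throws TypeError when searxngURL is ''
```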