mirror of
https://github.com/ItzCrazyKns/Perplexica.git
synced 2025-11-25 14:38:14 +00:00
160 lines
3.3 KiB
TypeScript
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
|
|
import { getOpenaiApiKey } from '../config';
|
|
import { ChatModel, EmbeddingModel } from '.';
|
|
|
|
export const PROVIDER_INFO = {
|
|
key: 'openai',
|
|
displayName: 'OpenAI',
|
|
};
|
|
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
|
import { Embeddings } from '@langchain/core/embeddings';
|
|
|
|
const openaiChatModels: Record<string, string>[] = [
|
|
{
|
|
displayName: 'GPT-3.5 Turbo',
|
|
key: 'gpt-3.5-turbo',
|
|
},
|
|
{
|
|
displayName: 'GPT-4',
|
|
key: 'gpt-4',
|
|
},
|
|
{
|
|
displayName: 'GPT-4 turbo',
|
|
key: 'gpt-4-turbo',
|
|
},
|
|
{
|
|
displayName: 'GPT-4 omni',
|
|
key: 'gpt-4o',
|
|
},
|
|
{
|
|
displayName: 'GPT-4o (2024-05-13)',
|
|
key: 'gpt-4o-2024-05-13',
|
|
},
|
|
{
|
|
displayName: 'GPT-4 omni mini',
|
|
key: 'gpt-4o-mini',
|
|
},
|
|
{
|
|
displayName: 'GPT 4.1 nano',
|
|
key: 'gpt-4.1-nano',
|
|
},
|
|
{
|
|
displayName: 'GPT 4.1 mini',
|
|
key: 'gpt-4.1-mini',
|
|
},
|
|
{
|
|
displayName: 'GPT 4.1',
|
|
key: 'gpt-4.1',
|
|
},
|
|
{
|
|
displayName: 'GPT 5 nano',
|
|
key: 'gpt-5-nano',
|
|
},
|
|
{
|
|
displayName: 'GPT 5',
|
|
key: 'gpt-5',
|
|
},
|
|
{
|
|
displayName: 'GPT 5 Mini',
|
|
key: 'gpt-5-mini',
|
|
},
|
|
{
|
|
displayName: 'o1',
|
|
key: 'o1',
|
|
},
|
|
{
|
|
displayName: 'o3',
|
|
key: 'o3',
|
|
},
|
|
{
|
|
displayName: 'o3 Mini',
|
|
key: 'o3-mini',
|
|
},
|
|
{
|
|
displayName: 'o4 Mini',
|
|
key: 'o4-mini',
|
|
},
|
|
];
const openaiEmbeddingModels: Record<string, string>[] = [
|
|
{
|
|
displayName: 'Text Embedding 3 Small',
|
|
key: 'text-embedding-3-small',
|
|
},
|
|
{
|
|
displayName: 'Text Embedding 3 Large',
|
|
key: 'text-embedding-3-large',
|
|
},
|
|
];
export const loadOpenAIChatModels = async () => {
|
|
const openaiApiKey = getOpenaiApiKey();
|
|
|
|
if (!openaiApiKey) return {};
|
|
|
|
try {
|
|
const chatModels: Record<string, ChatModel> = {};
|
|
|
|
openaiChatModels.forEach((model) => {
|
|
// Models that only support temperature = 1
|
|
const temperatureRestrictedModels = [
|
|
'gpt-5-nano',
|
|
'gpt-5',
|
|
'gpt-5-mini',
|
|
'o1',
|
|
'o3',
|
|
'o3-mini',
|
|
'o4-mini',
|
|
];
|
|
const isTemperatureRestricted = temperatureRestrictedModels.some(
|
|
(restrictedModel) => model.key.includes(restrictedModel),
|
|
);
|
|
|
|
const modelConfig: any = {
|
|
apiKey: openaiApiKey,
|
|
modelName: model.key,
|
|
};
|
|
|
|
// Only add temperature if the model supports it
|
|
if (!isTemperatureRestricted) {
|
|
modelConfig.temperature = 0.7;
|
|
}
|
|
|
|
chatModels[model.key] = {
|
|
displayName: model.displayName,
|
|
model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel,
|
|
};
|
|
});
|
|
|
|
return chatModels;
|
|
} catch (err) {
|
|
console.error(`Error loading OpenAI models: ${err}`);
|
|
return {};
|
|
}
|
|
};
export const loadOpenAIEmbeddingModels = async () => {
|
|
const openaiApiKey = getOpenaiApiKey();
|
|
|
|
if (!openaiApiKey) return {};
|
|
|
|
try {
|
|
const embeddingModels: Record<string, EmbeddingModel> = {};
|
|
|
|
openaiEmbeddingModels.forEach((model) => {
|
|
embeddingModels[model.key] = {
|
|
displayName: model.displayName,
|
|
model: new OpenAIEmbeddings({
|
|
apiKey: openaiApiKey,
|
|
modelName: model.key,
|
|
}) as unknown as Embeddings,
|
|
};
|
|
});
|
|
|
|
return embeddingModels;
|
|
} catch (err) {
|
|
console.error(`Error loading OpenAI embeddings models: ${err}`);
|
|
return {};
|
|
}
|
|
};