mirror of
https://github.com/ItzCrazyKns/Perplexica.git
synced 2025-09-15 05:51:33 +00:00
Introduces support for the AI/ML API provider, including configuration options, chat and embedding model loading, and UI integration. Updates documentation and sample config to reflect the new provider.
125 lines
3.4 KiB
TypeScript
import {
  getAnthropicApiKey,
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
  getGeminiApiKey,
  getGroqApiKey,
  getOllamaApiEndpoint,
  getOpenaiApiKey,
  getDeepseekApiKey,
  getAimlApiKey,
  getLMStudioApiEndpoint,
  updateConfig,
} from '@/lib/config';
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
|
|
try {
|
|
const config: Record<string, any> = {};
|
|
|
|
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
|
|
getAvailableChatModelProviders(),
|
|
getAvailableEmbeddingModelProviders(),
|
|
]);
|
|
|
|
config['chatModelProviders'] = {};
|
|
config['embeddingModelProviders'] = {};
|
|
|
|
for (const provider in chatModelProviders) {
|
|
config['chatModelProviders'][provider] = Object.keys(
|
|
chatModelProviders[provider],
|
|
).map((model) => {
|
|
return {
|
|
name: model,
|
|
displayName: chatModelProviders[provider][model].displayName,
|
|
};
|
|
});
|
|
}
|
|
|
|
for (const provider in embeddingModelProviders) {
|
|
config['embeddingModelProviders'][provider] = Object.keys(
|
|
embeddingModelProviders[provider],
|
|
).map((model) => {
|
|
return {
|
|
name: model,
|
|
displayName: embeddingModelProviders[provider][model].displayName,
|
|
};
|
|
});
|
|
}
|
|
|
|
config['openaiApiKey'] = getOpenaiApiKey();
|
|
config['ollamaApiUrl'] = getOllamaApiEndpoint();
|
|
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
|
|
config['anthropicApiKey'] = getAnthropicApiKey();
|
|
config['groqApiKey'] = getGroqApiKey();
|
|
config['geminiApiKey'] = getGeminiApiKey();
|
|
config['deepseekApiKey'] = getDeepseekApiKey();
|
|
config['aimlApiKey'] = getAimlApiKey();
|
|
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
|
|
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
|
|
config['customOpenaiModelName'] = getCustomOpenaiModelName();
|
|
|
|
return Response.json({ ...config }, { status: 200 });
|
|
} catch (err) {
|
|
console.error('An error occurred while getting config:', err);
|
|
return Response.json(
|
|
{ message: 'An error occurred while getting config' },
|
|
{ status: 500 },
|
|
);
|
|
}
|
|
};
export const POST = async (req: Request) => {
|
|
try {
|
|
const config = await req.json();
|
|
|
|
const updatedConfig = {
|
|
MODELS: {
|
|
OPENAI: {
|
|
API_KEY: config.openaiApiKey,
|
|
},
|
|
GROQ: {
|
|
API_KEY: config.groqApiKey,
|
|
},
|
|
ANTHROPIC: {
|
|
API_KEY: config.anthropicApiKey,
|
|
},
|
|
GEMINI: {
|
|
API_KEY: config.geminiApiKey,
|
|
},
|
|
OLLAMA: {
|
|
API_URL: config.ollamaApiUrl,
|
|
},
|
|
DEEPSEEK: {
|
|
API_KEY: config.deepseekApiKey,
|
|
},
|
|
AIMLAPI: {
|
|
API_KEY: config.aimlApiKey,
|
|
},
|
|
LM_STUDIO: {
|
|
API_URL: config.lmStudioApiUrl,
|
|
},
|
|
CUSTOM_OPENAI: {
|
|
API_URL: config.customOpenaiApiUrl,
|
|
API_KEY: config.customOpenaiApiKey,
|
|
MODEL_NAME: config.customOpenaiModelName,
|
|
},
|
|
},
|
|
};
|
|
|
|
updateConfig(updatedConfig);
|
|
|
|
return Response.json({ message: 'Config updated' }, { status: 200 });
|
|
} catch (err) {
|
|
console.error('An error occurred while updating config:', err);
|
|
return Response.json(
|
|
{ message: 'An error occurred while updating config' },
|
|
{ status: 500 },
|
|
);
|
|
}
|
|
};