diff --git a/README.md b/README.md
index 14a6193..9551250 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,9 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
   - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
   - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
   - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
-  - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
+  - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
+  - `DEEPSEEK`: Your Deepseek API key. **You only need to fill this if you wish to use Deepseek's models**.
+  - `AIMLAPI`: Your AI/ML API key. **You only need to fill this if you wish to use AI/ML API's models and embeddings**.
 
 **Note**: You can change these after starting Perplexica from the settings dialog.
 
diff --git a/sample.config.toml b/sample.config.toml
index 1db2125..ba3e98e 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -25,6 +25,9 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.AIMLAPI]
+API_KEY = "" # Required to use AI/ML API chat and embedding models
+
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
 
diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts
index c1e5bbd..0c11b23 100644
--- a/src/app/api/config/route.ts
+++ b/src/app/api/config/route.ts
@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getAimlApiKey,
   getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
@@ -57,6 +58,7 @@ export const GET = async (req: Request) => {
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['deepseekApiKey'] = getDeepseekApiKey();
+    config['aimlApiKey'] = getAimlApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -95,6 +97,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      AIMLAPI: {
+        API_KEY: config.aimlApiKey,
+      },
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },
diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index 6f20f01..b91519e 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -23,6 +23,7 @@ interface SettingsType {
   ollamaApiUrl: string;
   lmStudioApiUrl: string;
   deepseekApiKey: string;
+  aimlApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
@@ -862,6 +863,25 @@ const Page = () => {
               />
             </div>
 
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                AI/ML API Key
+              </p>
+              <Input
+                type="text"
+                placeholder="AI/ML API Key"
+                value={config.aimlApiKey}
+                isSaving={savingStates['aimlApiKey']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    aimlApiKey: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('aimlApiKey', value)}
+              />
+            </div>
+
             <div className="flex flex-col space-y-1">
               <p className="text-black/70 dark:text-white/70 text-sm">
                 LM Studio API URL
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 78ad09c..d885e13 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -35,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  AIMLAPI: {
+    API_KEY: string;
+  };
   LM_STUDIO: {
     API_URL: string;
   };
@@ -85,6 +88,8 @@ export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
 
 export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
 
+export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
 
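
With the plumbing above in place, the round trip for the new key is symmetric: GET `/api/config` surfaces `MODELS.AIMLAPI.API_KEY` as the flat field `aimlApiKey`, and POST maps it back before persisting. A quick manual check of that wiring is sketched below; it is illustrative only, not part of the PR, and it assumes the dev server is on the default port 3000:

```ts
// Smoke-test the new aimlApiKey field end to end. Assumes Perplexica's dev
// server is running on localhost:3000; adjust BASE if yours differs.
const BASE = 'http://localhost:3000';

async function main() {
  // GET /api/config returns the flat settings object built in route.ts,
  // which now includes aimlApiKey (read from MODELS.AIMLAPI.API_KEY).
  const config = await (await fetch(`${BASE}/api/config`)).json();
  console.log('aimlApiKey before:', config.aimlApiKey);

  // POST expects the same flat shape back; the handler copies aimlApiKey
  // into MODELS.AIMLAPI.API_KEY. Send the full object back so the other
  // provider keys are not blanked out.
  await fetch(`${BASE}/api/config`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...config, aimlApiKey: 'sk-test' }),
  });
}

main().catch(console.error);
```
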
diff --git a/src/lib/providers/aimlapi.ts b/src/lib/providers/aimlapi.ts
new file mode 100644
index 0000000..e934362
--- /dev/null
+++ b/src/lib/providers/aimlapi.ts
@@ -0,0 +1,94 @@
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { getAimlApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+import axios from 'axios';
+
+export const PROVIDER_INFO = {
+  key: 'aimlapi',
+  displayName: 'AI/ML API',
+};
+
+interface AimlApiModel {
+  id: string;
+  name?: string;
+  type?: string;
+}
+
+const API_URL = 'https://api.aimlapi.com';
+
+export const loadAimlApiChatModels = async () => {
+  const apiKey = getAimlApiKey();
+
+  if (!apiKey) return {};
+
+  try {
+    const response = await axios.get(`${API_URL}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
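
The new file's diff is cut off at this point (its hunk header promises 94 lines, so roughly thirty more follow). Judging from the imports at the top of `aimlapi.ts` and the shape of Perplexica's other OpenAI-compatible provider loaders, the remainder presumably filters the fetched model list into chat and embedding registries. The sketch below is a hedged reconstruction, not the PR's actual code: the `type` discriminator values, the `temperature` default, and whether `baseURL` needs a `/v1` suffix are all assumptions.

```ts
// Presumed continuation of loadAimlApiChatModels; all imports are the ones
// already shown at the top of aimlapi.ts. Assumption: GET /models returns
// a body of the form { data: AimlApiModel[] }.
    response.data.data.forEach((model: AimlApiModel) => {
      // Assumed discriminator — the visible diff only declares `type?: string`.
      if (model.type && model.type !== 'chat-completion') return;

      chatModels[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
          openAIApiKey: apiKey,
          modelName: model.id,
          temperature: 0.7, // assumed default
          configuration: {
            baseURL: API_URL, // may need a '/v1' suffix — not visible here
          },
        }) as unknown as BaseChatModel,
      };
    });

    return chatModels;
  } catch (err) {
    console.error(`Error loading AI/ML API models: ${err}`);
    return {};
  }
};

// A matching embedding loader would mirror the chat loader, which is
// presumably why the file imports OpenAIEmbeddings and EmbeddingModel.
export const loadAimlApiEmbeddingModels = async () => {
  const apiKey = getAimlApiKey();

  if (!apiKey) return {};

  try {
    const response = await axios.get(`${API_URL}/models`, {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
    });

    const embeddingModels: Record<string, EmbeddingModel> = {};

    response.data.data.forEach((model: AimlApiModel) => {
      if (model.type !== 'embedding') return; // assumed discriminator

      embeddingModels[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
          openAIApiKey: apiKey,
          modelName: model.id,
          configuration: {
            baseURL: API_URL, // same caveat as above
          },
        }) as unknown as Embeddings,
      };
    });

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading AI/ML API embedding models: ${err}`);
    return {};
  }
};
```

Reusing `ChatOpenAI` and `OpenAIEmbeddings` against a custom `baseURL` is the same pattern Perplexica's other hosted providers follow, so the reconstruction should be close in spirit even where the details differ from the merged code.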