Compare commits

...

29 Commits

Author SHA1 Message Date
57407112fb feat(package): bump version 2025-07-16 10:39:50 +05:30
b280cc2e01 Merge pull request #787 from chriswritescode-dev/IOS
Fix: IOS Input Zoom / Support PWA Home Screen App, closes #458
2025-07-15 22:10:01 +05:30
e6ebf892c5 feat(styles): update globals.css 2025-07-15 21:47:20 +05:30
b754641058 feat(gitignore): add certificates 2025-07-15 21:45:44 +05:30
722f4f760e feat(manifest): update icons & screenshots 2025-07-15 21:45:37 +05:30
01e04a209f feat(public): add screenshots & update icons 2025-07-15 21:45:24 +05:30
0299fd1ea0 Merge pull request #817 from kittrydge/patch-1
Update Linux ollama instructions in README.md
2025-07-15 20:23:02 +05:30
cf8dec53ca feat(chat-window): select provider if model's present, closes #803 2025-07-07 16:09:36 +05:30
d5c012d748 Revert "Update ChatWindow.tsx"
This reverts commit 2ccbd9a44c.
2025-07-07 15:52:39 +05:30
2ccbd9a44c Update ChatWindow.tsx 2025-07-05 22:00:06 +05:30
ccd89d48d9 Update Linux ollama instructions in README.md
When setting the OLLAMA_HOST environment variable, the port number must be specified ( see https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux )

Also, 'systemctl daemon-reload' needs to be called after changing a systemd unit file, and before the relevant systemd service is reloaded.
2025-07-01 18:00:26 -06:00
87d788ddef Update README.md 2025-06-30 19:55:23 +05:30
809b625a34 feat(widgets): fix size on smaller screens, closes #791 2025-06-30 15:42:41 +05:30
95c753a549 Merge branch 'pr/815' 2025-06-30 15:38:31 +05:30
0bb8b7ec5c feat(weather-widget): enable geolocation for weather data
Replaces the previous commented-out geolocation logic with an implementation that uses the browser's geolocation API and reverse geocoding to determine the user's city. Falls back to approximate location if permission is denied or unavailable.
2025-06-28 13:49:17 +05:30
c6d084f5dc feat: add AIML API provider
Introduces support for the AI/ML API provider, including configuration options, chat and embedding model loading, and UI integration. Updates documentation and sample config to reflect the new provider.
2025-06-27 13:43:54 +02:00
0024ce36c8 Merge pull request #784 from Davixk/fix/docs-typo
docs: correct typo in npm start command
2025-06-21 20:27:34 +05:30
c44e746807 Merge pull request #785 from koyasi777/patch-1
feat(gemini): add Gemini 2.5 Flash & Pro preview models (May 2025)
2025-06-21 20:26:37 +05:30
b1826066f4 Merge pull request #801 from glitchySid/patch-1
Update README.md
2025-06-21 20:25:41 +05:30
b0b8acc45b Merge pull request #781 from alckasoc/master
feat(models): Update Gemini 2.5 pro key
2025-06-21 20:25:06 +05:30
e2b9ffc072 Update README.md
Mentioned that Gemini api key can be used in perplexica.
2025-06-11 22:52:13 +05:30
68c43ea372 Fix: IOS Input Zoom
config for theme consistency and iOS standalone mode
- Modified manifest.ts to ensure proper metadata

- Added display: standalone for iOS PWA behavior
2025-06-02 21:52:41 -04:00
3b46baca4f docs(readme): fix typo in npm start command 2025-06-02 05:52:31 +02:00
772e461c08 feat(gemini): add Gemini 2.5 Flash & Pro preview models (May 2025) 2025-06-02 00:30:18 +09:00
5c6018a0f9 docs: correct typo in npm start command 2025-06-01 06:35:16 +02:00
0b7989c3d3 feat(empty-chat): remove unused imports 2025-05-30 09:55:06 +05:30
8cfcc3e39c feat(chat): update margins and spacing 2025-05-30 09:52:36 +05:30
9eba4b7373 Merge branch 'master' of https://github.com/alckasoc/Perplexica 2025-05-29 18:27:00 -07:00
91306dc0c7 update gemini 2.5 pro key 2025-05-29 18:26:36 -07:00
24 changed files with 302 additions and 54 deletions

.assets/manifest.json (0 changes)

.gitignore (vendored, 2 additions)

@@ -37,3 +37,5 @@ Thumbs.db
 # Db
 db.sqlite
 /searxng
+
+certificates

README.md

@@ -16,7 +16,7 @@
 <hr/>
-[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
+[![Discord](https://dcbadge.limes.pink/api/server/26aArMy8tT?style=flat)](https://discord.gg/26aArMy8tT)
 ![preview](.assets/perplexica-screenshot.png?)
@@ -90,6 +90,9 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
 - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
 - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
+- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
+- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
+- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**
 
 **Note**: You can change these after starting Perplexica from the settings dialog.
@@ -111,7 +114,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
 3. After populating the configuration run `npm i`.
 4. Install the dependencies and then execute `npm run build`.
-5. Finally, start the app by running `npm rum start`
+5. Finally, start the app by running `npm run start`
 
 **Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
@@ -132,7 +135,7 @@ If you're encountering an Ollama connection error, it is likely due to the backe
 3. **Linux Users - Expose Ollama to Network:**
-   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
+   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0:11434"`. (Change the port number if you are using a different one.) Then reload the systemd manager configuration with `systemctl daemon-reload`, and restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
   - Ensure that the port (default is 11434) is not blocked by your firewall.

docs/installation/UPDATING.md

@@ -41,6 +41,6 @@ To update Perplexica to the latest version, follow these steps:
 3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
 4. After populating the configuration run `npm i`.
 5. Install the dependencies and then execute `npm run build`.
-6. Finally, start the app by running `npm rum start`
+6. Finally, start the app by running `npm run start`
 
 ---

package.json

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.11.0-rc1",
+  "version": "1.11.0-rc2",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {

public/icon-100.png (binary, new file, 916 B)

public/icon-50.png (binary, new file, 515 B)

public/icon.png (binary, new file, 30 KiB)

public/screenshots/p1.png (binary, new file, 183 KiB)

public/screenshots/p1_small.png (binary, new file, 130 KiB)

public/screenshots/p2.png (binary, new file, 627 KiB)

public/screenshots/p2_small.png (binary, new file, 202 KiB)

sample.config.toml

@@ -25,6 +25,9 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.AIMLAPI]
+API_KEY = "" # Required to use AI/ML API chat and embedding models
+
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234

src/app/api/config/route.ts

@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getAimlApiKey,
   getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
@@ -57,6 +58,7 @@ export const GET = async (req: Request) => {
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['deepseekApiKey'] = getDeepseekApiKey();
+    config['aimlApiKey'] = getAimlApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -95,6 +97,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      AIMLAPI: {
+        API_KEY: config.aimlApiKey,
+      },
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },

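The route above completes the round trip for the new provider key: GET exposes `aimlApiKey` to the settings UI, and POST writes it back under `MODELS.AIMLAPI`. A minimal client-side sketch of that flow, assuming the route is mounted at /api/config (the conventional app-router path; the endpoint URL itself is not shown in this view):

// Hypothetical round trip against the config route shown above.
const current = await fetch('/api/config').then((res) => res.json());

await fetch('/api/config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    ...current,
    aimlApiKey: 'your-key-here', // persisted server-side as MODELS.AIMLAPI.API_KEY
  }),
});
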
src/app/globals.css

@@ -11,3 +11,11 @@
     display: none;
   }
 }
+
+@media screen and (-webkit-min-device-pixel-ratio: 0) {
+  select,
+  textarea,
+  input {
+    font-size: 16px !important;
+  }
+}

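The CSS above works because iOS Safari automatically zooms the page when a focused form control has a font size below 16px; pinning `select`, `textarea`, and `input` to 16px removes the trigger, and the `-webkit-min-device-pixel-ratio` media query scopes the rule to WebKit engines. A complementary approach, not part of this diff, is Next.js's viewport export; a minimal sketch, assuming it would live in src/app/layout.tsx:

import type { Viewport } from 'next';

// Capping the scale also suppresses the iOS focus zoom, but it disables
// pinch-to-zoom for all users, so the 16px rule above is the gentler fix.
export const viewport: Viewport = {
  width: 'device-width',
  initialScale: 1,
  maximumScale: 1,
};
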
src/app/manifest.ts (new file, 54 lines)

@@ -0,0 +1,54 @@
import type { MetadataRoute } from 'next';

export default function manifest(): MetadataRoute.Manifest {
  return {
    name: 'Perplexica - Chat with the internet',
    short_name: 'Perplexica',
    description:
      'Perplexica is an AI powered chatbot that is connected to the internet.',
    start_url: '/',
    display: 'standalone',
    background_color: '#0a0a0a',
    theme_color: '#0a0a0a',
    screenshots: [
      {
        src: '/screenshots/p1.png',
        form_factor: 'wide',
        sizes: '2560x1600',
      },
      {
        src: '/screenshots/p2.png',
        form_factor: 'wide',
        sizes: '2560x1600',
      },
      {
        src: '/screenshots/p1_small.png',
        form_factor: 'narrow',
        sizes: '828x1792',
      },
      {
        src: '/screenshots/p2_small.png',
        form_factor: 'narrow',
        sizes: '828x1792',
      },
    ],
    icons: [
      {
        src: '/icon-50.png',
        sizes: '50x50',
        type: 'image/png' as const,
      },
      {
        src: '/icon-100.png',
        sizes: '100x100',
        type: 'image/png',
      },
      {
        src: '/icon.png',
        sizes: '440x440',
        type: 'image/png',
        purpose: 'any',
      },
    ],
  };
}

src/app/settings/page.tsx

@@ -23,6 +23,7 @@ interface SettingsType {
   ollamaApiUrl: string;
   lmStudioApiUrl: string;
   deepseekApiKey: string;
+  aimlApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
@@ -862,6 +863,25 @@ const Page = () => {
                 />
               </div>
 
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  AI/ML API Key
+                </p>
+                <Input
+                  type="text"
+                  placeholder="AI/ML API Key"
+                  value={config.aimlApiKey}
+                  isSaving={savingStates['aimlApiKey']}
+                  onChange={(e) => {
+                    setConfig((prev) => ({
+                      ...prev!,
+                      aimlApiKey: e.target.value,
+                    }));
+                  }}
+                  onSave={(value) => saveConfig('aimlApiKey', value)}
+                />
+              </div>
+
               <div className="flex flex-col space-y-1">
                 <p className="text-black/70 dark:text-white/70 text-sm">
                   LM Studio API URL

src/components/ChatWindow.tsx

@@ -82,14 +82,29 @@ const checkConfig = async (
   ) {
     if (!chatModel || !chatModelProvider) {
       const chatModelProviders = providers.chatModelProviders;
+      const chatModelProvidersKeys = Object.keys(chatModelProviders);
 
-      chatModelProvider =
-        chatModelProvider || Object.keys(chatModelProviders)[0];
+      if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
+        return toast.error('No chat models available');
+      } else {
+        chatModelProvider =
+          chatModelProvidersKeys.find(
+            (provider) =>
+              Object.keys(chatModelProviders[provider]).length > 0,
+          ) || chatModelProvidersKeys[0];
+      }
+
+      if (
+        chatModelProvider === 'custom_openai' &&
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0
+      ) {
+        toast.error(
+          "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+        );
+        return setHasError(true);
+      }
 
       chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
-
-      if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
-        return toast.error('No chat models available');
     }
 
     if (!embeddingModel || !embeddingModelProvider) {
@@ -117,7 +132,8 @@ const checkConfig = async (
     if (
       Object.keys(chatModelProviders).length > 0 &&
-      !chatModelProviders[chatModelProvider]
+      (!chatModelProviders[chatModelProvider] ||
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0)
     ) {
       const chatModelProvidersKeys = Object.keys(chatModelProviders);
       chatModelProvider =
@@ -132,6 +148,16 @@ const checkConfig = async (
       chatModelProvider &&
       !chatModelProviders[chatModelProvider][chatModel]
     ) {
+      if (
+        chatModelProvider === 'custom_openai' &&
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0
+      ) {
+        toast.error(
+          "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+        );
+        return setHasError(true);
+      }
+
       chatModel = Object.keys(
         chatModelProviders[
           Object.keys(chatModelProviders[chatModelProvider]).length > 0
@@ -139,6 +165,7 @@ const checkConfig = async (
             : Object.keys(chatModelProviders)[0]
         ],
       )[0];
+
       localStorage.setItem('chatModel', chatModel);
     }

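The net effect of the hunks above: instead of blindly taking the first provider key, ChatWindow now prefers the first provider that actually exposes at least one model, and it surfaces an explicit error when only an unconfigured custom_openai entry exists. The selection rule, distilled into a standalone sketch (names are illustrative, not from the diff):

// Illustrative distillation of the provider-selection logic above.
type ProviderModels = Record<string, Record<string, unknown>>;

function pickChatModel(
  providers: ProviderModels,
): { provider: string; model: string } | null {
  const keys = Object.keys(providers);
  if (keys.length === 0) return null; // "No chat models available"

  // Prefer the first provider with at least one model; fall back to the first key.
  const provider =
    keys.find((k) => Object.keys(providers[k]).length > 0) ?? keys[0];

  const models = Object.keys(providers[provider]);
  if (models.length === 0) return null; // e.g. a bare custom_openai entry

  return { provider, model: models[0] };
}
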
src/components/EmptyChat.tsx

@@ -1,6 +1,5 @@
 import { Settings } from 'lucide-react';
 import EmptyChatMessageInput from './EmptyChatMessageInput';
-import { useEffect, useState } from 'react';
 import { File } from './ChatWindow';
 import Link from 'next/link';
 import WeatherWidget from './WeatherWidget';
@@ -34,26 +33,28 @@ const EmptyChat = ({
           <Settings className="cursor-pointer lg:hidden" />
         </Link>
       </div>
-      <div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-8">
-        <h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
-          Research begins here.
-        </h2>
-        <EmptyChatMessageInput
-          sendMessage={sendMessage}
-          focusMode={focusMode}
-          setFocusMode={setFocusMode}
-          optimizationMode={optimizationMode}
-          setOptimizationMode={setOptimizationMode}
-          fileIds={fileIds}
-          setFileIds={setFileIds}
-          files={files}
-          setFiles={setFiles}
-        />
+      <div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-4">
+        <div className="flex flex-col items-center justify-center w-full space-y-8">
+          <h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
+            Research begins here.
+          </h2>
+          <EmptyChatMessageInput
+            sendMessage={sendMessage}
+            focusMode={focusMode}
+            setFocusMode={setFocusMode}
+            optimizationMode={optimizationMode}
+            setOptimizationMode={setOptimizationMode}
+            fileIds={fileIds}
+            setFileIds={setFileIds}
+            files={files}
+            setFiles={setFiles}
+          />
+        </div>
         <div className="flex flex-col w-full gap-4 mt-2 sm:flex-row sm:justify-center">
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <WeatherWidget />
           </div>
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <NewsArticleWidget />
           </div>
         </div>

src/components/WeatherWidget.tsx

@@ -31,30 +31,40 @@ const WeatherWidget = () => {
       city: string;
     }) => void,
   ) => {
-    /*
-    // Geolocation doesn't give city so we'll country using ipapi for now
-    if (navigator.geolocation) {
-      const result = await navigator.permissions.query({
-        name: 'geolocation',
-      })
-
-      if (result.state === 'granted') {
-        navigator.geolocation.getCurrentPosition(position => {
-          callback({
-            latitude: position.coords.latitude,
-            longitude: position.coords.longitude,
-          })
-        })
-      } else if (result.state === 'prompt') {
-        callback(await getApproxLocation())
-        navigator.geolocation.getCurrentPosition(position => {})
-      } else if (result.state === 'denied') {
-        callback(await getApproxLocation())
-      }
-    } else {
-      callback(await getApproxLocation())
-    } */
-    callback(await getApproxLocation());
+    if (navigator.geolocation) {
+      const result = await navigator.permissions.query({
+        name: 'geolocation',
+      });
+
+      if (result.state === 'granted') {
+        navigator.geolocation.getCurrentPosition(async (position) => {
+          const res = await fetch(
+            `https://api-bdc.io/data/reverse-geocode-client?latitude=${position.coords.latitude}&longitude=${position.coords.longitude}&localityLanguage=en`,
+            {
+              method: 'GET',
+              headers: {
+                'Content-Type': 'application/json',
+              },
+            },
+          );
+
+          const data = await res.json();
+
+          callback({
+            latitude: position.coords.latitude,
+            longitude: position.coords.longitude,
+            city: data.locality,
+          });
+        });
+      } else if (result.state === 'prompt') {
+        callback(await getApproxLocation());
+        navigator.geolocation.getCurrentPosition((position) => {});
+      } else if (result.state === 'denied') {
+        callback(await getApproxLocation());
+      }
+    } else {
+      callback(await getApproxLocation());
+    }
   };
 
   getLocation(async (location) => {

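The rewritten widget asks the browser for coordinates, then resolves the city through BigDataCloud's reverse-geocode-client endpoint; `data.locality` is the only response field it reads. A typed sketch of just that lookup (the response interface is narrowed to the one field used, which is an assumption; the real payload carries many more fields):

// Minimal typed wrapper around the reverse-geocode call in the diff above.
interface ReverseGeocodeResponse {
  locality: string; // e.g. "Berlin"
}

async function cityFromCoords(
  latitude: number,
  longitude: number,
): Promise<string> {
  const res = await fetch(
    `https://api-bdc.io/data/reverse-geocode-client?latitude=${latitude}&longitude=${longitude}&localityLanguage=en`,
  );
  const data: ReverseGeocodeResponse = await res.json();
  return data.locality;
}
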
src/lib/config.ts

@@ -35,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  AIMLAPI: {
+    API_KEY: string;
+  };
   LM_STUDIO: {
     API_URL: string;
   };
@@ -85,6 +88,8 @@ export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
 
 export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
 
+export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

src/lib/providers/aimlapi.ts (new file, 94 lines)

@@ -0,0 +1,94 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getAimlApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
import axios from 'axios';

export const PROVIDER_INFO = {
  key: 'aimlapi',
  displayName: 'AI/ML API',
};

interface AimlApiModel {
  id: string;
  name?: string;
  type?: string;
}

const API_URL = 'https://api.aimlapi.com';

export const loadAimlApiChatModels = async () => {
  const apiKey = getAimlApiKey();

  if (!apiKey) return {};

  try {
    const response = await axios.get(`${API_URL}/models`, {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
    });

    const chatModels: Record<string, ChatModel> = {};

    response.data.data.forEach((model: AimlApiModel) => {
      if (model.type === 'chat-completion') {
        chatModels[model.id] = {
          displayName: model.name || model.id,
          model: new ChatOpenAI({
            openAIApiKey: apiKey,
            modelName: model.id,
            temperature: 0.7,
            configuration: {
              baseURL: API_URL,
            },
          }) as unknown as BaseChatModel,
        };
      }
    });

    return chatModels;
  } catch (err) {
    console.error(`Error loading AI/ML API models: ${err}`);
    return {};
  }
};

export const loadAimlApiEmbeddingModels = async () => {
  const apiKey = getAimlApiKey();

  if (!apiKey) return {};

  try {
    const response = await axios.get(`${API_URL}/models`, {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
    });

    const embeddingModels: Record<string, EmbeddingModel> = {};

    response.data.data.forEach((model: AimlApiModel) => {
      if (model.type === 'embedding') {
        embeddingModels[model.id] = {
          displayName: model.name || model.id,
          model: new OpenAIEmbeddings({
            openAIApiKey: apiKey,
            modelName: model.id,
            configuration: {
              baseURL: API_URL,
            },
          }) as unknown as Embeddings,
        };
      }
    });

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading AI/ML API embeddings models: ${err}`);
    return {};
  }
};

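Because every entry wraps a ChatOpenAI instance pointed at the provider's OpenAI-compatible baseURL, models returned by this loader behave like any other LangChain chat model. A hedged usage sketch, picking the first available model (not part of this diff; the prompt string is illustrative):

// Hypothetical consumer of the loader defined above.
const chatModels = await loadAimlApiChatModels();
const first = Object.values(chatModels)[0];

if (first) {
  const reply = await first.model.invoke('Say hello in one word.');
  console.log(reply.content);
}
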
src/lib/providers/gemini.ts

@@ -13,9 +13,17 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
 const geminiChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemini 2.5 Flash Preview 05-20',
+    key: 'gemini-2.5-flash-preview-05-20',
+  },
+  {
+    displayName: 'Gemini 2.5 Pro Preview',
+    key: 'gemini-2.5-pro-preview-05-06',
+  },
   {
     displayName: 'Gemini 2.5 Pro Experimental',
-    key: 'gemini-2.5-pro-exp-03-25',
+    key: 'gemini-2.5-pro-preview-05-06',
   },
   {
     displayName: 'Gemini 2.0 Flash',

src/lib/providers/index.ts

@@ -35,6 +35,11 @@ import {
   loadDeepseekChatModels,
   PROVIDER_INFO as DeepseekInfo,
 } from './deepseek';
+import {
+  loadAimlApiChatModels,
+  loadAimlApiEmbeddingModels,
+  PROVIDER_INFO as AimlApiInfo,
+} from './aimlapi';
 import {
   loadLMStudioChatModels,
   loadLMStudioEmbeddingsModels,
@@ -49,6 +54,7 @@ export const PROVIDER_METADATA = {
   gemini: GeminiInfo,
   transformers: TransformersInfo,
   deepseek: DeepseekInfo,
+  aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
   custom_openai: {
     key: 'custom_openai',
@@ -76,6 +82,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
 };
@@ -87,6 +94,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  aimlapi: loadAimlApiEmbeddingModels,
   lmstudio: loadLMStudioEmbeddingsModels,
 };
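
Read together, the four hunks above are the complete checklist for wiring a provider into the registry: import its loaders and PROVIDER_INFO, add the metadata entry, and register the loaders for chat and/or embeddings. The shape, restated with a made-up "acme" provider so the pattern stands alone (illustrative names, not part of this diff):

// Self-contained sketch of the registry pattern used above.
type Loader = () => Promise<Record<string, unknown>>;

// In the real code these come from './acme' alongside a PROVIDER_INFO export.
const loadAcmeChatModels: Loader = async () => ({});
const loadAcmeEmbeddingModels: Loader = async () => ({});
const AcmeInfo = { key: 'acme', displayName: 'Acme' };

const PROVIDER_METADATA = { acme: AcmeInfo /* ...existing providers */ };
const chatModelProviders: Record<string, Loader> = { acme: loadAcmeChatModels };
const embeddingModelProviders: Record<string, Loader> = {
  acme: loadAcmeEmbeddingModels,
};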