Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-09-17 14:51:32 +00:00)

Compare commits: v1.11.0-rc...cf8dec53ca (20 commits)
cf8dec53ca
d5c012d748
2ccbd9a44c
87d788ddef
809b625a34
95c753a549
0bb8b7ec5c
c6d084f5dc
0024ce36c8
c44e746807
b1826066f4
b0b8acc45b
e2b9ffc072
3b46baca4f
772e461c08
5c6018a0f9
0b7989c3d3
8cfcc3e39c
9eba4b7373
91306dc0c7
@@ -16,7 +16,7 @@

 <hr/>

 [](https://discord.gg/26aArMy8tT)

 [screenshot image]

@@ -90,6 +90,9 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
 - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
 - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
+- `GEMINI`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
+- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
+- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**

 **Note**: You can change these after starting Perplexica from the settings dialog.

@@ -111,7 +114,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
 3. After populating the configuration run `npm i`.
 4. Install the dependencies and then execute `npm run build`.
-5. Finally, start the app by running `npm rum start`
+5. Finally, start the app by running `npm run start`

 **Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.

@@ -41,6 +41,6 @@ To update Perplexica to the latest version, follow these steps:
 3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
 4. After populating the configuration run `npm i`.
 5. Install the dependencies and then execute `npm run build`.
-6. Finally, start the app by running `npm rum start`
+6. Finally, start the app by running `npm run start`

 ---

@@ -25,6 +25,9 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""

+[MODELS.AIMLAPI]
+API_KEY = "" # Required to use AI/ML API chat and embedding models
+
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234

@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getAimlApiKey,
   getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
@@ -57,6 +58,7 @@ export const GET = async (req: Request) => {
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['deepseekApiKey'] = getDeepseekApiKey();
+    config['aimlApiKey'] = getAimlApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -95,6 +97,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      AIMLAPI: {
+        API_KEY: config.aimlApiKey,
+      },
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },
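With this change, the new `aimlApiKey` field is read in GET and persisted in POST alongside the existing provider keys. A minimal client-side sketch of exercising this route, assuming the handler above is served at `/api/config` (the route path is inferred, not shown in this diff) and that POST accepts the same shape GET returns:

```ts
// Hypothetical round trip against the config route above.
// Assumes the handler is mounted at /api/config (an inference, not shown here).
const res = await fetch('/api/config');
const config = await res.json();
console.log(config.aimlApiKey); // '' until a key has been saved

// Persist a new AI/ML API key; the POST handler maps it to MODELS.AIMLAPI.API_KEY.
await fetch('/api/config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ ...config, aimlApiKey: 'your-aimlapi-key' }),
});
```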
@@ -23,6 +23,7 @@ interface SettingsType {
   ollamaApiUrl: string;
   lmStudioApiUrl: string;
   deepseekApiKey: string;
+  aimlApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
@@ -862,6 +863,25 @@ const Page = () => {
               />
             </div>

+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                AI/ML API Key
+              </p>
+              <Input
+                type="text"
+                placeholder="AI/ML API Key"
+                value={config.aimlApiKey}
+                isSaving={savingStates['aimlApiKey']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    aimlApiKey: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('aimlApiKey', value)}
+              />
+            </div>
+
             <div className="flex flex-col space-y-1">
               <p className="text-black/70 dark:text-white/70 text-sm">
                 LM Studio API URL
@@ -82,14 +82,29 @@ const checkConfig = async (
 ) {
   if (!chatModel || !chatModelProvider) {
     const chatModelProviders = providers.chatModelProviders;
+    const chatModelProvidersKeys = Object.keys(chatModelProviders);
+
+    if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
+      return toast.error('No chat models available');
+    } else {
       chatModelProvider =
-        chatModelProvider || Object.keys(chatModelProviders)[0];
+        chatModelProvidersKeys.find(
+          (provider) =>
+            Object.keys(chatModelProviders[provider]).length > 0,
+        ) || chatModelProvidersKeys[0];
+    }
+
+    if (
+      chatModelProvider === 'custom_openai' &&
+      Object.keys(chatModelProviders[chatModelProvider]).length === 0
+    ) {
+      toast.error(
+        "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+      );
+      return setHasError(true);
+    }

     chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
-
-    if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
-      return toast.error('No chat models available');
   }

   if (!embeddingModel || !embeddingModelProvider) {
@@ -117,7 +132,8 @@ const checkConfig = async (

     if (
       Object.keys(chatModelProviders).length > 0 &&
-      !chatModelProviders[chatModelProvider]
+      (!chatModelProviders[chatModelProvider] ||
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0)
     ) {
       const chatModelProvidersKeys = Object.keys(chatModelProviders);
       chatModelProvider =
@@ -132,6 +148,16 @@ const checkConfig = async (
       chatModelProvider &&
       !chatModelProviders[chatModelProvider][chatModel]
     ) {
+      if (
+        chatModelProvider === 'custom_openai' &&
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0
+      ) {
+        toast.error(
+          "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+        );
+        return setHasError(true);
+      }
+
       chatModel = Object.keys(
         chatModelProviders[
           Object.keys(chatModelProviders[chatModelProvider]).length > 0
@@ -139,6 +165,7 @@ const checkConfig = async (
             : Object.keys(chatModelProviders)[0]
         ],
       )[0];
+
       localStorage.setItem('chatModel', chatModel);
     }

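The logic added above prefers the first provider that actually returned models instead of blindly taking the first key. A standalone sketch of that fallback rule, using hypothetical provider data rather than code from this diff:

```ts
// Illustration of the provider-fallback rule used in checkConfig, with made-up data.
const chatModelProviders: Record<string, Record<string, unknown>> = {
  custom_openai: {},         // configured, but no models loaded
  aimlapi: { 'gpt-4o': {} }, // has at least one model
};

const keys = Object.keys(chatModelProviders);
const selected =
  keys.find((p) => Object.keys(chatModelProviders[p]).length > 0) || keys[0];

console.log(selected); // 'aimlapi' (the first provider with models wins)
```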
@@ -1,6 +1,5 @@
 import { Settings } from 'lucide-react';
 import EmptyChatMessageInput from './EmptyChatMessageInput';
-import { useEffect, useState } from 'react';
 import { File } from './ChatWindow';
 import Link from 'next/link';
 import WeatherWidget from './WeatherWidget';
@@ -34,7 +33,8 @@ const EmptyChat = ({
           <Settings className="cursor-pointer lg:hidden" />
         </Link>
       </div>
-      <div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-8">
+      <div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-4">
+        <div className="flex flex-col items-center justify-center w-full space-y-8">
           <h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
             Research begins here.
           </h2>
@@ -49,11 +49,12 @@ const EmptyChat = ({
             files={files}
             setFiles={setFiles}
           />
+        </div>
         <div className="flex flex-col w-full gap-4 mt-2 sm:flex-row sm:justify-center">
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <WeatherWidget />
           </div>
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <NewsArticleWidget />
           </div>
         </div>

@@ -31,30 +31,40 @@ const WeatherWidget = () => {
       city: string;
     }) => void,
   ) => {
-    /*
-    // Geolocation doesn't give city so we'll country using ipapi for now
     if (navigator.geolocation) {
       const result = await navigator.permissions.query({
         name: 'geolocation',
-      })
+      });

       if (result.state === 'granted') {
-        navigator.geolocation.getCurrentPosition(position => {
+        navigator.geolocation.getCurrentPosition(async (position) => {
+          const res = await fetch(
+            `https://api-bdc.io/data/reverse-geocode-client?latitude=${position.coords.latitude}&longitude=${position.coords.longitude}&localityLanguage=en`,
+            {
+              method: 'GET',
+              headers: {
+                'Content-Type': 'application/json',
+              },
+            },
+          );
+
+          const data = await res.json();
+
           callback({
             latitude: position.coords.latitude,
             longitude: position.coords.longitude,
-          })
+            city: data.locality,
+          });
+        });
       } else if (result.state === 'prompt') {
-        callback(await getApproxLocation())
-        navigator.geolocation.getCurrentPosition(position => {})
+        callback(await getApproxLocation());
+        navigator.geolocation.getCurrentPosition((position) => {});
       } else if (result.state === 'denied') {
-        callback(await getApproxLocation())
+        callback(await getApproxLocation());
       }
     } else {
-      callback(await getApproxLocation())
-    } */
-    callback(await getApproxLocation());
+      callback(await getApproxLocation());
+    }
   };

   getLocation(async (location) => {
@@ -35,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  AIMLAPI: {
+    API_KEY: string;
+  };
   LM_STUDIO: {
     API_URL: string;
   };
@@ -85,6 +88,8 @@ export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;

 export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;

+export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

src/lib/providers/aimlapi.ts (new file, 94 lines)
@@ -0,0 +1,94 @@
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { getAimlApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+import axios from 'axios';
+
+export const PROVIDER_INFO = {
+  key: 'aimlapi',
+  displayName: 'AI/ML API',
+};
+
+interface AimlApiModel {
+  id: string;
+  name?: string;
+  type?: string;
+}
+
+const API_URL = 'https://api.aimlapi.com';
+
+export const loadAimlApiChatModels = async () => {
+  const apiKey = getAimlApiKey();
+
+  if (!apiKey) return {};
+
+  try {
+    const response = await axios.get(`${API_URL}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: AimlApiModel) => {
+      if (model.type === 'chat-completion') {
+        chatModels[model.id] = {
+          displayName: model.name || model.id,
+          model: new ChatOpenAI({
+            openAIApiKey: apiKey,
+            modelName: model.id,
+            temperature: 0.7,
+            configuration: {
+              baseURL: API_URL,
+            },
+          }) as unknown as BaseChatModel,
+        };
+      }
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading AI/ML API models: ${err}`);
+    return {};
+  }
+};
+
+export const loadAimlApiEmbeddingModels = async () => {
+  const apiKey = getAimlApiKey();
+
+  if (!apiKey) return {};
+
+  try {
+    const response = await axios.get(`${API_URL}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: AimlApiModel) => {
+      if (model.type === 'embedding') {
+        embeddingModels[model.id] = {
+          displayName: model.name || model.id,
+          model: new OpenAIEmbeddings({
+            openAIApiKey: apiKey,
+            modelName: model.id,
+            configuration: {
+              baseURL: API_URL,
+            },
+          }) as unknown as Embeddings,
+        };
+      }
+    });
+
+    return embeddingModels;
+  } catch (err) {
+    console.error(`Error loading AI/ML API embeddings models: ${err}`);
+    return {};
+  }
+};
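Both loaders hit the same `/models` endpoint and filter by `type`, so a single key enables chat and embedding models at once. A minimal usage sketch, assuming the file above is importable via the `@/lib/providers/aimlapi` alias (the alias is inferred from other imports in this diff):

```ts
import {
  loadAimlApiChatModels,
  loadAimlApiEmbeddingModels,
} from '@/lib/providers/aimlapi';

// Both loaders return {} when MODELS.AIMLAPI.API_KEY is unset in config.toml.
const chatModels = await loadAimlApiChatModels();
const embeddingModels = await loadAimlApiEmbeddingModels();

// Keys are the model ids reported by the AI/ML API /models endpoint.
console.log(Object.keys(chatModels));
console.log(Object.keys(embeddingModels));
```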
@@ -13,9 +13,17 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';

 const geminiChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemini 2.5 Flash Preview 05-20',
+    key: 'gemini-2.5-flash-preview-05-20',
+  },
+  {
+    displayName: 'Gemini 2.5 Pro Preview',
+    key: 'gemini-2.5-pro-preview-05-06',
+  },
   {
     displayName: 'Gemini 2.5 Pro Experimental',
-    key: 'gemini-2.5-pro-exp-03-25',
+    key: 'gemini-2.5-pro-preview-05-06',
   },
   {
     displayName: 'Gemini 2.0 Flash',

@@ -35,6 +35,11 @@ import {
   loadDeepseekChatModels,
   PROVIDER_INFO as DeepseekInfo,
 } from './deepseek';
+import {
+  loadAimlApiChatModels,
+  loadAimlApiEmbeddingModels,
+  PROVIDER_INFO as AimlApiInfo,
+} from './aimlapi';
 import {
   loadLMStudioChatModels,
   loadLMStudioEmbeddingsModels,
@@ -49,6 +54,7 @@ export const PROVIDER_METADATA = {
   gemini: GeminiInfo,
   transformers: TransformersInfo,
   deepseek: DeepseekInfo,
+  aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
   custom_openai: {
     key: 'custom_openai',
@@ -76,6 +82,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
 };

@@ -87,6 +94,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  aimlapi: loadAimlApiEmbeddingModels,
   lmstudio: loadLMStudioEmbeddingsModels,
 };
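With `aimlapi` registered in both maps, the provider participates in model discovery like any other entry. A sketch of how such a registry entry is consumed, under the assumption that these maps are exported from `@/lib/providers` (the consuming code is not part of this diff):

```ts
import { chatModelProviders } from '@/lib/providers';

// Each registry value is an async loader returning a record of models keyed by id.
const loadAimlApi = chatModelProviders['aimlapi'];
const models = await loadAimlApi();

// Model ids become selectable once the loader returns a non-empty record.
console.log(Object.keys(models));
```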