Compare commits
v1.11.0-rc...feat/struc
38 Commits
df33229934
49fafaa096
ca9b32a23b
76e3ff4e02
eabf3ca7d3
94e6db10bb
26e1d5fec3
66be87b688
f7b4e32218
57407112fb
b280cc2e01
e6ebf892c5
b754641058
722f4f760e
01e04a209f
0299fd1ea0
cf8dec53ca
d5c012d748
2ccbd9a44c
ccd89d48d9
87d788ddef
809b625a34
95c753a549
0bb8b7ec5c
c6d084f5dc
0024ce36c8
c44e746807
b1826066f4
b0b8acc45b
e2b9ffc072
68c43ea372
3b46baca4f
772e461c08
5c6018a0f9
0b7989c3d3
8cfcc3e39c
9eba4b7373
91306dc0c7
.assets/manifest.json (new file, 0 changes)
.gitignore (vendored, 2 changes)
@@ -37,3 +37,5 @@ Thumbs.db
 # Db
 db.sqlite
 /searxng
+
+certificates
README.md
@@ -16,7 +16,7 @@
 
 <hr/>
 
 [](https://discord.gg/26aArMy8tT)
 
-
+
 
@@ -90,6 +90,9 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
 - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
 - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
+- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
+- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
+- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**
 
 **Note**: You can change these after starting Perplexica from the settings dialog.
 
@@ -111,7 +114,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
 3. After populating the configuration run `npm i`.
 4. Install the dependencies and then execute `npm run build`.
-5. Finally, start the app by running `npm rum start`
+5. Finally, start the app by running `npm run start`
 
 **Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
 
@@ -132,7 +135,7 @@ If you're encountering an Ollama connection error, it is likely due to the backend
 
 3. **Linux Users - Expose Ollama to Network:**
 
-   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
+   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0:11434"`. (Change the port number if you are using a different one.) Then reload the systemd manager configuration with `systemctl daemon-reload`, and restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
 
 - Ensure that the port (default is 11434) is not blocked by your firewall.
 
@@ -41,6 +41,6 @@ To update Perplexica to the latest version, follow these steps:
 3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
 4. After populating the configuration run `npm i`.
 5. Install the dependencies and then execute `npm run build`.
-6. Finally, start the app by running `npm rum start`
+6. Finally, start the app by running `npm run start`
 
 ---
package.json (15 changes)
@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.11.0-rc1",
+  "version": "1.11.0-rc2",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
@@ -15,11 +15,12 @@
     "@headlessui/react": "^2.2.0",
     "@iarna/toml": "^2.2.5",
     "@icons-pack/react-simple-icons": "^12.3.0",
-    "@langchain/anthropic": "^0.3.15",
-    "@langchain/community": "^0.3.36",
-    "@langchain/core": "^0.3.42",
-    "@langchain/google-genai": "^0.1.12",
-    "@langchain/openai": "^0.0.25",
+    "@langchain/anthropic": "^0.3.24",
+    "@langchain/community": "^0.3.49",
+    "@langchain/core": "^0.3.66",
+    "@langchain/google-genai": "^0.2.15",
+    "@langchain/ollama": "^0.2.3",
+    "@langchain/openai": "^0.6.2",
     "@langchain/textsplitters": "^0.1.0",
     "@tailwindcss/typography": "^0.5.12",
     "@xenova/transformers": "^2.17.2",
@@ -31,7 +32,7 @@
     "drizzle-orm": "^0.40.1",
     "html-to-text": "^9.0.5",
     "jspdf": "^3.0.1",
-    "langchain": "^0.1.30",
+    "langchain": "^0.3.30",
     "lucide-react": "^0.363.0",
     "mammoth": "^1.9.1",
     "markdown-to-jsx": "^7.7.2",
BIN public/icon-100.png (new file, 916 B)
BIN public/icon-50.png (new file, 515 B)
BIN public/icon.png (new file, 30 KiB)
BIN public/screenshots/p1.png (new file, 183 KiB)
BIN public/screenshots/p1_small.png (new file, 130 KiB)
BIN public/screenshots/p2.png (new file, 627 KiB)
BIN public/screenshots/p2_small.png (new file, 202 KiB)
sample.config.toml
@@ -25,6 +25,9 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.AIMLAPI]
+API_KEY = "" # Required to use AI/ML API chat and embedding models
+
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
 
@@ -223,7 +223,7 @@ export const POST = async (req: Request) => {
 
   if (body.chatModel?.provider === 'custom_openai') {
     llm = new ChatOpenAI({
-      openAIApiKey: getCustomOpenaiApiKey(),
+      apiKey: getCustomOpenaiApiKey(),
       modelName: getCustomOpenaiModelName(),
       temperature: 0.7,
       configuration: {
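This rename repeats across several route handlers below: `openAIApiKey` becomes `apiKey`, tracking the `@langchain/openai` bump to ^0.6.2 in package.json above. A minimal sketch of the new constructor shape; the key, model name, and base URL here are placeholders, not values from this diff:

import { ChatOpenAI } from '@langchain/openai';

// Sketch: `apiKey` replaces the older `openAIApiKey` option; the rest of
// the constructor (model name, temperature, custom baseURL) is unchanged.
const llm = new ChatOpenAI({
  apiKey: 'sk-placeholder', // placeholder key
  modelName: 'gpt-4o-mini', // placeholder model name
  temperature: 0.7,
  configuration: {
    baseURL: 'https://api.example.com/v1', // placeholder OpenAI-compatible endpoint
  },
});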
src/app/api/config/route.ts
@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getAimlApiKey,
   getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
@@ -57,6 +58,7 @@ export const GET = async (req: Request) => {
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['deepseekApiKey'] = getDeepseekApiKey();
+    config['aimlApiKey'] = getAimlApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -95,6 +97,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      AIMLAPI: {
+        API_KEY: config.aimlApiKey,
+      },
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },
src/app/api/discover/route.ts
@@ -36,6 +36,7 @@ export const GET = async (req: Request) => {
           {
             engines: ['bing news'],
             pageno: 1,
+            language: 'en',
           },
         )
       ).results;
@@ -49,7 +50,11 @@ export const GET = async (req: Request) => {
     data = (
       await searchSearxng(
         `site:${articleWebsites[Math.floor(Math.random() * articleWebsites.length)]} ${topics[Math.floor(Math.random() * topics.length)]}`,
-        { engines: ['bing news'], pageno: 1 },
+        {
+          engines: ['bing news'],
+          pageno: 1,
+          language: 'en',
+        },
       )
     ).results;
   }
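Both discover queries now pin results to English. A sketch of the call shape, assuming the helper's signature from this diff; the import path and query string are illustrative:

import { searchSearxng } from '@/lib/searxng'; // import path assumed

// Sketch: request English-language 'bing news' results; the options
// object mirrors the one added in the hunks above.
async function fetchNews() {
  const { results } = await searchSearxng('site:example.com AI', {
    engines: ['bing news'],
    pageno: 1,
    language: 'en',
  });
  return results;
}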
@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
 
   if (body.chatModel?.provider === 'custom_openai') {
     llm = new ChatOpenAI({
-      openAIApiKey: getCustomOpenaiApiKey(),
+      apiKey: getCustomOpenaiApiKey(),
       modelName: getCustomOpenaiModelName(),
       temperature: 0.7,
       configuration: {

@@ -81,7 +81,7 @@ export const POST = async (req: Request) => {
   if (body.chatModel?.provider === 'custom_openai') {
     llm = new ChatOpenAI({
       modelName: body.chatModel?.name || getCustomOpenaiModelName(),
-      openAIApiKey:
+      apiKey:
         body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
       temperature: 0.7,
       configuration: {

@@ -48,7 +48,7 @@ export const POST = async (req: Request) => {
 
   if (body.chatModel?.provider === 'custom_openai') {
     llm = new ChatOpenAI({
-      openAIApiKey: getCustomOpenaiApiKey(),
+      apiKey: getCustomOpenaiApiKey(),
       modelName: getCustomOpenaiModelName(),
       temperature: 0.7,
       configuration: {

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
 
   if (body.chatModel?.provider === 'custom_openai') {
     llm = new ChatOpenAI({
-      openAIApiKey: getCustomOpenaiApiKey(),
+      apiKey: getCustomOpenaiApiKey(),
       modelName: getCustomOpenaiModelName(),
       temperature: 0.7,
       configuration: {
src/app/api/weather/route.ts
@@ -1,6 +1,7 @@
 export const POST = async (req: Request) => {
   try {
-    const body: { lat: number; lng: number } = await req.json();
+    const body: { lat: number; lng: number; temperatureUnit: 'C' | 'F' } =
+      await req.json();
 
     if (!body.lat || !body.lng) {
       return Response.json(
@@ -12,7 +13,7 @@ export const POST = async (req: Request) => {
     }
 
     const res = await fetch(
-      `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto`,
+      `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${body.temperatureUnit === 'C' ? '' : '&temperature_unit=fahrenheit'}`,
     );
 
     const data = await res.json();
@@ -33,12 +34,14 @@ export const POST = async (req: Request) => {
       humidity: number;
       windSpeed: number;
       icon: string;
+      temperatureUnit: 'C' | 'F';
     } = {
       temperature: data.current.temperature_2m,
       condition: '',
       humidity: data.current.relative_humidity_2m,
       windSpeed: data.current.wind_speed_10m,
       icon: '',
+      temperatureUnit: body.temperatureUnit,
     };
 
     const code = data.current.weather_code;
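With the unit threaded through the route, a client request changes by a single field. A minimal sketch of the new request and response shape; the coordinates are placeholders:

// Sketch: POST the coordinates plus the preferred unit; the route appends
// &temperature_unit=fahrenheit to the Open-Meteo URL only for 'F'.
async function fetchWeather() {
  const res = await fetch('/api/weather', {
    method: 'POST',
    body: JSON.stringify({ lat: 52.52, lng: 13.405, temperatureUnit: 'F' }),
  });
  const data = await res.json();
  return `${data.temperature}°${data.temperatureUnit}`; // e.g. "68°F"
}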
src/app/globals.css
@@ -11,3 +11,11 @@
     display: none;
   }
 }
+
+@media screen and (-webkit-min-device-pixel-ratio: 0) {
+  select,
+  textarea,
+  input {
+    font-size: 16px !important;
+  }
+}
src/app/manifest.ts (new file, 54 lines)
@@ -0,0 +1,54 @@
+import type { MetadataRoute } from 'next';
+
+export default function manifest(): MetadataRoute.Manifest {
+  return {
+    name: 'Perplexica - Chat with the internet',
+    short_name: 'Perplexica',
+    description:
+      'Perplexica is an AI powered chatbot that is connected to the internet.',
+    start_url: '/',
+    display: 'standalone',
+    background_color: '#0a0a0a',
+    theme_color: '#0a0a0a',
+    screenshots: [
+      {
+        src: '/screenshots/p1.png',
+        form_factor: 'wide',
+        sizes: '2560x1600',
+      },
+      {
+        src: '/screenshots/p2.png',
+        form_factor: 'wide',
+        sizes: '2560x1600',
+      },
+      {
+        src: '/screenshots/p1_small.png',
+        form_factor: 'narrow',
+        sizes: '828x1792',
+      },
+      {
+        src: '/screenshots/p2_small.png',
+        form_factor: 'narrow',
+        sizes: '828x1792',
+      },
+    ],
+    icons: [
+      {
+        src: '/icon-50.png',
+        sizes: '50x50',
+        type: 'image/png' as const,
+      },
+      {
+        src: '/icon-100.png',
+        sizes: '100x100',
+        type: 'image/png',
+      },
+      {
+        src: '/icon.png',
+        sizes: '440x440',
+        type: 'image/png',
+        purpose: 'any',
+      },
+    ],
+  };
+}
src/app/settings/page.tsx
@@ -23,6 +23,7 @@ interface SettingsType {
   ollamaApiUrl: string;
   lmStudioApiUrl: string;
   deepseekApiKey: string;
+  aimlApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
@@ -147,6 +148,7 @@ const Page = () => {
   const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
   const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
   const [systemInstructions, setSystemInstructions] = useState<string>('');
+  const [temperatureUnit, setTemperatureUnit] = useState<'C' | 'F'>('C');
   const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
 
   useEffect(() => {
@@ -209,6 +211,8 @@ const Page = () => {
 
       setSystemInstructions(localStorage.getItem('systemInstructions')!);
 
+      setTemperatureUnit(localStorage.getItem('temperatureUnit')! as 'C' | 'F');
+
       setIsLoading(false);
     };
 
@@ -367,6 +371,8 @@ const Page = () => {
         localStorage.setItem('embeddingModel', value);
       } else if (key === 'systemInstructions') {
        localStorage.setItem('systemInstructions', value);
+      } else if (key === 'temperatureUnit') {
+        localStorage.setItem('temperatureUnit', value.toString());
       }
     } catch (err) {
       console.error('Failed to save:', err);
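The unit is persisted client-side only, in localStorage rather than config.toml. A one-line sketch of the read path, mirroring the `?? 'C'` fallback the weather widget uses below:

// Sketch: read the persisted unit, defaulting to Celsius when unset.
const unit = (localStorage.getItem('temperatureUnit') ?? 'C') as 'C' | 'F';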
@@ -415,13 +421,35 @@ const Page = () => {
       ) : (
         config && (
           <div className="flex flex-col space-y-6 pb-28 lg:pb-8">
-            <SettingsSection title="Appearance">
+            <SettingsSection title="Preferences">
               <div className="flex flex-col space-y-1">
                 <p className="text-black/70 dark:text-white/70 text-sm">
                   Theme
                 </p>
                 <ThemeSwitcher />
               </div>
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  Temperature Unit
+                </p>
+                <Select
+                  value={temperatureUnit ?? undefined}
+                  onChange={(e) => {
+                    setTemperatureUnit(e.target.value as 'C' | 'F');
+                    saveConfig('temperatureUnit', e.target.value);
+                  }}
+                  options={[
+                    {
+                      label: 'Celsius',
+                      value: 'C',
+                    },
+                    {
+                      label: 'Fahrenheit',
+                      value: 'F',
+                    },
+                  ]}
+                />
+              </div>
             </SettingsSection>
 
             <SettingsSection title="Automatic Search">
@@ -515,7 +543,7 @@ const Page = () => {
             <SettingsSection title="System Instructions">
               <div className="flex flex-col space-y-4">
                 <Textarea
-                  value={systemInstructions}
+                  value={systemInstructions ?? undefined}
                   isSaving={savingStates['systemInstructions']}
                   onChange={(e) => {
                     setSystemInstructions(e.target.value);
@@ -862,6 +890,25 @@ const Page = () => {
                 />
               </div>
 
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  AI/ML API Key
+                </p>
+                <Input
+                  type="text"
+                  placeholder="AI/ML API Key"
+                  value={config.aimlApiKey}
+                  isSaving={savingStates['aimlApiKey']}
+                  onChange={(e) => {
+                    setConfig((prev) => ({
+                      ...prev!,
+                      aimlApiKey: e.target.value,
+                    }));
+                  }}
+                  onSave={(value) => saveConfig('aimlApiKey', value)}
+                />
+              </div>
+
               <div className="flex flex-col space-y-1">
                 <p className="text-black/70 dark:text-white/70 text-sm">
                   LM Studio API URL
src/components/ChatWindow.tsx
@@ -82,14 +82,29 @@ const checkConfig = async (
   ) {
     if (!chatModel || !chatModelProvider) {
       const chatModelProviders = providers.chatModelProviders;
+      const chatModelProvidersKeys = Object.keys(chatModelProviders);
+
+      if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
+        return toast.error('No chat models available');
+      } else {
         chatModelProvider =
-          chatModelProvider || Object.keys(chatModelProviders)[0];
+          chatModelProvidersKeys.find(
+            (provider) =>
+              Object.keys(chatModelProviders[provider]).length > 0,
+          ) || chatModelProvidersKeys[0];
+      }
+
+      if (
+        chatModelProvider === 'custom_openai' &&
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0
+      ) {
+        toast.error(
+          "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+        );
+        return setHasError(true);
+      }
+
       chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
 
-      if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
-        return toast.error('No chat models available');
     }
 
     if (!embeddingModel || !embeddingModelProvider) {
@@ -117,7 +132,8 @@ const checkConfig = async (
 
     if (
       Object.keys(chatModelProviders).length > 0 &&
-      !chatModelProviders[chatModelProvider]
+      (!chatModelProviders[chatModelProvider] ||
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0)
     ) {
       const chatModelProvidersKeys = Object.keys(chatModelProviders);
       chatModelProvider =
@@ -132,6 +148,16 @@ const checkConfig = async (
       chatModelProvider &&
       !chatModelProviders[chatModelProvider][chatModel]
     ) {
+      if (
+        chatModelProvider === 'custom_openai' &&
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0
+      ) {
+        toast.error(
+          "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+        );
+        return setHasError(true);
+      }
+
       chatModel = Object.keys(
         chatModelProviders[
           Object.keys(chatModelProviders[chatModelProvider]).length > 0
@@ -139,6 +165,7 @@ const checkConfig = async (
             : Object.keys(chatModelProviders)[0]
         ],
       )[0];
+
       localStorage.setItem('chatModel', chatModel);
     }
 
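The reworked checkConfig no longer grabs the first provider key blindly; it prefers the first provider that actually loaded at least one model. A self-contained sketch of that selection, with illustrative data:

// Sketch: pick the first provider exposing at least one model; fall back
// to the first key if none do. The shape mirrors providers.chatModelProviders.
const chatModelProviders: Record<string, Record<string, unknown>> = {
  custom_openai: {}, // configured but empty
  ollama: { 'llama3.1': {} }, // has a loadable model
};

const keys = Object.keys(chatModelProviders);
const provider =
  keys.find((p) => Object.keys(chatModelProviders[p]).length > 0) || keys[0];
// provider === 'ollama'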
src/components/EmptyChat.tsx
@@ -1,6 +1,5 @@
 import { Settings } from 'lucide-react';
 import EmptyChatMessageInput from './EmptyChatMessageInput';
-import { useEffect, useState } from 'react';
 import { File } from './ChatWindow';
 import Link from 'next/link';
 import WeatherWidget from './WeatherWidget';
@@ -34,7 +33,8 @@ const EmptyChat = ({
           <Settings className="cursor-pointer lg:hidden" />
         </Link>
       </div>
-      <div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-8">
+      <div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-4">
+        <div className="flex flex-col items-center justify-center w-full space-y-8">
         <h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
           Research begins here.
         </h2>
@@ -49,11 +49,12 @@ const EmptyChat = ({
           files={files}
           setFiles={setFiles}
         />
+        </div>
         <div className="flex flex-col w-full gap-4 mt-2 sm:flex-row sm:justify-center">
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <WeatherWidget />
           </div>
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <NewsArticleWidget />
           </div>
         </div>
src/components/WeatherWidget.tsx
@@ -9,7 +9,9 @@ const WeatherWidget = () => {
     humidity: 0,
     windSpeed: 0,
     icon: '',
+    temperatureUnit: 'C',
   });
 
   const [loading, setLoading] = useState(true);
 
   useEffect(() => {
@@ -31,30 +33,40 @@ const WeatherWidget = () => {
       city: string;
     }) => void,
   ) => {
-    /*
-    // Geolocation doesn't give city so we'll country using ipapi for now
     if (navigator.geolocation) {
       const result = await navigator.permissions.query({
         name: 'geolocation',
-      })
+      });
+
       if (result.state === 'granted') {
-        navigator.geolocation.getCurrentPosition(position => {
+        navigator.geolocation.getCurrentPosition(async (position) => {
+          const res = await fetch(
+            `https://api-bdc.io/data/reverse-geocode-client?latitude=${position.coords.latitude}&longitude=${position.coords.longitude}&localityLanguage=en`,
+            {
+              method: 'GET',
+              headers: {
+                'Content-Type': 'application/json',
+              },
+            },
+          );
+
+          const data = await res.json();
+
           callback({
             latitude: position.coords.latitude,
             longitude: position.coords.longitude,
-          })
-        })
+            city: data.locality,
+          });
+        });
       } else if (result.state === 'prompt') {
-        callback(await getApproxLocation())
-        navigator.geolocation.getCurrentPosition(position => {})
+        callback(await getApproxLocation());
+        navigator.geolocation.getCurrentPosition((position) => {});
       } else if (result.state === 'denied') {
-        callback(await getApproxLocation())
+        callback(await getApproxLocation());
       }
     } else {
-      callback(await getApproxLocation())
-    } */
-    callback(await getApproxLocation());
+      callback(await getApproxLocation());
+    }
   };
 
   getLocation(async (location) => {
@@ -63,6 +75,7 @@ const WeatherWidget = () => {
       body: JSON.stringify({
         lat: location.latitude,
         lng: location.longitude,
+        temperatureUnit: localStorage.getItem('temperatureUnit') ?? 'C',
       }),
     });
 
@@ -81,6 +94,7 @@ const WeatherWidget = () => {
         humidity: data.humidity,
         windSpeed: data.windSpeed,
         icon: data.icon,
+        temperatureUnit: data.temperatureUnit,
       });
       setLoading(false);
     });
@@ -115,7 +129,7 @@ const WeatherWidget = () => {
               className="h-10 w-auto"
             />
             <span className="text-base font-semibold text-black dark:text-white">
-              {data.temperature}°C
+              {data.temperature}°{data.temperatureUnit}
             </span>
           </div>
           <div className="flex flex-col justify-between flex-1 h-full py-1">
src/lib/config.ts
@@ -35,6 +35,9 @@ interface Config {
     DEEPSEEK: {
       API_KEY: string;
     };
+    AIMLAPI: {
+      API_KEY: string;
+    };
     LM_STUDIO: {
       API_URL: string;
     };
@@ -85,6 +88,8 @@ export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
 
 export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
 
+export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
 
src/lib/prompts/webSearch.ts
@@ -1,63 +1,41 @@
 export const webSearchRetrieverPrompt = `
-You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it.
-If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
-If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants to you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
-You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
-
-There are several examples attached for your reference inside the below \`examples\` XML block
-
-<examples>
-1. Follow up question: What is the capital of France
-Rephrased question:\`
-<question>
-Capital of france
-</question>
-\`
-
-2. Hi, how are you?
-Rephrased question\`
-<question>
-not_needed
-</question>
-\`
-
-3. Follow up question: What is Docker?
-Rephrased question: \`
-<question>
-What is Docker
-</question>
-\`
-
-4. Follow up question: Can you tell me what is X from https://example.com
-Rephrased question: \`
-<question>
-Can you tell me what is X?
-</question>
-
-<links>
-https://example.com
-</links>
-\`
-
-5. Follow up question: Summarize the content from https://example.com
-Rephrased question: \`
-<question>
-summarize
-</question>
-
-<links>
-https://example.com
-</links>
-\`
-</examples>
-
-Anything below is the part of the actual conversation and you need to use conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
+You are an AI question rephraser. You will be given a conversation and a follow-up question; rephrase it into a standalone question that another LLM can use to search the web.
+
+Return ONLY a JSON object that matches this schema:
+query: string // the standalone question (or "summarize")
+links: string[] // URLs extracted from the user query (empty if none)
+searchRequired: boolean // true if web search is needed, false for greetings/simple writing tasks
+searchMode: "" | "normal" | "news" // "" when searchRequired is false; "news" if the user asks for news/articles, otherwise "normal"
+
+Rules
+- Greetings / simple writing tasks → query:"", links:[], searchRequired:false, searchMode:""
+- Summarizing a URL → query:"summarize", links:[url...], searchRequired:true, searchMode:"normal"
+- Asking for news/articles → searchMode:"news"
+
+Examples
+1. Follow-up: What is the capital of France?
+"query":"capital of France","links":[],"searchRequired":true,"searchMode":"normal"
+
+2. Hi, how are you?
+"query":"","links":[],"searchRequired":false,"searchMode":""
+
+3. Follow-up: What is Docker?
+"query":"what is Docker","links":[],"searchRequired":true,"searchMode":"normal"
+
+4. Follow-up: Can you tell me what is X from https://example.com?
+"query":"what is X","links":["https://example.com"],"searchRequired":true,"searchMode":"normal"
+
+5. Follow-up: Summarize the content from https://example.com
+"query":"summarize","links":["https://example.com"],"searchRequired":true,"searchMode":"normal"
+
+6. Follow-up: Latest news about AI
+"query":"latest news about AI","links":[],"searchRequired":true,"searchMode":"news"
 
 <conversation>
 {chat_history}
 </conversation>
 
-Follow up question: {query}
+Follow-up question: {query}
 Rephrased question:
 `;
src/lib/providers/aimlapi.ts (new file, 94 lines)
@@ -0,0 +1,94 @@
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { getAimlApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+import axios from 'axios';
+
+export const PROVIDER_INFO = {
+  key: 'aimlapi',
+  displayName: 'AI/ML API',
+};
+
+interface AimlApiModel {
+  id: string;
+  name?: string;
+  type?: string;
+}
+
+const API_URL = 'https://api.aimlapi.com';
+
+export const loadAimlApiChatModels = async () => {
+  const apiKey = getAimlApiKey();
+
+  if (!apiKey) return {};
+
+  try {
+    const response = await axios.get(`${API_URL}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: AimlApiModel) => {
+      if (model.type === 'chat-completion') {
+        chatModels[model.id] = {
+          displayName: model.name || model.id,
+          model: new ChatOpenAI({
+            apiKey: apiKey,
+            modelName: model.id,
+            temperature: 0.7,
+            configuration: {
+              baseURL: API_URL,
+            },
+          }) as unknown as BaseChatModel,
+        };
+      }
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading AI/ML API models: ${err}`);
+    return {};
+  }
+};
+
+export const loadAimlApiEmbeddingModels = async () => {
+  const apiKey = getAimlApiKey();
+
+  if (!apiKey) return {};
+
+  try {
+    const response = await axios.get(`${API_URL}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: AimlApiModel) => {
+      if (model.type === 'embedding') {
+        embeddingModels[model.id] = {
+          displayName: model.name || model.id,
+          model: new OpenAIEmbeddings({
+            apiKey: apiKey,
+            modelName: model.id,
+            configuration: {
+              baseURL: API_URL,
+            },
+          }) as unknown as Embeddings,
+        };
+      }
+    });
+
+    return embeddingModels;
+  } catch (err) {
+    console.error(`Error loading AI/ML API embeddings models: ${err}`);
+    return {};
+  }
+};
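The two loaders return maps keyed by model id, shaped like every other provider module registered in providers/index.ts below. A hedged usage sketch; the import alias is assumed from the file location above:

import { loadAimlApiChatModels } from '@/lib/providers/aimlapi'; // path assumed

// Sketch: resolves to an empty object when MODELS.AIMLAPI.API_KEY is unset,
// otherwise to one entry per 'chat-completion' model from /models.
async function listAimlApiChatModels() {
  const models = await loadAimlApiChatModels();
  return Object.keys(models);
}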
src/lib/providers/deepseek.ts
@@ -31,7 +31,7 @@ export const loadDeepseekChatModels = async () => {
       chatModels[model.key] = {
         displayName: model.displayName,
         model: new ChatOpenAI({
-          openAIApiKey: deepseekApiKey,
+          apiKey: deepseekApiKey,
           modelName: model.key,
           temperature: 0.7,
           configuration: {
src/lib/providers/gemini.ts
@@ -13,9 +13,17 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
 const geminiChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemini 2.5 Flash Preview 05-20',
+    key: 'gemini-2.5-flash-preview-05-20',
+  },
+  {
+    displayName: 'Gemini 2.5 Pro Preview',
+    key: 'gemini-2.5-pro-preview-05-06',
+  },
   {
     displayName: 'Gemini 2.5 Pro Experimental',
-    key: 'gemini-2.5-pro-exp-03-25',
+    key: 'gemini-2.5-pro-preview-05-06',
   },
   {
     displayName: 'Gemini 2.0 Flash',
src/lib/providers/groq.ts
@@ -29,12 +29,15 @@ export const loadGroqChatModels = async () => {
       chatModels[model.id] = {
         displayName: model.id,
         model: new ChatOpenAI({
-          openAIApiKey: groqApiKey,
+          apiKey: groqApiKey,
           modelName: model.id,
           temperature: 0.7,
           configuration: {
             baseURL: 'https://api.groq.com/openai/v1',
           },
+          metadata: {
+            'model-type': 'groq',
+          },
         }) as unknown as BaseChatModel,
       };
     });
src/lib/providers/index.ts
@@ -35,6 +35,11 @@ import {
   loadDeepseekChatModels,
   PROVIDER_INFO as DeepseekInfo,
 } from './deepseek';
+import {
+  loadAimlApiChatModels,
+  loadAimlApiEmbeddingModels,
+  PROVIDER_INFO as AimlApiInfo,
+} from './aimlapi';
 import {
   loadLMStudioChatModels,
   loadLMStudioEmbeddingsModels,
@@ -49,6 +54,7 @@ export const PROVIDER_METADATA = {
   gemini: GeminiInfo,
   transformers: TransformersInfo,
   deepseek: DeepseekInfo,
+  aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
   custom_openai: {
     key: 'custom_openai',
@@ -76,6 +82,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
 };
 
@@ -87,6 +94,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  aimlapi: loadAimlApiEmbeddingModels,
   lmstudio: loadLMStudioEmbeddingsModels,
 };
 
@@ -110,7 +118,7 @@ export const getAvailableChatModelProviders = async () => {
       [customOpenAiModelName]: {
         displayName: customOpenAiModelName,
         model: new ChatOpenAI({
-          openAIApiKey: customOpenAiApiKey,
+          apiKey: customOpenAiApiKey,
           modelName: customOpenAiModelName,
           temperature: 0.7,
           configuration: {
src/lib/providers/lmstudio.ts
@@ -47,7 +47,7 @@ export const loadLMStudioChatModels = async () => {
       chatModels[model.id] = {
         displayName: model.name || model.id,
         model: new ChatOpenAI({
-          openAIApiKey: 'lm-studio',
+          apiKey: 'lm-studio',
           configuration: {
             baseURL: ensureV1Endpoint(endpoint),
           },
@@ -83,7 +83,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
       embeddingsModels[model.id] = {
         displayName: model.name || model.id,
         model: new OpenAIEmbeddings({
-          openAIApiKey: 'lm-studio',
+          apiKey: 'lm-studio',
           configuration: {
             baseURL: ensureV1Endpoint(endpoint),
           },
src/lib/providers/ollama.ts
@@ -6,8 +6,8 @@ export const PROVIDER_INFO = {
   key: 'ollama',
   displayName: 'Ollama',
 };
-import { ChatOllama } from '@langchain/community/chat_models/ollama';
-import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
+import { ChatOllama } from '@langchain/ollama';
+import { OllamaEmbeddings } from '@langchain/ollama';
 
 export const loadOllamaChatModels = async () => {
   const ollamaApiEndpoint = getOllamaApiEndpoint();
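Ollama support moves off the `@langchain/community` entry points onto the dedicated `@langchain/ollama` package added in package.json above. A minimal sketch of the equivalent construction; the endpoint and model names are illustrative:

import { ChatOllama, OllamaEmbeddings } from '@langchain/ollama';

// Sketch: the dedicated package exposes the same chat and embeddings
// classes; baseUrl and model here are illustrative local defaults.
const chat = new ChatOllama({
  baseUrl: 'http://localhost:11434',
  model: 'llama3.1',
  temperature: 0.7,
});

const embeddings = new OllamaEmbeddings({
  baseUrl: 'http://localhost:11434',
  model: 'nomic-embed-text',
});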
src/lib/providers/openai.ts
@@ -67,7 +67,7 @@ export const loadOpenAIChatModels = async () => {
       chatModels[model.key] = {
         displayName: model.displayName,
         model: new ChatOpenAI({
-          openAIApiKey: openaiApiKey,
+          apiKey: openaiApiKey,
           modelName: model.key,
           temperature: 0.7,
         }) as unknown as BaseChatModel,
@@ -93,7 +93,7 @@ export const loadOpenAIEmbeddingModels = async () => {
       embeddingModels[model.key] = {
         displayName: model.displayName,
         model: new OpenAIEmbeddings({
-          openAIApiKey: openaiApiKey,
+          apiKey: openaiApiKey,
           modelName: model.key,
         }) as unknown as Embeddings,
       };
src/lib/search/metaSearchAgent.ts
@@ -24,6 +24,7 @@ import computeSimilarity from '../utils/computeSimilarity';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import eventEmitter from 'events';
 import { StreamEvent } from '@langchain/core/tracers/log_stream';
+import { z } from 'zod';
 
 export interface MetaSearchAgentType {
   searchAndAnswer: (
@@ -52,6 +53,17 @@ type BasicChainInput = {
   query: string;
 };
 
+const retrieverLLMOutputSchema = z.object({
+  query: z.string().describe('The query to search the web for.'),
+  links: z
+    .array(z.string())
+    .describe('The links to search/summarize if present'),
+  searchRequired: z
+    .boolean()
+    .describe('Wether there is a need to search the web'),
+  searchMode: z.enum(['', 'normal', 'news']).describe('The search mode.'),
+});
+
 class MetaSearchAgent implements MetaSearchAgentType {
   private config: Config;
   private strParser = new StringOutputParser();
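The retriever chain below binds this schema to the model with `withStructuredOutput`, replacing the old line/XML output parsers, so the downstream lambda receives a typed object. A minimal sketch of the pattern; the model name is illustrative and the exact output is model-dependent:

import { ChatOpenAI } from '@langchain/openai';
import { z } from 'zod';

// Sketch: the schema constrains the model's reply; invoke() then resolves
// to a parsed object rather than a raw string.
const schema = z.object({
  query: z.string(),
  links: z.array(z.string()),
  searchRequired: z.boolean(),
  searchMode: z.enum(['', 'normal', 'news']),
});

async function rephrase(followUp: string) {
  const llm = new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0 });
  const structured = llm.withStructuredOutput(schema);
  return structured.invoke(`Follow-up question: ${followUp}`);
  // e.g. { query: 'what is Docker', links: [], searchRequired: true, searchMode: 'normal' }
}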
@@ -62,26 +74,24 @@ class MetaSearchAgent implements MetaSearchAgentType {
 
   private async createSearchRetrieverChain(llm: BaseChatModel) {
     (llm as unknown as ChatOpenAI).temperature = 0;
 
     return RunnableSequence.from([
       PromptTemplate.fromTemplate(this.config.queryGeneratorPrompt),
+      Object.assign(
+        Object.create(Object.getPrototypeOf(llm)),
         llm,
-      this.strParser,
-      RunnableLambda.from(async (input: string) => {
-        const linksOutputParser = new LineListOutputParser({
-          key: 'links',
-        });
-
-        const questionOutputParser = new LineOutputParser({
-          key: 'question',
-        });
-
-        const links = await linksOutputParser.parse(input);
-        let question = this.config.summarizer
-          ? await questionOutputParser.parse(input)
-          : input;
-
-        if (question === 'not_needed') {
+      ).withStructuredOutput(retrieverLLMOutputSchema, {
+        ...(llm.metadata?.['model-type'] === 'groq'
+          ? {
+              method: 'json-object',
+            }
+          : {}),
+      }),
+      RunnableLambda.from(
+        async (input: z.infer<typeof retrieverLLMOutputSchema>) => {
+          let question = input.query;
+          const links = input.links;
+
+          if (!input.searchRequired) {
             return { query: '', docs: [] };
           }
 
@@ -207,7 +217,10 @@ class MetaSearchAgent implements MetaSearchAgentType {
 
           const res = await searchSearxng(question, {
             language: 'en',
-            engines: this.config.activeEngines,
+            engines:
+              input.searchMode === 'normal'
+                ? this.config.activeEngines
+                : ['bing news'],
           });
 
           const documents = res.results.map(
@@ -228,7 +241,8 @@ class MetaSearchAgent implements MetaSearchAgentType {
 
           return { query: question, docs: documents };
         }
-      }),
+        },
+      ),
     ]);
   }
 