18 Commits

Author  SHA1  Message  Date
ItzCrazyKns  f88f179920  feat(package): bump version  2024-05-06 20:01:57 +05:30
ItzCrazyKns  4cb0aeeee3  feat(settings): conditionally pick selected models  2024-05-06 20:00:56 +05:30
ItzCrazyKns  e8fe74ae7c  feat(ws-managers): implement better error handling  2024-05-06 19:59:13 +05:30
ItzCrazyKns  ed47191d9b  feat(readme): update readme  2024-05-06 13:00:07 +05:30
ItzCrazyKns  b4d787d333  feat(readme): add troubleshooting  2024-05-06 12:58:40 +05:30
ItzCrazyKns  38b1995677  feat(package): bump version  2024-05-06 12:36:13 +05:30
ItzCrazyKns  f28257b480  feat(settings): fetch localStorage at state change  2024-05-06 12:34:59 +05:30
ItzCrazyKns  9b088cd161  feat(package): bump version  2024-05-05 16:35:06 +05:30
ItzCrazyKns  94ea6c372a  feat(chat-window): clear storage after error  2024-05-05 16:29:40 +05:30
ItzCrazyKns  6e61c88c9e  feat(error-object): add key  2024-05-05 16:28:46 +05:30
ItzCrazyKns  ba7b92ffde  feat(providers): add Content-Type header  2024-05-05 10:53:27 +05:30
ItzCrazyKns  f8fd2a6fb0  feat(package): bump version  2024-05-04 15:04:43 +05:30
ItzCrazyKns  0440a810f5  feat(http-headers): add Content-Type  2024-05-04 15:01:53 +05:30
ItzCrazyKns  e3fef3a1be  feat(chat-window): add error handling  2024-05-04 14:56:54 +05:30
ItzCrazyKns  4bf69dfdda  feat(package): bump version  2024-05-04 10:59:32 +05:30
ItzCrazyKns  9f45ecb98d  feat(providers): separate embedding providers, add custom-openai provider  2024-05-04 10:51:06 +05:30
ItzCrazyKns  c710f4f88c  feat(message-box): fix bugs  2024-05-04 10:48:42 +05:30
ItzCrazyKns  79f6a52b5b  feat(ui-packages): add react-text-to-speech, bump version  2024-05-03 21:16:48 +05:30
16 changed files with 525 additions and 120 deletions

View File

@@ -10,6 +10,7 @@
- [Installation](#installation)
- [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
- [Non-Docker Installation](#non-docker-installation)
- [Ollama connection errors](#ollama-connection-errors)
- [One-Click Deployment](#one-click-deployment)
- [Upcoming Features](#upcoming-features)
- [Support Us](#support-us)
@@ -90,6 +91,16 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
#### Ollama connection errors
If you're facing an Ollama connection error, it is usually because the backend cannot reach Ollama's API. You can fix it by updating your Ollama API URL in the settings menu to the following:
On Windows: `http://host.docker.internal:11434`<br>
On Mac: `http://host.docker.internal:11434`<br>
On Linux: `http://private_ip_of_computer_hosting_ollama:11434`
You need to adjust the port if your Ollama instance is running on a port other than the default `11434`.
## One-Click Deployment
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
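To sanity-check the URL from the troubleshooting section above before changing it in the settings menu, you can hit Ollama's `/api/tags` endpoint directly — the same call the backend makes. A minimal sketch, assuming Node 18+ so `fetch` is available globally; adjust `OLLAMA_URL` per the table above:

```ts
// Connectivity probe for the troubleshooting note above (illustrative, not
// part of this diff). Assumes Node 18+ for the global `fetch`.
const OLLAMA_URL = 'http://host.docker.internal:11434';

const checkOllama = async () => {
  try {
    const res = await fetch(`${OLLAMA_URL}/api/tags`, {
      headers: { 'Content-Type': 'application/json' },
    });
    const { models } = (await res.json()) as { models: { model: string }[] };
    console.log(`Ollama reachable, ${models.length} model(s) installed`);
  } catch (err) {
    console.error(`Cannot reach Ollama at ${OLLAMA_URL}: ${err}`);
  }
};

checkOllama();
```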

View File

@@ -1,6 +1,6 @@
{
"name": "perplexica-backend",
"version": "1.2.0",
"version": "1.3.4",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@@ -29,7 +29,6 @@
"dotenv": "^16.4.5",
"express": "^4.19.2",
"langchain": "^0.1.30",
"react-text-to-speech": "^0.14.5",
"winston": "^3.13.0",
"ws": "^8.16.0",
"zod": "^3.22.4"

View File

@@ -8,7 +8,7 @@ import {
} from '../config';
import logger from '../utils/logger';
export const getAvailableProviders = async () => {
export const getAvailableChatModelProviders = async () => {
const openAIApiKey = getOpenaiApiKey();
const groqApiKey = getGroqApiKey();
const ollamaEndpoint = getOllamaApiEndpoint();
@@ -33,10 +33,6 @@ export const getAvailableProviders = async () => {
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
embeddings: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
};
} catch (err) {
logger.error(`Error loading OpenAI models: ${err}`);
@@ -86,10 +82,6 @@ export const getAvailableProviders = async () => {
baseURL: 'https://api.groq.com/openai/v1',
},
),
embeddings: new OpenAIEmbeddings({
openAIApiKey: openAIApiKey,
modelName: 'text-embedding-3-large',
}),
};
} catch (err) {
logger.error(`Error loading Groq models: ${err}`);
@@ -98,7 +90,11 @@ export const getAvailableProviders = async () => {
if (ollamaEndpoint) {
try {
const response = await fetch(`${ollamaEndpoint}/api/tags`);
const response = await fetch(`${ollamaEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = (await response.json()) as any;
@@ -110,17 +106,60 @@ export const getAvailableProviders = async () => {
});
return acc;
}, {});
if (Object.keys(models['ollama']).length > 0) {
models['ollama']['embeddings'] = new OllamaEmbeddings({
baseUrl: ollamaEndpoint,
model: models['ollama'][Object.keys(models['ollama'])[0]].model,
});
}
} catch (err) {
logger.error(`Error loading Ollama models: ${err}`);
}
}
models['custom_openai'] = {};
return models;
};
export const getAvailableEmbeddingModelProviders = async () => {
const openAIApiKey = getOpenaiApiKey();
const ollamaEndpoint = getOllamaApiEndpoint();
const models = {};
if (openAIApiKey) {
try {
models['openai'] = {
'Text embedding 3 small': new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-small',
}),
'Text embedding 3 large': new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
};
} catch (err) {
logger.error(`Error loading OpenAI embeddings: ${err}`);
}
}
if (ollamaEndpoint) {
try {
const response = await fetch(`${ollamaEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = (await response.json()) as any;
models['ollama'] = ollamaModels.reduce((acc, model) => {
acc[model.model] = new OllamaEmbeddings({
baseUrl: ollamaEndpoint,
model: model.model,
});
return acc;
}, {});
} catch (err) {
logger.error(`Error loading Ollama embeddings: ${err}`);
}
}
return models;
};
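With this split, callers that previously pulled an `embeddings` entry out of the chat-provider map now fetch the two registries independently. A minimal sketch of a consumer — names follow the diff above, and the first-key selection mirrors what the websocket handler further down does:

```ts
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '../lib/providers';

// Each registry maps provider -> { modelName: modelInstance }, so a caller
// picks a provider key first, then a model key inside it.
const pickDefaultModels = async () => {
  const [chatProviders, embeddingProviders] = await Promise.all([
    getAvailableChatModelProviders(),
    getAvailableEmbeddingModelProviders(),
  ]);

  const chatProvider = Object.keys(chatProviders)[0];
  const chatModel = Object.keys(chatProviders[chatProvider])[0];
  const embeddingProvider = Object.keys(embeddingProviders)[0];
  const embeddingModel = Object.keys(embeddingProviders[embeddingProvider])[0];

  return {
    llm: chatProviders[chatProvider][chatModel],
    embeddings: embeddingProviders[embeddingProvider][embeddingModel],
  };
};
```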

View File

@@ -1,5 +1,8 @@
import express from 'express';
import { getAvailableProviders } from '../lib/providers';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import {
getGroqApiKey,
getOllamaApiEndpoint,
@@ -12,16 +15,24 @@ const router = express.Router();
router.get('/', async (_, res) => {
const config = {};
const providers = await getAvailableProviders();
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
for (const provider in providers) {
delete providers[provider]['embeddings'];
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
);
}
config['providers'] = {};
for (const provider in providers) {
config['providers'][provider] = Object.keys(providers[provider]);
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
);
}
config['openaiApiKey'] = getOpenaiApiKey();
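For reference, the `/config` response now carries both registries as plain model-name lists rather than a single `providers` map. A sketch of the resulting shape, inferred from the handler above and from the frontend's `SettingsType` later in this diff (the type itself is illustrative; the backend does not export one):

```ts
// Inferred response shape for GET /config (illustrative only).
type ConfigResponse = {
  chatModelProviders: { [provider: string]: string[] };      // provider -> model names
  embeddingModelProviders: { [provider: string]: string[] }; // provider -> model names
  openaiApiKey: string;
  // The Groq key and Ollama endpoint are added further down in the handler.
};
```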

View File

@@ -1,7 +1,7 @@
import express from 'express';
import handleImageSearch from '../agents/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableProviders } from '../lib/providers';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
@@ -19,7 +19,7 @@ router.post('/', async (req, res) => {
}
});
const chatModels = await getAvailableProviders();
const chatModels = await getAvailableChatModelProviders();
const provider = chat_model_provider || Object.keys(chatModels)[0];
const chatModel = chat_model || Object.keys(chatModels[provider])[0];

View File

@@ -1,14 +1,20 @@
import express from 'express';
import logger from '../utils/logger';
import { getAvailableProviders } from '../lib/providers';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const providers = await getAvailableProviders();
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
res.status(200).json({ providers });
res.status(200).json({ chatModelProviders, embeddingModelProviders });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(err.message);

View File

@@ -1,6 +1,6 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableProviders } from '../lib/providers';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../agents/videoSearchAgent';
@@ -19,7 +19,7 @@ router.post('/', async (req, res) => {
}
});
const chatModels = await getAvailableProviders();
const chatModels = await getAvailableChatModelProviders();
const provider = chat_model_provider || Object.keys(chatModels)[0];
const chatModel = chat_model || Object.keys(chatModels[provider])[0];

View File

@@ -1,37 +1,79 @@
import { WebSocket } from 'ws';
import { handleMessage } from './messageHandler';
import { getAvailableProviders } from '../lib/providers';
import {
getAvailableEmbeddingModelProviders,
getAvailableChatModelProviders,
} from '../lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import type { IncomingMessage } from 'http';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
export const handleConnection = async (
ws: WebSocket,
request: IncomingMessage,
) => {
try {
const searchParams = new URL(request.url, `http://${request.headers.host}`)
.searchParams;
const models = await getAvailableProviders();
const provider =
searchParams.get('chatModelProvider') || Object.keys(models)[0];
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
searchParams.get('chatModelProvider') ||
Object.keys(chatModelProviders)[0];
const chatModel =
searchParams.get('chatModel') || Object.keys(models[provider])[0];
searchParams.get('chatModel') ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
searchParams.get('embeddingModelProvider') ||
Object.keys(embeddingModelProviders)[0];
const embeddingModel =
searchParams.get('embeddingModel') ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (models[provider] && models[provider][chatModel]) {
llm = models[provider][chatModel] as BaseChatModel | undefined;
embeddings = models[provider].embeddings as Embeddings | undefined;
if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel] &&
chatModelProvider != 'custom_openai'
) {
llm = chatModelProviders[chatModelProvider][chatModel] as
| BaseChatModel
| undefined;
} else if (chatModelProvider == 'custom_openai') {
llm = new ChatOpenAI({
modelName: chatModel,
openAIApiKey: searchParams.get('openAIApiKey'),
temperature: 0.7,
configuration: {
baseURL: searchParams.get('openAIBaseURL'),
},
});
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
] as Embeddings | undefined;
}
if (!llm || !embeddings) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid LLM or embeddings model selected',
data: 'Invalid LLM or embeddings model selected, please refresh the page and try again.',
key: 'INVALID_MODEL_SELECTED',
}),
);
ws.close();
@@ -44,4 +86,15 @@ export const handleConnection = async (
);
ws.on('close', () => logger.debug('Connection closed'));
} catch (err) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Internal server error.',
key: 'INTERNAL_SERVER_ERROR',
}),
);
ws.close();
logger.error(err);
}
};
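The connection handler now reads the full model selection from the socket's query string, plus the custom OpenAI credentials when `chatModelProvider` is `custom_openai`. A sketch of a conforming client URL — the parameter names come from the handler above; the host, port, and values are illustrative (the real frontend equivalent appears in ChatWindow.tsx later in this diff):

```ts
const params = new URLSearchParams({
  chatModelProvider: 'ollama',      // illustrative value
  chatModel: 'llama2',              // illustrative value
  embeddingModelProvider: 'ollama', // illustrative value
  embeddingModel: 'llama2',         // illustrative value
});
// For custom_openai, the client would also append openAIApiKey and openAIBaseURL.
const socket = new WebSocket(`ws://localhost:3001?${params.toString()}`);
```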

View File

@@ -57,7 +57,13 @@ const handleEmitterEvents = (
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
ws.send(JSON.stringify({ type: 'error', data: parsedData.data }));
ws.send(
JSON.stringify({
type: 'error',
data: parsedData.data,
key: 'CHAIN_ERROR',
}),
);
});
};
@@ -73,7 +79,11 @@ export const handleMessage = async (
if (!parsedMessage.content)
return ws.send(
JSON.stringify({ type: 'error', data: 'Invalid message format' }),
JSON.stringify({
type: 'error',
data: 'Invalid message format',
key: 'INVALID_FORMAT',
}),
);
const history: BaseMessage[] = parsedMessage.history.map((msg) => {
@@ -99,11 +109,23 @@
);
handleEmitterEvents(emitter, ws, id);
} else {
ws.send(JSON.stringify({ type: 'error', data: 'Invalid focus mode' }));
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid focus mode',
key: 'INVALID_FOCUS_MODE',
}),
);
}
}
} catch (err) {
ws.send(JSON.stringify({ type: 'error', data: 'Invalid message format' }));
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid message format',
key: 'INVALID_FORMAT',
}),
);
logger.error(`Failed to handle message: ${err}`);
}
};
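Taken together, every websocket error in this change now shares one envelope: a human-readable message plus a machine-readable `key` the client can branch on. The keys below are collected from the hunks above; the type itself is illustrative, as the backend does not export one:

```ts
type WsErrorMessage = {
  type: 'error';
  data: string; // human-readable message, surfaced via toast on the client
  key:
    | 'INVALID_MODEL_SELECTED' // client clears localStorage and reselects models
    | 'INTERNAL_SERVER_ERROR'
    | 'CHAIN_ERROR'
    | 'INVALID_FORMAT'
    | 'INVALID_FOCUS_MODE';
};
```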

View File

@@ -3,6 +3,7 @@ import { Montserrat } from 'next/font/google';
import './globals.css';
import { cn } from '@/lib/utils';
import Sidebar from '@/components/Sidebar';
import { Toaster } from 'sonner';
const montserrat = Montserrat({
weight: ['300', '400', '500', '700'],
@@ -26,6 +27,15 @@ export default function RootLayout({
<html className="h-full" lang="en">
<body className={cn('h-full', montserrat.className)}>
<Sidebar>{children}</Sidebar>
<Toaster
toastOptions={{
unstyled: true,
classNames: {
toast:
'bg-[#111111] text-white rounded-lg p-4 flex flex-row items-center space-x-2',
},
}}
/>
</body>
</html>
);

View File

@@ -5,6 +5,7 @@ import { Document } from '@langchain/core/documents';
import Navbar from './Navbar';
import Chat from './Chat';
import EmptyChat from './EmptyChat';
import { toast } from 'sonner';
export type Message = {
id: string;
@@ -22,39 +23,102 @@ const useSocket = (url: string) => {
const connectWs = async () => {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem(
'embeddingModelProvider',
);
if (!chatModel || !chatModelProvider) {
const chatModelProviders = await fetch(
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
const providers = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/models`,
).then(async (res) => (await res.json())['providers']);
{
headers: {
'Content-Type': 'application/json',
},
},
).then(async (res) => await res.json());
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return console.error('No chat models available');
return toast.error('No chat models available');
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
chatModelProvider = Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
}
const ws = new WebSocket(
`${url}?chatModel=${chatModel}&chatModelProvider=${chatModelProvider}`,
const wsURL = new URL(url);
const searchParams = new URLSearchParams({});
searchParams.append('chatModel', chatModel!);
searchParams.append('chatModelProvider', chatModelProvider);
if (chatModelProvider === 'custom_openai') {
searchParams.append(
'openAIApiKey',
localStorage.getItem('openAIApiKey')!,
);
searchParams.append(
'openAIBaseURL',
localStorage.getItem('openAIBaseURL')!,
);
}
searchParams.append('embeddingModel', embeddingModel!);
searchParams.append('embeddingModelProvider', embeddingModelProvider);
wsURL.search = searchParams.toString();
const ws = new WebSocket(wsURL.toString());
ws.onopen = () => {
console.log('[DEBUG] open');
setWs(ws);
};
ws.onmessage = (e) => {
const parsedData = JSON.parse(e.data);
if (parsedData.type === 'error') {
toast.error(parsedData.data);
if (parsedData.key === 'INVALID_MODEL_SELECTED') {
localStorage.clear();
}
}
};
};
connectWs();
}
return () => {
ws?.close();
console.log('[DEBUG] closed');
};
@@ -102,6 +166,12 @@ const ChatWindow = () => {
const messageHandler = (e: MessageEvent) => {
const data = JSON.parse(e.data);
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
return;
}
if (data.type === 'sources') {
sources = data.data;
if (!added) {

View File

@@ -34,15 +34,13 @@ const MessageBox = ({
const [speechMessage, setSpeechMessage] = useState(message.content);
useEffect(() => {
const regex = /\[(\d+)\]/g;
if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length > 0
) {
const regex = /\[(\d+)\]/g;
setSpeechMessage(message.content.replace(regex, ''));
return setParsedMessage(
message.content.replace(
regex,
@@ -51,6 +49,8 @@ const MessageBox = ({
),
);
}
setSpeechMessage(message.content.replace(regex, ''));
setParsedMessage(message.content);
}, [message.content, message.sources, message.role]);
@@ -95,7 +95,7 @@ const MessageBox = ({
<Markdown className="prose max-w-none break-words prose-invert prose-p:leading-relaxed prose-pre:p-0 text-white text-sm md:text-base font-medium">
{parsedMessage}
</Markdown>
{!loading && (
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-white py-4 -mx-2">
<div className="flex flex-row items-center space-x-1">
<button className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white">
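The hoisted `regex` above now runs on both render paths: when sources exist, `[n]` markers become citation links, and in every case they are stripped from the text handed to `react-text-to-speech`. A quick illustration of the stripping half (input is illustrative):

```ts
// Mirrors the setSpeechMessage call above.
const regex = /\[(\d+)\]/g;
const content = 'Perplexica is open source [1] and self-hostable [2].';

console.log(content.replace(regex, ''));
// -> "Perplexica is open source  and self-hostable ."
```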

View File

@@ -3,7 +3,10 @@ import { CloudUpload, RefreshCcw, RefreshCw } from 'lucide-react';
import React, { Fragment, useEffect, useState } from 'react';
interface SettingsType {
providers: {
chatModelProviders: {
[key: string]: string[];
};
embeddingModelProviders: {
[key: string]: string[];
};
openaiApiKey: string;
@@ -25,6 +28,13 @@ const SettingsDialog = ({
const [selectedChatModel, setSelectedChatModel] = useState<string | null>(
null,
);
const [selectedEmbeddingModelProvider, setSelectedEmbeddingModelProvider] =
useState<string | null>(null);
const [selectedEmbeddingModel, setSelectedEmbeddingModel] = useState<
string | null
>(null);
const [customOpenAIApiKey, setCustomOpenAIApiKey] = useState<string>('');
const [customOpenAIBaseURL, setCustomOpenAIBaseURL] = useState<string>('');
const [isLoading, setIsLoading] = useState(false);
const [isUpdating, setIsUpdating] = useState(false);
@@ -32,9 +42,54 @@
if (isOpen) {
const fetchConfig = async () => {
setIsLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`);
const data = await res.json();
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
headers: {
'Content-Type': 'application/json',
},
});
const data = (await res.json()) as SettingsType;
setConfig(data);
const chatModelProvidersKeys = Object.keys(
data.chatModelProviders || {},
);
const embeddingModelProvidersKeys = Object.keys(
data.embeddingModelProviders || {},
);
const defaultChatModelProvider =
chatModelProvidersKeys.length > 0 ? chatModelProvidersKeys[0] : '';
const defaultEmbeddingModelProvider =
embeddingModelProvidersKeys.length > 0
? embeddingModelProvidersKeys[0]
: '';
const chatModelProvider =
localStorage.getItem('chatModelProvider') ||
defaultChatModelProvider ||
'';
const chatModel =
localStorage.getItem('chatModel') ||
(data.chatModelProviders &&
data.chatModelProviders[chatModelProvider]?.[0]) ||
'';
const embeddingModelProvider =
localStorage.getItem('embeddingModelProvider') ||
defaultEmbeddingModelProvider ||
'';
const embeddingModel =
localStorage.getItem('embeddingModel') ||
(data.embeddingModelProviders &&
data.embeddingModelProviders[embeddingModelProvider]?.[0]) ||
'';
setSelectedChatModelProvider(chatModelProvider);
setSelectedChatModel(chatModel);
setSelectedEmbeddingModelProvider(embeddingModelProvider);
setSelectedEmbeddingModel(embeddingModel);
setCustomOpenAIApiKey(localStorage.getItem('openAIApiKey') || '');
setCustomOpenAIBaseURL(localStorage.getItem('openAIBaseURL') || '');
setIsLoading(false);
};
@@ -43,11 +98,6 @@ const SettingsDialog = ({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isOpen]);
useEffect(() => {
setSelectedChatModelProvider(localStorage.getItem('chatModelProvider'));
setSelectedChatModel(localStorage.getItem('chatModel'));
}, []);
const handleSubmit = async () => {
setIsUpdating(true);
@@ -62,6 +112,13 @@
localStorage.setItem('chatModelProvider', selectedChatModelProvider!);
localStorage.setItem('chatModel', selectedChatModel!);
localStorage.setItem(
'embeddingModelProvider',
selectedEmbeddingModelProvider!,
);
localStorage.setItem('embeddingModel', selectedEmbeddingModel!);
localStorage.setItem('openAIApiKey', customOpenAIApiKey!);
localStorage.setItem('openAIBaseURL', customOpenAIBaseURL!);
} catch (err) {
console.log(err);
} finally {
@@ -107,7 +164,7 @@
</Dialog.Title>
{config && !isLoading && (
<div className="flex flex-col space-y-4 mt-6">
{config.providers && (
{config.chatModelProviders && (
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">
Chat model Provider
@@ -116,36 +173,47 @@
onChange={(e) => {
setSelectedChatModelProvider(e.target.value);
setSelectedChatModel(
config.providers[e.target.value][0],
config.chatModelProviders[e.target.value][0],
);
}}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
>
{Object.keys(config.providers).map((provider) => (
{Object.keys(config.chatModelProviders).map(
(provider) => (
<option
key={provider}
value={provider}
selected={provider === selectedChatModelProvider}
selected={
provider === selectedChatModelProvider
}
>
{provider.charAt(0).toUpperCase() +
provider.slice(1)}
</option>
))}
),
)}
</select>
</div>
)}
{selectedChatModelProvider && (
{selectedChatModelProvider &&
selectedChatModelProvider != 'custom_openai' && (
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">Chat Model</p>
<select
onChange={(e) => setSelectedChatModel(e.target.value)}
onChange={(e) =>
setSelectedChatModel(e.target.value)
}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
>
{config.providers[selectedChatModelProvider] ? (
config.providers[selectedChatModelProvider].length >
0 ? (
config.providers[selectedChatModelProvider].map(
(model) => (
{config.chatModelProviders[
selectedChatModelProvider
] ? (
config.chatModelProviders[
selectedChatModelProvider
].length > 0 ? (
config.chatModelProviders[
selectedChatModelProvider
].map((model) => (
<option
key={model}
value={model}
@@ -153,8 +221,7 @@ const SettingsDialog = ({
>
{model}
</option>
),
)
))
) : (
<option value="" disabled selected>
No models available
@@ -168,6 +235,122 @@
</select>
</div>
)}
{selectedChatModelProvider &&
selectedChatModelProvider === 'custom_openai' && (
<>
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">Model name</p>
<input
type="text"
placeholder="Model name"
defaultValue={selectedChatModel!}
onChange={(e) =>
setSelectedChatModel(e.target.value)
}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">
Custom OpenAI API Key
</p>
<input
type="text"
placeholder="Custom OpenAI API Key"
defaultValue={customOpenAIApiKey!}
onChange={(e) =>
setCustomOpenAIApiKey(e.target.value)
}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">
Custom OpenAI Base URL
</p>
<input
type="text"
placeholder="Custom OpenAI Base URL"
defaultValue={customOpenAIBaseURL!}
onChange={(e) =>
setCustomOpenAIBaseURL(e.target.value)
}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
/>
</div>
</>
)}
{/* Embedding models */}
{config.embeddingModelProviders && (
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">
Embedding model Provider
</p>
<select
onChange={(e) => {
setSelectedEmbeddingModelProvider(e.target.value);
setSelectedEmbeddingModel(
config.embeddingModelProviders[e.target.value][0],
);
}}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
>
{Object.keys(config.embeddingModelProviders).map(
(provider) => (
<option
key={provider}
value={provider}
selected={
provider === selectedEmbeddingModelProvider
}
>
{provider.charAt(0).toUpperCase() +
provider.slice(1)}
</option>
),
)}
</select>
</div>
)}
{selectedEmbeddingModelProvider && (
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">Embedding Model</p>
<select
onChange={(e) =>
setSelectedEmbeddingModel(e.target.value)
}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
>
{config.embeddingModelProviders[
selectedEmbeddingModelProvider
] ? (
config.embeddingModelProviders[
selectedEmbeddingModelProvider
].length > 0 ? (
config.embeddingModelProviders[
selectedEmbeddingModelProvider
].map((model) => (
<option
key={model}
value={model}
selected={model === selectedEmbeddingModel}
>
{model}
</option>
))
) : (
<option value="" disabled selected>
No embedding models available
</option>
)
) : (
<option value="" disabled selected>
Invalid provider, please check backend logs
</option>
)}
</select>
</div>
)}
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">OpenAI API Key</p>
<input
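After `handleSubmit`, the dialog persists the full model selection to `localStorage`, and the connection code reads the same keys back when opening the websocket. The keys involved, collected from the hunks above (the helper itself is a sketch, not part of the diff):

```ts
// Illustrative helper listing the localStorage keys this change writes.
const readPersistedSettings = () => ({
  chatModelProvider: localStorage.getItem('chatModelProvider'),
  chatModel: localStorage.getItem('chatModel'),
  embeddingModelProvider: localStorage.getItem('embeddingModelProvider'),
  embeddingModel: localStorage.getItem('embeddingModel'),
  openAIApiKey: localStorage.getItem('openAIApiKey'),   // custom_openai only
  openAIBaseURL: localStorage.getItem('openAIBaseURL'), // custom_openai only
});
```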

View File

@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.2.0",
"version": "1.3.4",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@@ -22,8 +22,9 @@
"next": "14.1.4",
"react": "^18",
"react-dom": "^18",
"react-speech-kit": "^3.0.1",
"react-text-to-speech": "^0.14.5",
"react-textarea-autosize": "^8.5.3",
"sonner": "^1.4.41",
"tailwind-merge": "^2.2.2",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"

View File

@@ -2632,10 +2632,10 @@ react-is@^16.13.1:
resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4"
integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==
react-speech-kit@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/react-speech-kit/-/react-speech-kit-3.0.1.tgz#8bd936adfe064be1c5a07e2992dfdfd772e80d14"
integrity sha512-MXNOciISanhmnxpHJkBOev3M3NPDpW1T7nTc/eGw5pO9cXpoUccRxZkmr/IlpTPbPEneDNeTmbwri/YweyctZg==
react-text-to-speech@^0.14.5:
version "0.14.5"
resolved "https://registry.yarnpkg.com/react-text-to-speech/-/react-text-to-speech-0.14.5.tgz#f918786ab283311535682011045bd49777193300"
integrity sha512-3brr/IrK/5YTtOZSTo+Y8b+dnWelzfZiDZvkXnOct1e7O7fgA/h9bYAVrtwSRo/VxKfdw+wh6glkj6M0mlQuQQ==
react-textarea-autosize@^8.5.3:
version "8.5.3"
@@ -2839,6 +2839,11 @@ slash@^3.0.0:
resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634"
integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==
sonner@^1.4.41:
version "1.4.41"
resolved "https://registry.yarnpkg.com/sonner/-/sonner-1.4.41.tgz#ff085ae4f4244713daf294959beaa3e90f842d2c"
integrity sha512-uG511ggnnsw6gcn/X+YKkWPo5ep9il9wYi3QJxHsYe7yTZ4+cOd1wuodOUmOpFuXL+/RE3R04LczdNCDygTDgQ==
source-map-js@^1.0.2, source-map-js@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.0.tgz#16b809c162517b5b8c3e7dcd315a2a5c2612b2af"

View File

@@ -1291,11 +1291,6 @@ raw-body@2.5.2:
iconv-lite "0.4.24"
unpipe "1.0.0"
react-text-to-speech@^0.14.5:
version "0.14.5"
resolved "https://registry.yarnpkg.com/react-text-to-speech/-/react-text-to-speech-0.14.5.tgz#f918786ab283311535682011045bd49777193300"
integrity sha512-3brr/IrK/5YTtOZSTo+Y8b+dnWelzfZiDZvkXnOct1e7O7fgA/h9bYAVrtwSRo/VxKfdw+wh6glkj6M0mlQuQQ==
readable-stream@^3.4.0, readable-stream@^3.6.0:
version "3.6.2"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967"