Mirror of https://github.com/ItzCrazyKns/Perplexica.git, synced 2025-12-14 23:58:14 +00:00.

**Compare commits** (4 commits): f618b713af, ed9ff3c20f, f21f5c9611, edc40d8fe6
**README.md**

@@ -38,7 +38,7 @@ Using SearxNG to stay current and fully open source, Perplexica ensures you alwa
 - **YouTube Search Mode:** Finds YouTube videos based on the search query.
 - **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
 - **Reddit Search Mode:** Searches Reddit for discussions and opinions related to the query.
-- **Current Information:** Some search tools might give you outdated info because they use data from crawling bots and convert them into embeddings and store them in a index. Unlike them, Perplexica uses SearxNG, a metasearch engine to get the results and rerank and get the most relevent source out of it, ensuring you always get the latest information without the overhead of daily data updates.
+- **Current Information:** Some search tools might give you outdated info because they use data from crawling bots and convert them into embeddings and store them in a index. Unlike them, Perplexica uses SearxNG, a metasearch engine to get the results and rerank and get the most relevant source out of it, ensuring you always get the latest information without the overhead of daily data updates.
 
 It has many more features like image and video search. Some of the planned features are mentioned in [upcoming features](#upcoming-features).
 
@@ -59,13 +59,11 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 
 4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
 
-   - `CHAT_MODEL`: The name of the LLM to use. Like `llama3:latest` (using Ollama), `gpt-3.5-turbo` (using OpenAI), etc.
-   - `CHAT_MODEL_PROVIDER`: The chat model provider, either `openai` or `ollama`. Depending upon which provider you use you would have to fill in the following fields:
-
-     - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
-     - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
-
-   **Note**: You can change these and use different models after running Perplexica as well from the settings page.
+   - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
+   - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
+   - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**
+
+   **Note**: You can change these after starting Perplexica from the settings dialog.
 
 - `SIMILARITY_MEASURE`: The similarity measure to use (This is filled by default; you can leave it as is if you are unsure about it.)
 
**package.json** (perplexica-backend)

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-backend",
-  "version": "1.0.0",
+  "version": "1.1.0",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
**sample.config.toml**

@@ -1,11 +1,10 @@
 [GENERAL]
 PORT = 3001 # Port to run the server on
 SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
-CHAT_MODEL_PROVIDER = "openai" # "openai" or "ollama"
-CHAT_MODEL = "gpt-3.5-turbo" # Name of the model to use
 
 [API_KEYS]
 OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
+GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
 
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
**src/config.ts**

@@ -8,11 +8,10 @@ interface Config {
   GENERAL: {
     PORT: number;
     SIMILARITY_MEASURE: string;
-    CHAT_MODEL_PROVIDER: string;
-    CHAT_MODEL: string;
   };
   API_KEYS: {
     OPENAI: string;
+    GROQ: string;
   };
   API_ENDPOINTS: {
     SEARXNG: string;
@@ -34,13 +33,10 @@ export const getPort = () => loadConfig().GENERAL.PORT;
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
 
-export const getChatModelProvider = () =>
-  loadConfig().GENERAL.CHAT_MODEL_PROVIDER;
-
-export const getChatModel = () => loadConfig().GENERAL.CHAT_MODEL;
-
 export const getOpenaiApiKey = () => loadConfig().API_KEYS.OPENAI;
 
+export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
+
 export const getSearxngApiEndpoint = () => loadConfig().API_ENDPOINTS.SEARXNG;
 
 export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
@@ -49,21 +45,19 @@ export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();
 
   for (const key in currentConfig) {
-    /* if (currentConfig[key] && !config[key]) {
-      config[key] = currentConfig[key];
-    } */
+    if (!config[key]) config[key] = {};
 
-    if (currentConfig[key] && typeof currentConfig[key] === 'object') {
+    if (typeof currentConfig[key] === 'object' && currentConfig[key] !== null) {
       for (const nestedKey in currentConfig[key]) {
         if (
-          currentConfig[key][nestedKey] &&
           !config[key][nestedKey] &&
+          currentConfig[key][nestedKey] &&
           config[key][nestedKey] !== ''
         ) {
           config[key][nestedKey] = currentConfig[key][nestedKey];
         }
       }
-    } else if (currentConfig[key] && !config[key] && config[key] !== '') {
+    } else if (currentConfig[key] && config[key] !== '') {
       config[key] = currentConfig[key];
     }
   }
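A note on the rewritten merge in `updateConfig`: the commit replaces the commented-out whole-section copy with a key-by-key backfill, so a partial update from the settings dialog keeps whatever is already in `config.toml` unless a field is explicitly overwritten (an explicit empty string still clears a value, since the `!== ''` guard skips the backfill). A minimal sketch of that behavior, covering only the nested-object case and using illustrative values that are not from the commit:

```ts
// Sketch of the backfill semantics in updateConfig (nested-object case only).
const currentConfig: Record<string, Record<string, string>> = {
  API_KEYS: { OPENAI: 'sk-existing', GROQ: '' },
  API_ENDPOINTS: { SEARXNG: 'http://localhost:32768' },
};

// Hypothetical partial update that only sets the new Groq key:
const update: Record<string, Record<string, string>> = {
  API_KEYS: { GROQ: 'gsk-new' },
};

for (const key in currentConfig) {
  if (!update[key]) update[key] = {}; // ensure the section exists
  for (const nestedKey in currentConfig[key]) {
    if (
      !update[key][nestedKey] && // unset (or empty) in the update...
      currentConfig[key][nestedKey] && // ...but present on disk...
      update[key][nestedKey] !== '' // ...and not an explicit clear
    ) {
      update[key][nestedKey] = currentConfig[key][nestedKey];
    }
  }
}

console.log(update);
// { API_KEYS: { GROQ: 'gsk-new', OPENAI: 'sk-existing' },
//   API_ENDPOINTS: { SEARXNG: 'http://localhost:32768' } }
```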
**src/lib/providers**

@@ -1,11 +1,16 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
-import { getOllamaApiEndpoint, getOpenaiApiKey } from '../config';
+import {
+  getGroqApiKey,
+  getOllamaApiEndpoint,
+  getOpenaiApiKey,
+} from '../config';
 import logger from '../utils/logger';
 
 export const getAvailableProviders = async () => {
   const openAIApiKey = getOpenaiApiKey();
+  const groqApiKey = getGroqApiKey();
   const ollamaEndpoint = getOllamaApiEndpoint();
 
   const models = {};
@@ -13,17 +18,17 @@ export const getAvailableProviders = async () => {
   if (openAIApiKey) {
     try {
       models['openai'] = {
-        'gpt-3.5-turbo': new ChatOpenAI({
+        'GPT-3.5 turbo': new ChatOpenAI({
           openAIApiKey,
           modelName: 'gpt-3.5-turbo',
           temperature: 0.7,
         }),
-        'gpt-4': new ChatOpenAI({
+        'GPT-4': new ChatOpenAI({
           openAIApiKey,
           modelName: 'gpt-4',
           temperature: 0.7,
         }),
-        'gpt-4-turbo': new ChatOpenAI({
+        'GPT-4 turbo': new ChatOpenAI({
           openAIApiKey,
           modelName: 'gpt-4-turbo',
           temperature: 0.7,
@@ -38,6 +43,59 @@ export const getAvailableProviders = async () => {
     }
   }
 
+  if (groqApiKey) {
+    try {
+      models['groq'] = {
+        'LLaMA3 8b': new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama3-8b-8192',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+        'LLaMA3 70b': new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama3-70b-8192',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+        'Mixtral 8x7b': new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'mixtral-8x7b-32768',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+        'Gemma 7b': new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'gemma-7b-it',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+        embeddings: new OpenAIEmbeddings({
+          openAIApiKey: openAIApiKey,
+          modelName: 'text-embedding-3-large',
+        }),
+      };
+    } catch (err) {
+      logger.error(`Error loading Groq models: ${err}`);
+    }
+  }
+
   if (ollamaEndpoint) {
     try {
       const response = await fetch(`${ollamaEndpoint}/api/tags`);
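Groq needs no dedicated LangChain class here because it exposes an OpenAI-compatible API: the hunk above simply points `ChatOpenAI` at Groq's `baseURL` with a Groq key. One design consequence visible in the same hunk: the `groq` provider fills its `embeddings` slot with `OpenAIEmbeddings` keyed by `openAIApiKey`, so embeddings still depend on an OpenAI key even when chat runs on Groq. A standalone sketch of the pattern (the prompt and environment variable name are illustrative):

```ts
import { ChatOpenAI } from '@langchain/openai';

async function main() {
  // Groq speaks the OpenAI wire protocol, so the stock client works once it
  // is given a Groq key (gsk_...) and pointed at Groq's endpoint.
  const llm = new ChatOpenAI(
    {
      openAIApiKey: process.env.GROQ_API_KEY,
      modelName: 'llama3-8b-8192',
      temperature: 0.7,
    },
    {
      baseURL: 'https://api.groq.com/openai/v1',
    },
  );

  const res = await llm.invoke('Reply with one word: hello');
  console.log(res.content);
}

main();
```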
**src/routes/config.ts**

@@ -1,8 +1,7 @@
 import express from 'express';
 import { getAvailableProviders } from '../lib/providers';
 import {
-  getChatModel,
-  getChatModelProvider,
+  getGroqApiKey,
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   updateConfig,
@@ -25,11 +24,9 @@ router.get('/', async (_, res) => {
     config['providers'][provider] = Object.keys(providers[provider]);
   }
 
-  config['selectedProvider'] = getChatModelProvider();
-  config['selectedChatModel'] = getChatModel();
-
   config['openeaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
+  config['groqApiKey'] = getGroqApiKey();
 
   res.status(200).json(config);
 });
@@ -38,12 +35,9 @@ router.post('/', async (req, res) => {
   const config = req.body;
 
   const updatedConfig = {
-    GENERAL: {
-      CHAT_MODEL_PROVIDER: config.selectedProvider,
-      CHAT_MODEL: config.selectedChatModel,
-    },
     API_KEYS: {
       OPENAI: config.openeaiApiKey,
+      GROQ: config.groqApiKey,
     },
     API_ENDPOINTS: {
       OLLAMA: config.ollamaApiUrl,
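After this change the settings round trip no longer carries a model selection: GET `/config` returns the provider-to-model-name map plus the stored keys, and POST `/config` persists only API keys and endpoints, since the chosen model now lives in the browser's localStorage. A sketch of the POST side; the base URL is an assumption (default `PORT = 3001` from the config above plus an `/api` mount prefix that this view does not show), and the values are placeholders:

```ts
async function saveSettings() {
  // Field names mirror the route above, including the 'openeai' spelling,
  // which is the actual key the backend reads.
  await fetch('http://localhost:3001/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      openeaiApiKey: 'sk-...',
      groqApiKey: 'gsk-...',
      ollamaApiUrl: 'http://host.docker.internal:11434',
    }),
  });
}
```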
**src/routes/images.ts**

@@ -2,7 +2,6 @@ import express from 'express';
 import handleImageSearch from '../agents/imageSearchAgent';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { getAvailableProviders } from '../lib/providers';
-import { getChatModel, getChatModelProvider } from '../config';
 import { HumanMessage, AIMessage } from '@langchain/core/messages';
 import logger from '../utils/logger';
 
@@ -10,7 +9,7 @@ const router = express.Router();
 
 router.post('/', async (req, res) => {
   try {
-    let { query, chat_history } = req.body;
+    let { query, chat_history, chat_model_provider, chat_model } = req.body;
 
     chat_history = chat_history.map((msg: any) => {
       if (msg.role === 'user') {
@@ -20,14 +19,14 @@ router.post('/', async (req, res) => {
       }
     });
 
-    const models = await getAvailableProviders();
-    const provider = getChatModelProvider();
-    const chatModel = getChatModel();
+    const chatModels = await getAvailableProviders();
+    const provider = chat_model_provider || Object.keys(chatModels)[0];
+    const chatModel = chat_model || Object.keys(chatModels[provider])[0];
 
     let llm: BaseChatModel | undefined;
 
-    if (models[provider] && models[provider][chatModel]) {
-      llm = models[provider][chatModel] as BaseChatModel | undefined;
+    if (chatModels[provider] && chatModels[provider][chatModel]) {
+      llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
     }
 
     if (!llm) {
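The image route now reads the provider and model from the request body instead of the server config, falling back to the first available provider and model when the fields are omitted. A hedged example request (same base-URL assumption as above; the display names match those registered by the providers hunk):

```ts
async function searchImages() {
  const res = await fetch('http://localhost:3001/api/images', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query: 'aurora borealis',
      chat_history: [],
      chat_model_provider: 'groq', // optional; defaults to the first provider
      chat_model: 'LLaMA3 8b', // optional; defaults to its first model
    }),
  });
  console.log(await res.json());
}
```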
**src/routes/index.ts**

@@ -2,11 +2,13 @@ import express from 'express';
 import imagesRouter from './images';
 import videosRouter from './videos';
 import configRouter from './config';
+import modelsRouter from './models';
 
 const router = express.Router();
 
 router.use('/images', imagesRouter);
 router.use('/videos', videosRouter);
 router.use('/config', configRouter);
+router.use('/models', modelsRouter);
 
 export default router;
**src/routes/models.ts** (new file, 18 lines)

@@ -0,0 +1,18 @@
+import express from 'express';
+import logger from '../utils/logger';
+import { getAvailableProviders } from '../lib/providers';
+
+const router = express.Router();
+
+router.get('/', async (req, res) => {
+  try {
+    const providers = await getAvailableProviders();
+
+    res.status(200).json({ providers });
+  } catch (err) {
+    res.status(500).json({ message: 'An error has occurred.' });
+    logger.error(err.message);
+  }
+});
+
+export default router;
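This endpoint is what lets the frontend discover providers before opening a WebSocket. Note that it serializes the provider map (whose values are model instances) directly, so clients should rely only on the keys, exactly as the `useSocket` hook below does. A consuming sketch under the same base-URL assumption:

```ts
async function pickDefaultModel() {
  const { providers } = await (
    await fetch('http://localhost:3001/api/models')
  ).json();

  // Same fallback the frontend applies: first provider, then its first model.
  const chatModelProvider = Object.keys(providers)[0];
  const chatModel = Object.keys(providers[chatModelProvider])[0];
  return { chatModelProvider, chatModel };
}
```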
**src/routes/videos.ts**

@@ -1,7 +1,6 @@
 import express from 'express';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { getAvailableProviders } from '../lib/providers';
-import { getChatModel, getChatModelProvider } from '../config';
 import { HumanMessage, AIMessage } from '@langchain/core/messages';
 import logger from '../utils/logger';
 import handleVideoSearch from '../agents/videoSearchAgent';
@@ -10,7 +9,7 @@ const router = express.Router();
 
 router.post('/', async (req, res) => {
   try {
-    let { query, chat_history } = req.body;
+    let { query, chat_history, chat_model_provider, chat_model } = req.body;
 
     chat_history = chat_history.map((msg: any) => {
       if (msg.role === 'user') {
@@ -20,14 +19,14 @@ router.post('/', async (req, res) => {
       }
     });
 
-    const models = await getAvailableProviders();
-    const provider = getChatModelProvider();
-    const chatModel = getChatModel();
+    const chatModels = await getAvailableProviders();
+    const provider = chat_model_provider || Object.keys(chatModels)[0];
+    const chatModel = chat_model || Object.keys(chatModels[provider])[0];
 
     let llm: BaseChatModel | undefined;
 
-    if (models[provider] && models[provider][chatModel]) {
-      llm = models[provider][chatModel] as BaseChatModel | undefined;
+    if (chatModels[provider] && chatModels[provider][chatModel]) {
+      llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
     }
 
     if (!llm) {
**WebSocket connection manager**

@@ -1,15 +1,23 @@
 import { WebSocket } from 'ws';
 import { handleMessage } from './messageHandler';
-import { getChatModel, getChatModelProvider } from '../config';
 import { getAvailableProviders } from '../lib/providers';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { Embeddings } from '@langchain/core/embeddings';
+import type { IncomingMessage } from 'http';
 import logger from '../utils/logger';
 
-export const handleConnection = async (ws: WebSocket) => {
+export const handleConnection = async (
+  ws: WebSocket,
+  request: IncomingMessage,
+) => {
+  const searchParams = new URL(request.url, `http://${request.headers.host}`)
+    .searchParams;
+
   const models = await getAvailableProviders();
-  const provider = getChatModelProvider();
-  const chatModel = getChatModel();
+  const provider =
+    searchParams.get('chatModelProvider') || Object.keys(models)[0];
+  const chatModel =
+    searchParams.get('chatModel') || Object.keys(models[provider])[0];
 
   let llm: BaseChatModel | undefined;
   let embeddings: Embeddings | undefined;
**WebSocket server**

@@ -10,9 +10,7 @@ export const initServer = (
   const port = getPort();
   const wss = new WebSocketServer({ server });
 
-  wss.on('connection', (ws) => {
-    handleConnection(ws);
-  });
+  wss.on('connection', handleConnection);
 
   logger.info(`WebSocket server started on port ${port}`);
 };
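Registering `handleConnection` directly works because the `ws` library invokes `'connection'` listeners with `(socket, request)`, which is how the handler above now sees the upgrade request and its query string. A minimal client-side sketch of opening such a connection (port per the config above; the provider and model values are illustrative). As in the commit, the parameters are not URL-encoded, so model names containing spaces, such as `GPT-4 turbo`, would be safer wrapped in `encodeURIComponent`:

```ts
const chatModelProvider = 'openai'; // e.g. read from localStorage
const chatModel = 'GPT-4';

// The backend reads both values from the connection URL's query string.
const ws = new WebSocket(
  `ws://localhost:3001?chatModel=${chatModel}&chatModelProvider=${chatModelProvider}`,
);

ws.onopen = () => console.log('connected with', chatModelProvider, chatModel);
```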
**Frontend: useSocket hook**

@@ -19,14 +19,42 @@ const useSocket = (url: string) => {
 
   useEffect(() => {
     if (!ws) {
-      const ws = new WebSocket(url);
-      ws.onopen = () => {
-        console.log('[DEBUG] open');
-        setWs(ws);
-      };
+      const connectWs = async () => {
+        let chatModel = localStorage.getItem('chatModel');
+        let chatModelProvider = localStorage.getItem('chatModelProvider');
+
+        if (!chatModel || !chatModelProvider) {
+          const chatModelProviders = await fetch(
+            `${process.env.NEXT_PUBLIC_API_URL}/models`,
+          ).then(async (res) => (await res.json())['providers']);
+
+          if (
+            !chatModelProviders ||
+            Object.keys(chatModelProviders).length === 0
+          )
+            return console.error('No chat models available');
+
+          chatModelProvider = Object.keys(chatModelProviders)[0];
+          chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
+
+          localStorage.setItem('chatModel', chatModel!);
+          localStorage.setItem('chatModelProvider', chatModelProvider);
+        }
+
+        const ws = new WebSocket(
+          `${url}?chatModel=${chatModel}&chatModelProvider=${chatModelProvider}`,
+        );
+        ws.onopen = () => {
+          console.log('[DEBUG] open');
+          setWs(ws);
+        };
+      };
+
+      connectWs();
     }
 
     return () => {
+      1;
       ws?.close();
       console.log('[DEBUG] closed');
     };
**Frontend: SearchImages component**

@@ -29,6 +29,10 @@ const SearchImages = ({
         <button
           onClick={async () => {
             setLoading(true);
+
+            const chatModelProvider = localStorage.getItem('chatModelProvider');
+            const chatModel = localStorage.getItem('chatModel');
+
             const res = await fetch(
               `${process.env.NEXT_PUBLIC_API_URL}/images`,
               {
@@ -39,6 +43,8 @@ const SearchImages = ({
                 body: JSON.stringify({
                   query: query,
                   chat_history: chat_history,
+                  chat_model_provider: chatModelProvider,
+                  chat_model: chatModel,
                 }),
               },
             );
**Frontend: Searchvideos component**

@@ -42,6 +42,10 @@ const Searchvideos = ({
         <button
           onClick={async () => {
             setLoading(true);
+
+            const chatModelProvider = localStorage.getItem('chatModelProvider');
+            const chatModel = localStorage.getItem('chatModel');
+
             const res = await fetch(
               `${process.env.NEXT_PUBLIC_API_URL}/videos`,
               {
@@ -52,6 +56,8 @@ const Searchvideos = ({
                 body: JSON.stringify({
                   query: query,
                   chat_history: chat_history,
+                  chat_model_provider: chatModelProvider,
+                  chat_model: chatModel,
                 }),
               },
             );
**Frontend: SettingsDialog component**

@@ -6,9 +6,8 @@ interface SettingsType {
   providers: {
     [key: string]: string[];
   };
-  selectedProvider: string;
-  selectedChatModel: string;
   openeaiApiKey: string;
+  groqApiKey: string;
   ollamaApiUrl: string;
 }
 
@@ -20,6 +19,12 @@ const SettingsDialog = ({
   setIsOpen: (isOpen: boolean) => void;
 }) => {
   const [config, setConfig] = useState<SettingsType | null>(null);
+  const [selectedChatModelProvider, setSelectedChatModelProvider] = useState<
+    string | null
+  >(null);
+  const [selectedChatModel, setSelectedChatModel] = useState<string | null>(
+    null,
+  );
   const [isLoading, setIsLoading] = useState(false);
   const [isUpdating, setIsUpdating] = useState(false);
 
@@ -38,6 +43,11 @@ const SettingsDialog = ({
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [isOpen]);
 
+  useEffect(() => {
+    setSelectedChatModelProvider(localStorage.getItem('chatModelProvider'));
+    setSelectedChatModel(localStorage.getItem('chatModel'));
+  }, []);
+
   const handleSubmit = async () => {
     setIsUpdating(true);
 
@@ -49,6 +59,9 @@ const SettingsDialog = ({
         },
         body: JSON.stringify(config),
       });
+
+      localStorage.setItem('chatModelProvider', selectedChatModelProvider!);
+      localStorage.setItem('chatModel', selectedChatModel!);
     } catch (err) {
       console.log(err);
     } finally {
@@ -100,21 +113,19 @@ const SettingsDialog = ({
                       Chat model Provider
                     </p>
                     <select
-                      onChange={(e) =>
-                        setConfig({
-                          ...config,
-                          selectedProvider: e.target.value,
-                          selectedChatModel:
-                            config.providers[e.target.value][0],
-                        })
-                      }
+                      onChange={(e) => {
+                        setSelectedChatModelProvider(e.target.value);
+                        setSelectedChatModel(
+                          config.providers[e.target.value][0],
+                        );
+                      }}
                       className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
                     >
                       {Object.keys(config.providers).map((provider) => (
                         <option
                           key={provider}
                           value={provider}
-                          selected={provider === config.selectedProvider}
+                          selected={provider === selectedChatModelProvider}
                         >
                           {provider.charAt(0).toUpperCase() +
                             provider.slice(1)}
@@ -123,29 +134,22 @@ const SettingsDialog = ({
                     </select>
                   </div>
                 )}
-                {config.selectedProvider && (
+                {selectedChatModelProvider && (
                   <div className="flex flex-col space-y-1">
                     <p className="text-white/70 text-sm">Chat Model</p>
                     <select
-                      onChange={(e) =>
-                        setConfig({
-                          ...config,
-                          selectedChatModel: e.target.value,
-                        })
-                      }
+                      onChange={(e) => setSelectedChatModel(e.target.value)}
                       className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
                     >
-                      {config.providers[config.selectedProvider] ? (
-                        config.providers[config.selectedProvider].length >
+                      {config.providers[selectedChatModelProvider] ? (
+                        config.providers[selectedChatModelProvider].length >
                         0 ? (
-                          config.providers[config.selectedProvider].map(
+                          config.providers[selectedChatModelProvider].map(
                             (model) => (
                               <option
                                 key={model}
                                 value={model}
-                                selected={
-                                  model === config.selectedChatModel
-                                }
+                                selected={model === selectedChatModel}
                               >
                                 {model}
                               </option>
@@ -194,6 +198,21 @@ const SettingsDialog = ({
                     className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
                   />
                 </div>
+                <div className="flex flex-col space-y-1">
+                  <p className="text-white/70 text-sm">GROQ API Key</p>
+                  <input
+                    type="text"
+                    placeholder="GROQ API Key"
+                    defaultValue={config.groqApiKey}
+                    onChange={(e) =>
+                      setConfig({
+                        ...config,
+                        groqApiKey: e.target.value,
+                      })
+                    }
+                    className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
+                  />
+                </div>
               </div>
             )}
             {isLoading && (
**package.json** (perplexica-frontend)

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.0.0",
+  "version": "1.1.0",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {