4 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| ItzCrazyKns | f618b713af | feat(chatModels): load model from localstorage | 2024-05-02 12:14:26 +05:30 |
| ItzCrazyKns | ed9ff3c20f | feat(providers): use correct model name | 2024-05-02 12:09:25 +05:30 |
| ItzCrazyKns | f21f5c9611 | feat(readme): correct spellings, closes #32 | 2024-05-01 20:12:58 +05:30 |
| ItzCrazyKns | edc40d8fe6 | feat(providers): add Groq provider | 2024-05-01 19:43:06 +05:30 |
17 changed files with 212 additions and 86 deletions

View File

@@ -38,7 +38,7 @@ Using SearxNG to stay current and fully open source, Perplexica ensures you alwa
- **YouTube Search Mode:** Finds YouTube videos based on the search query.
- **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
- **Reddit Search Mode:** Searches Reddit for discussions and opinions related to the query.
- **Current Information:** Some search tools might give you outdated info because they use data from crawling bots and convert them into embeddings and store them in a index. Unlike them, Perplexica uses SearxNG, a metasearch engine to get the results and rerank and get the most relevent source out of it, ensuring you always get the latest information without the overhead of daily data updates.
- **Current Information:** Some search tools might give you outdated info because they use data from crawling bots and convert them into embeddings and store them in a index. Unlike them, Perplexica uses SearxNG, a metasearch engine to get the results and rerank and get the most relevant source out of it, ensuring you always get the latest information without the overhead of daily data updates.
It has many more features like image and video search. Some of the planned features are mentioned in [upcoming features](#upcoming-features).
@@ -59,13 +59,11 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
- `CHAT_MODEL`: The name of the LLM to use. Like `llama3:latest` (using Ollama), `gpt-3.5-turbo` (using OpenAI), etc.
- `CHAT_MODEL_PROVIDER`: The chat model provider, either `openai` or `ollama`. Depending upon which provider you use you would have to fill in the following fields:
- `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**
- `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
**Note**: You can change these and use different models after running Perplexica as well from the settings page.
**Note**: You can change these after starting Perplexica from the settings dialog.
- `SIMILARITY_MEASURE`: The similarity measure to use (This is filled by default; you can leave it as is if you are unsure about it.)
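For reference, a filled-in `config.toml` for a Docker setup might look like the sketch below. The values are illustrative: the SearxNG URL and Ollama port depend on your environment, and note that `OLLAMA` is read from `[API_ENDPOINTS]`, not `[API_KEYS]`.

```toml
# Illustrative config.toml for a Docker setup (after this change, the chat model
# is picked from the settings dialog, so no CHAT_MODEL fields are needed here).
[GENERAL]
PORT = 3001                     # Port to run the server on
SIMILARITY_MEASURE = "cosine"   # "cosine" or "dot"

[API_KEYS]
OPENAI = ""   # only needed for OpenAI's models
GROQ = ""     # only needed for Groq's hosted models

[API_ENDPOINTS]
SEARXNG = "http://localhost:32768"             # SearxNG API URL
OLLAMA = "http://host.docker.internal:11434"   # only needed for Ollama's models
```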

View File

@@ -1,6 +1,6 @@
{
"name": "perplexica-backend",
"version": "1.0.0",
"version": "1.1.0",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {

View File

@@ -1,11 +1,10 @@
[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
CHAT_MODEL_PROVIDER = "openai" # "openai" or "ollama"
CHAT_MODEL = "gpt-3.5-turbo" # Name of the model to use
[API_KEYS]
OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL

View File

@@ -8,11 +8,10 @@ interface Config {
GENERAL: {
PORT: number;
SIMILARITY_MEASURE: string;
CHAT_MODEL_PROVIDER: string;
CHAT_MODEL: string;
};
API_KEYS: {
OPENAI: string;
GROQ: string;
};
API_ENDPOINTS: {
SEARXNG: string;
@@ -34,13 +33,10 @@ export const getPort = () => loadConfig().GENERAL.PORT;
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
export const getChatModelProvider = () =>
loadConfig().GENERAL.CHAT_MODEL_PROVIDER;
export const getChatModel = () => loadConfig().GENERAL.CHAT_MODEL;
export const getOpenaiApiKey = () => loadConfig().API_KEYS.OPENAI;
export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
export const getSearxngApiEndpoint = () => loadConfig().API_ENDPOINTS.SEARXNG;
export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
@@ -49,21 +45,19 @@ export const updateConfig = (config: RecursivePartial<Config>) => {
const currentConfig = loadConfig();
for (const key in currentConfig) {
/* if (currentConfig[key] && !config[key]) {
config[key] = currentConfig[key];
} */
if (!config[key]) config[key] = {};
if (currentConfig[key] && typeof currentConfig[key] === 'object') {
if (typeof currentConfig[key] === 'object' && currentConfig[key] !== null) {
for (const nestedKey in currentConfig[key]) {
if (
currentConfig[key][nestedKey] &&
!config[key][nestedKey] &&
currentConfig[key][nestedKey] &&
config[key][nestedKey] !== ''
) {
config[key][nestedKey] = currentConfig[key][nestedKey];
}
}
} else if (currentConfig[key] && !config[key] && config[key] !== '') {
} else if (currentConfig[key] && config[key] !== '') {
config[key] = currentConfig[key];
}
}

View File

@@ -1,11 +1,16 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getOllamaApiEndpoint, getOpenaiApiKey } from '../config';
import {
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
} from '../config';
import logger from '../utils/logger';
export const getAvailableProviders = async () => {
const openAIApiKey = getOpenaiApiKey();
const groqApiKey = getGroqApiKey();
const ollamaEndpoint = getOllamaApiEndpoint();
const models = {};
@@ -13,17 +18,17 @@ export const getAvailableProviders = async () => {
if (openAIApiKey) {
try {
models['openai'] = {
'gpt-3.5-turbo': new ChatOpenAI({
'GPT-3.5 turbo': new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-3.5-turbo',
temperature: 0.7,
}),
'gpt-4': new ChatOpenAI({
'GPT-4': new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
'gpt-4-turbo': new ChatOpenAI({
'GPT-4 turbo': new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
@@ -38,6 +43,59 @@ export const getAvailableProviders = async () => {
}
}
if (groqApiKey) {
try {
models['groq'] = {
'LLaMA3 8b': new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
'LLaMA3 70b': new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
'Mixtral 8x7b': new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
'Gemma 7b': new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma-7b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
embeddings: new OpenAIEmbeddings({
openAIApiKey: openAIApiKey,
modelName: 'text-embedding-3-large',
}),
};
} catch (err) {
logger.error(`Error loading Groq models: ${err}`);
}
}
if (ollamaEndpoint) {
try {
const response = await fetch(`${ollamaEndpoint}/api/tags`);

View File

@@ -1,8 +1,7 @@
import express from 'express';
import { getAvailableProviders } from '../lib/providers';
import {
getChatModel,
getChatModelProvider,
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
updateConfig,
@@ -25,11 +24,9 @@ router.get('/', async (_, res) => {
config['providers'][provider] = Object.keys(providers[provider]);
}
config['selectedProvider'] = getChatModelProvider();
config['selectedChatModel'] = getChatModel();
config['openeaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['groqApiKey'] = getGroqApiKey();
res.status(200).json(config);
});
@@ -38,12 +35,9 @@ router.post('/', async (req, res) => {
const config = req.body;
const updatedConfig = {
GENERAL: {
CHAT_MODEL_PROVIDER: config.selectedProvider,
CHAT_MODEL: config.selectedChatModel,
},
API_KEYS: {
OPENAI: config.openeaiApiKey,
GROQ: config.groqApiKey,
},
API_ENDPOINTS: {
OLLAMA: config.ollamaApiUrl,

View File

@@ -2,7 +2,6 @@ import express from 'express';
import handleImageSearch from '../agents/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableProviders } from '../lib/providers';
import { getChatModel, getChatModelProvider } from '../config';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
@@ -10,7 +9,7 @@ const router = express.Router();
router.post('/', async (req, res) => {
try {
let { query, chat_history } = req.body;
let { query, chat_history, chat_model_provider, chat_model } = req.body;
chat_history = chat_history.map((msg: any) => {
if (msg.role === 'user') {
@@ -20,14 +19,14 @@ router.post('/', async (req, res) => {
}
});
const models = await getAvailableProviders();
const provider = getChatModelProvider();
const chatModel = getChatModel();
const chatModels = await getAvailableProviders();
const provider = chat_model_provider || Object.keys(chatModels)[0];
const chatModel = chat_model || Object.keys(chatModels[provider])[0];
let llm: BaseChatModel | undefined;
if (models[provider] && models[provider][chatModel]) {
llm = models[provider][chatModel] as BaseChatModel | undefined;
if (chatModels[provider] && chatModels[provider][chatModel]) {
llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
}
if (!llm) {

View File

@@ -2,11 +2,13 @@ import express from 'express';
import imagesRouter from './images';
import videosRouter from './videos';
import configRouter from './config';
import modelsRouter from './models';
const router = express.Router();
router.use('/images', imagesRouter);
router.use('/videos', videosRouter);
router.use('/config', configRouter);
router.use('/models', modelsRouter);
export default router;

src/routes/models.ts (new file, 18 lines)
View File

@@ -0,0 +1,18 @@
import express from 'express';
import logger from '../utils/logger';
import { getAvailableProviders } from '../lib/providers';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const providers = await getAvailableProviders();
res.status(200).json({ providers });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(err.message);
}
});
export default router;

View File

@@ -1,7 +1,6 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableProviders } from '../lib/providers';
import { getChatModel, getChatModelProvider } from '../config';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../agents/videoSearchAgent';
@@ -10,7 +9,7 @@ const router = express.Router();
router.post('/', async (req, res) => {
try {
let { query, chat_history } = req.body;
let { query, chat_history, chat_model_provider, chat_model } = req.body;
chat_history = chat_history.map((msg: any) => {
if (msg.role === 'user') {
@@ -20,14 +19,14 @@ router.post('/', async (req, res) => {
}
});
const models = await getAvailableProviders();
const provider = getChatModelProvider();
const chatModel = getChatModel();
const chatModels = await getAvailableProviders();
const provider = chat_model_provider || Object.keys(chatModels)[0];
const chatModel = chat_model || Object.keys(chatModels[provider])[0];
let llm: BaseChatModel | undefined;
if (models[provider] && models[provider][chatModel]) {
llm = models[provider][chatModel] as BaseChatModel | undefined;
if (chatModels[provider] && chatModels[provider][chatModel]) {
llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
}
if (!llm) {

View File

@@ -1,15 +1,23 @@
import { WebSocket } from 'ws';
import { handleMessage } from './messageHandler';
import { getChatModel, getChatModelProvider } from '../config';
import { getAvailableProviders } from '../lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import type { IncomingMessage } from 'http';
import logger from '../utils/logger';
export const handleConnection = async (ws: WebSocket) => {
export const handleConnection = async (
ws: WebSocket,
request: IncomingMessage,
) => {
const searchParams = new URL(request.url, `http://${request.headers.host}`)
.searchParams;
const models = await getAvailableProviders();
const provider = getChatModelProvider();
const chatModel = getChatModel();
const provider =
searchParams.get('chatModelProvider') || Object.keys(models)[0];
const chatModel =
searchParams.get('chatModel') || Object.keys(models[provider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;

View File

@@ -10,9 +10,7 @@ export const initServer = (
const port = getPort();
const wss = new WebSocketServer({ server });
wss.on('connection', (ws) => {
handleConnection(ws);
});
wss.on('connection', handleConnection);
logger.info(`WebSocket server started on port ${port}`);
};

View File

@@ -19,14 +19,42 @@ const useSocket = (url: string) => {
useEffect(() => {
if (!ws) {
const ws = new WebSocket(url);
ws.onopen = () => {
console.log('[DEBUG] open');
setWs(ws);
const connectWs = async () => {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
if (!chatModel || !chatModelProvider) {
const chatModelProviders = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/models`,
).then(async (res) => (await res.json())['providers']);
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return console.error('No chat models available');
chatModelProvider = Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
}
const ws = new WebSocket(
`${url}?chatModel=${chatModel}&chatModelProvider=${chatModelProvider}`,
);
ws.onopen = () => {
console.log('[DEBUG] open');
setWs(ws);
};
};
connectWs();
}
return () => {
1;
ws?.close();
console.log('[DEBUG] closed');
};

View File

@@ -29,6 +29,10 @@ const SearchImages = ({
<button
onClick={async () => {
setLoading(true);
const chatModelProvider = localStorage.getItem('chatModelProvider');
const chatModel = localStorage.getItem('chatModel');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/images`,
{
@@ -39,6 +43,8 @@
body: JSON.stringify({
query: query,
chat_history: chat_history,
chat_model_provider: chatModelProvider,
chat_model: chatModel,
}),
},
);

View File

@@ -42,6 +42,10 @@ const Searchvideos = ({
<button
onClick={async () => {
setLoading(true);
const chatModelProvider = localStorage.getItem('chatModelProvider');
const chatModel = localStorage.getItem('chatModel');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/videos`,
{
@@ -52,6 +56,8 @@
body: JSON.stringify({
query: query,
chat_history: chat_history,
chat_model_provider: chatModelProvider,
chat_model: chatModel,
}),
},
);

View File

@@ -6,9 +6,8 @@ interface SettingsType {
providers: {
[key: string]: string[];
};
selectedProvider: string;
selectedChatModel: string;
openeaiApiKey: string;
groqApiKey: string;
ollamaApiUrl: string;
}
@@ -20,6 +19,12 @@ const SettingsDialog = ({
setIsOpen: (isOpen: boolean) => void;
}) => {
const [config, setConfig] = useState<SettingsType | null>(null);
const [selectedChatModelProvider, setSelectedChatModelProvider] = useState<
string | null
>(null);
const [selectedChatModel, setSelectedChatModel] = useState<string | null>(
null,
);
const [isLoading, setIsLoading] = useState(false);
const [isUpdating, setIsUpdating] = useState(false);
@@ -38,6 +43,11 @@ const SettingsDialog = ({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isOpen]);
useEffect(() => {
setSelectedChatModelProvider(localStorage.getItem('chatModelProvider'));
setSelectedChatModel(localStorage.getItem('chatModel'));
}, []);
const handleSubmit = async () => {
setIsUpdating(true);
@@ -49,6 +59,9 @@
},
body: JSON.stringify(config),
});
localStorage.setItem('chatModelProvider', selectedChatModelProvider!);
localStorage.setItem('chatModel', selectedChatModel!);
} catch (err) {
console.log(err);
} finally {
@@ -100,21 +113,19 @@ const SettingsDialog = ({
Chat model Provider
</p>
<select
onChange={(e) =>
setConfig({
...config,
selectedProvider: e.target.value,
selectedChatModel:
config.providers[e.target.value][0],
})
}
onChange={(e) => {
setSelectedChatModelProvider(e.target.value);
setSelectedChatModel(
config.providers[e.target.value][0],
);
}}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
>
{Object.keys(config.providers).map((provider) => (
<option
key={provider}
value={provider}
selected={provider === config.selectedProvider}
selected={provider === selectedChatModelProvider}
>
{provider.charAt(0).toUpperCase() +
provider.slice(1)}
@@ -123,29 +134,22 @@ const SettingsDialog = ({
</select>
</div>
)}
{config.selectedProvider && (
{selectedChatModelProvider && (
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">Chat Model</p>
<select
onChange={(e) =>
setConfig({
...config,
selectedChatModel: e.target.value,
})
}
onChange={(e) => setSelectedChatModel(e.target.value)}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
>
{config.providers[config.selectedProvider] ? (
config.providers[config.selectedProvider].length >
{config.providers[selectedChatModelProvider] ? (
config.providers[selectedChatModelProvider].length >
0 ? (
config.providers[config.selectedProvider].map(
config.providers[selectedChatModelProvider].map(
(model) => (
<option
key={model}
value={model}
selected={
model === config.selectedChatModel
}
selected={model === selectedChatModel}
>
{model}
</option>
@@ -194,6 +198,21 @@ const SettingsDialog = ({
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-white/70 text-sm">GROQ API Key</p>
<input
type="text"
placeholder="GROQ API Key"
defaultValue={config.groqApiKey}
onChange={(e) =>
setConfig({
...config,
groqApiKey: e.target.value,
})
}
className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
/>
</div>
</div>
)}
{isLoading && (

View File

@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.0.0",
"version": "1.1.0",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {