Compare commits


11 Commits

Author SHA1 Message Date
6b72114010 Update index.ts
resolve conflict
2025-02-15 15:26:11 +04:00
5cf0598985 Update config.ts
resolve conflict
2025-02-15 15:24:58 +04:00
b38413e92d Update config.ts
resolve conflict
2025-02-15 15:24:24 +04:00
0c908618c9 Update sample.config.toml 2025-02-15 15:23:32 +04:00
42fa951fd6 Update tsconfig.json
UI/UX improvements for the Reasoning Panel
2025-02-15 15:22:49 +04:00
ece1a60f4d Update MessageBox.tsx
UI/UX improvements to the Reasoning Panel
2025-02-15 15:18:56 +04:00
802627afcd save current changes 2025-02-15 11:57:22 +04:00
f12c39f41e feat: add Gemini 2.0 Flash Exp models
# Description
   Added two new Gemini models:
   - gemini-2.0-flash-exp
   - gemini-2.0-flash-thinking-exp-01-21

   # Changes Made
   - Updated src/lib/providers/gemini.ts to include new models
   - Maintained consistent configuration with existing models

   # Testing
   - Tested locally using Docker
   - Verified models appear in UI and are selectable
   - Confirmed functionality with sample queries

   # Additional Notes
   These models expand the available options for users who want to use the latest Gemini capabilities.
2025-02-15 11:57:22 +04:00
65692f1d52 feat(package): update markdown-to-jsx version 2025-02-15 11:57:21 +04:00
c4f818f602 feat(output-parsers): add empty check 2025-02-15 11:57:21 +04:00
6edac6938c feat: Add LM Studio Support and Thinking Model Panel
LM Studio Integration:
- Added LM Studio provider with OpenAI-compatible API support
- Dynamic model discovery via /v1/models endpoint
- Support for both chat and embeddings models
- Docker-compatible networking configuration

Thinking Model Panel:
- Added collapsible UI panel for model's chain of thought
- Parses responses with <think> tags to separate reasoning
- Maintains backward compatibility with regular responses
- Styled consistently with app theme for light/dark modes
- Preserves all existing message functionality (sources, markdown, etc.)

These improvements enhance the app's compatibility with local LLMs and
provide better visibility into model reasoning processes while maintaining
existing functionality.
2025-01-26 18:18:35 +04:00
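The "Thinking Model Panel" described in the commit above separates a model's chain of thought from its final answer by splitting the response on `<think>` tags: the tagged portion feeds a collapsible reasoning panel, and the remainder is rendered as the normal answer. As a rough illustration of that parsing step (a minimal standalone sketch, not the component code from the MessageBox.tsx diff further down; the function name and return shape here are placeholders), one regular expression can do the split and also tolerate a still-open `<think>` block while a response is streaming:

```typescript
// Minimal sketch: split an assistant response into reasoning and answer parts.
// Mirrors the idea used in the MessageBox diff below: capture everything inside
// <think>…</think> (or after an unclosed <think> while streaming) and treat the
// rest as the visible answer. Names and shapes here are illustrative only.
interface ParsedResponse {
  thinking: string; // chain-of-thought text; may be partial while streaming
  answer: string;   // final answer text; empty until </think> has arrived
}

function splitThinkTags(content: string): ParsedResponse {
  const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;
  const match = content.match(thinkRegex);

  if (!match) {
    // No <think> block at all: the whole message is the answer.
    return { thinking: '', answer: content };
  }

  const [, thinkingContent = '', answerContent = ''] = match;
  return { thinking: thinkingContent.trim(), answer: answerContent.trim() };
}

// A streamed chunk whose <think> tag is not closed yet:
console.log(splitThinkTags('<think>Comparing the sources first'));
// -> { thinking: 'Comparing the sources first', answer: '' }

// A finished response:
console.log(splitThinkTags('<think>done</think>The capital is Paris.'));
// -> { thinking: 'done', answer: 'The capital is Paris.' }
```

The actual component in this comparison keeps the two parts in separate state hooks and auto-expands the reasoning panel as soon as thinking content starts arriving.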
41 changed files with 7922 additions and 2162 deletions

4
.gitignore vendored
View File

@ -2,7 +2,6 @@
node_modules/
npm-debug.log
yarn-error.log
package-lock.json
# Build output
/.next/
@ -38,6 +37,3 @@ Thumbs.db
# Db
db.sqlite
/searxng
# Dev
docker-compose-dev.yaml

View File

@ -2,6 +2,7 @@
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)
## Table of Contents <!-- omit in toc -->
@ -43,7 +44,7 @@ Want to know more about its architecture and how it works? You can read it [here
- **Normal Mode:** Processes your query and performs a web search.
- **Focus Modes:** Special modes to better answer specific types of questions. Perplexica currently has 6 focus modes:
- **All Mode:** Searches the entire web to find the best results.
- **Writing Assistant Mode:** Helpful for writing tasks that do not require searching the web.
- **Writing Assistant Mode:** Helpful for writing tasks that does not require searching the web.
- **Academic Search Mode:** Finds articles and papers, ideal for academic research.
- **YouTube Search Mode:** Finds YouTube videos based on the search query.
- **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
@ -142,7 +143,6 @@ You can access Perplexica over your home network by following our networking gui
## One-Click Deployment
[![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
## Upcoming Features

View File

@ -4,7 +4,7 @@ services:
volumes:
- ./searxng:/etc/searxng:rw
ports:
- '4000:8080'
- 4000:8080
networks:
- perplexica-network
restart: unless-stopped
@ -19,7 +19,7 @@ services:
depends_on:
- searxng
ports:
- '3001:3001'
- 3001:3001
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
@ -37,11 +37,12 @@ services:
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
network: host
image: itzcrazykns1337/perplexica-frontend:main
depends_on:
- perplexica-backend
ports:
- '3000:3000'
- 3000:3000
networks:
- perplexica-network
restart: unless-stopped

View File

@ -7,43 +7,34 @@ To update Perplexica to the latest version, follow these steps:
1. Clone the latest version of Perplexica from GitHub:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
git clone https://github.com/ItzCrazyKns/Perplexica.git
```
2. Navigate to the project directory.
2. Navigate to the Project Directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Pull the latest images from the registry.
3. Pull latest images from registry.
```bash
docker compose pull
```
5. Update and recreate the containers.
4. Update and Recreate containers.
```bash
docker compose up -d
```
6. Once the command completes, go to http://localhost:3000 and verify the latest changes.
5. Once the command completes running go to http://localhost:3000 and verify the latest changes.
## For non-Docker users
## For non Docker users
1. Clone the latest version of Perplexica from GitHub:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
git clone https://github.com/ItzCrazyKns/Perplexica.git
```
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Execute `npm i` in both the `ui` folder and the root directory.
5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
---
2. Navigate to the Project Directory
3. Execute `npm i` in both the `ui` folder and the root directory.
4. Once packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
5. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.

6912
package-lock.json generated Normal file

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
{
"name": "perplexica-backend",
"version": "1.10.0-rc3",
"version": "1.10.0-rc2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@ -30,8 +30,8 @@
"@iarna/toml": "^2.2.5",
"@langchain/anthropic": "^0.2.3",
"@langchain/community": "^0.2.16",
"@langchain/google-genai": "^0.0.23",
"@langchain/openai": "^0.0.25",
"@langchain/google-genai": "^0.0.23",
"@xenova/transformers": "^2.17.1",
"axios": "^1.6.8",
"better-sqlite3": "^11.0.0",

View File

@ -3,12 +3,6 @@ PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
[SEARCH_ENGINE_BACKENDS] # "google" | "searxng" | "bing" | "brave" | "yacy"
SEARCH = "searxng"
IMAGE = "searxng"
VIDEO = "searxng"
NEWS = "searxng"
[MODELS.OPENAI]
API_KEY = ""
@ -21,25 +15,16 @@ API_KEY = ""
[MODELS.GEMINI]
API_KEY = ""
[MODELS.CUSTOM_OPENAI]
API_KEY = ""
API_URL = ""
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[SEARCH_ENGINES.GOOGLE]
[MODELS.LMSTUDIO]
API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234/v1
[MODELS.CUSTOM_OPENAI]
API_KEY = ""
CSE_ID = ""
API_URL = ""
MODEL_NAME = ""
[SEARCH_ENGINES.SEARXNG]
ENDPOINT = ""
[SEARCH_ENGINES.BING]
SUBSCRIPTION_KEY = ""
[SEARCH_ENGINES.BRAVE]
API_KEY = ""
[SEARCH_ENGINES.YACY]
ENDPOINT = ""
[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL

View File

@ -15,5 +15,3 @@ server:
engines:
- name: wolframalpha
disabled: false
- name: qwant
disabled: true

View File

@ -7,12 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getImageSearchEngineBackend } from '../config';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
@ -41,103 +36,6 @@ type ImageSearchChainInput = {
query: string;
};
async function performImageSearch(query: string) {
const searchEngine = getImageSearchEngineBackend();
let images = [];
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(query);
images = googleResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.displayLink,
};
}
})
.filter(Boolean);
break;
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['google images', 'bing images'],
pageno: 1,
});
searxResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
break;
}
case 'brave': {
const braveResult = await searchBraveAPI(query);
images = braveResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
case 'yacy': {
const yacyResult = await searchYaCy(query);
images = yacyResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
case 'bing': {
const bingResult = await searchBingAPI(query);
images = bingResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
return images;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
@ -154,7 +52,22 @@ const createImageSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const images = await performImageSearch(input);
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
}),
]);

View File

@ -7,30 +7,26 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getVideoSearchEngineBackend } from '../config';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
@ -42,102 +38,6 @@ type VideoSearchChainInput = {
const strParser = new StringOutputParser();
async function performVideoSearch(query: string) {
const searchEngine = getVideoSearchEngineBackend();
const youtubeQuery = `${query} site:youtube.com`;
let videos = [];
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(youtubeQuery);
googleResult.results.forEach((result) => {
// Use .results instead of .originalres
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['youtube'],
});
searxResult.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
break;
}
case 'brave': {
const braveResult = await searchBraveAPI(youtubeQuery);
braveResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
case 'yacy': {
console.log('Not available for yacy');
videos = [];
break;
}
case 'bing': {
const bingResult = await searchBingAPI(youtubeQuery);
bingResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
return videos;
}
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
@ -152,7 +52,28 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const videos = await performVideoSearch(input);
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos = [];
res.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
}),
]);

View File

@ -10,12 +10,6 @@ interface Config {
SIMILARITY_MEASURE: string;
KEEP_ALIVE: string;
};
SEARCH_ENGINE_BACKENDS: {
SEARCH: string;
IMAGE: string;
VIDEO: string;
NEWS: string;
};
MODELS: {
OPENAI: {
API_KEY: string;
@ -32,29 +26,17 @@ interface Config {
OLLAMA: {
API_URL: string;
};
LMSTUDIO: {
API_URL: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
MODEL_NAME: string;
};
};
SEARCH_ENGINES: {
GOOGLE: {
API_KEY: string;
CSE_ID: string;
};
SEARXNG: {
ENDPOINT: string;
};
BING: {
SUBSCRIPTION_KEY: string;
};
BRAVE: {
API_KEY: string;
};
YACY: {
ENDPOINT: string;
};
API_ENDPOINTS: {
SEARXNG: string;
};
}
@ -82,35 +64,13 @@ export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.SEARCH;
export const getImageSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.IMAGE || getSearchEngineBackend();
export const getVideoSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.VIDEO || getSearchEngineBackend();
export const getNewsSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.NEWS || getSearchEngineBackend();
export const getGoogleApiKey = () => loadConfig().SEARCH_ENGINES.GOOGLE.API_KEY;
export const getGoogleCseId = () => loadConfig().SEARCH_ENGINES.GOOGLE.CSE_ID;
export const getBraveApiKey = () => loadConfig().SEARCH_ENGINES.BRAVE.API_KEY;
export const getBingSubscriptionKey = () =>
loadConfig().SEARCH_ENGINES.BING.SUBSCRIPTION_KEY;
export const getYacyJsonEndpoint = () =>
loadConfig().SEARCH_ENGINES.YACY.ENDPOINT;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().SEARCH_ENGINES.SEARXNG.ENDPOINT;
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LMSTUDIO.API_URL;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@ -121,10 +81,6 @@ export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
}
if (typeof current !== 'object' || current === null) {
return update;
}

1
src/lib/chat/service.ts Normal file
View File

@ -0,0 +1 @@

View File

@ -4,6 +4,7 @@ import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
@ -17,6 +18,7 @@ const chatModelProviders = {
ollama: loadOllamaChatModels,
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
lm_studio: loadLMStudioChatModels,
};
const embeddingModelProviders = {
@ -24,6 +26,7 @@ const embeddingModelProviders = {
local: loadTransformersEmbeddingsModels,
ollama: loadOllamaEmbeddingsModels,
gemini: loadGeminiEmbeddingsModels,
lm_studio: loadLMStudioEmbeddingsModels,
};
export const getAvailableChatModelProviders = async () => {
@ -36,27 +39,7 @@ export const getAvailableChatModelProviders = async () => {
}
}
const customOpenAiApiKey = getCustomOpenaiApiKey();
const customOpenAiApiUrl = getCustomOpenaiApiUrl();
const customOpenAiModelName = getCustomOpenaiModelName();
models['custom_openai'] = {
...(customOpenAiApiKey && customOpenAiApiUrl && customOpenAiModelName
? {
[customOpenAiModelName]: {
displayName: customOpenAiModelName,
model: new ChatOpenAI({
openAIApiKey: customOpenAiApiKey,
modelName: customOpenAiModelName,
temperature: 0.7,
configuration: {
baseURL: customOpenAiApiUrl,
},
}),
},
}
: {}),
};
models['custom_openai'] = {};
return models;
};

View File

@ -0,0 +1,89 @@
import { OpenAIEmbeddings } from '@langchain/openai';
import { ChatOpenAI } from '@langchain/openai';
import { getKeepAlive, getLMStudioApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import axios from 'axios';
interface LMStudioModel {
id: string;
// add other properties if LM Studio API provides them
}
interface ChatModelConfig {
displayName: string;
model: ChatOpenAI;
}
export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
const lmStudioEndpoint = getLMStudioApiEndpoint();
if (!lmStudioEndpoint) {
logger.debug('LM Studio endpoint not configured, skipping');
return {};
}
try {
const response = await axios.get<{ data: LMStudioModel[] }>(`${lmStudioEndpoint}/models`, {
headers: {
'Content-Type': 'application/json',
},
});
const lmStudioModels = response.data.data;
const chatModels = lmStudioModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
acc[model.id] = {
displayName: model.id,
model: new ChatOpenAI({
openAIApiKey: 'lm-studio',
configuration: {
baseURL: lmStudioEndpoint,
},
modelName: model.id,
temperature: 0.7,
}),
};
return acc;
}, {});
return chatModels;
} catch (err) {
logger.error(`Error loading LM Studio models: ${err}`);
return {};
}
};
export const loadLMStudioEmbeddingsModels = async () => {
const lmStudioEndpoint = getLMStudioApiEndpoint();
if (!lmStudioEndpoint) return {};
try {
const response = await axios.get(`${lmStudioEndpoint}/models`, {
headers: {
'Content-Type': 'application/json',
},
});
const lmStudioModels = response.data.data;
const embeddingsModels = lmStudioModels.reduce((acc, model) => {
acc[model.id] = {
displayName: model.id,
model: new OpenAIEmbeddings({
openAIApiKey: 'lm-studio', // Dummy key required by LangChain
configuration: {
baseURL: lmStudioEndpoint,
},
modelName: model.id,
}),
};
return acc;
}, {});
return embeddingsModels;
} catch (err) {
logger.error(`Error loading LM Studio embeddings model: ${err}`);
return {};
}
};
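The LM Studio provider added above discovers models at runtime by calling the server's OpenAI-compatible `/v1/models` endpoint and building one `ChatOpenAI` (or `OpenAIEmbeddings`) instance per returned id, using a dummy API key since LM Studio does not require authentication. A minimal sketch of the request the loaders depend on follows; the default base URL and everything in the response beyond `data[].id` are assumptions (the loader's `LMStudioModel` interface only relies on `id`, and the sample config earlier in this diff hints at `http://host.docker.internal:1234/v1` as the endpoint):

```typescript
import axios from 'axios';

// Shape the loaders above actually rely on: only the `id` of each entry.
interface LMStudioModel {
  id: string;
}

// Sketch: list the model ids an LM Studio server advertises. The base URL is an
// assumption (LM Studio's local default is typically http://localhost:1234/v1).
async function listLMStudioModelIds(
  baseUrl = 'http://localhost:1234/v1',
): Promise<string[]> {
  const response = await axios.get<{ data: LMStudioModel[] }>(
    `${baseUrl}/models`,
    { headers: { 'Content-Type': 'application/json' } },
  );
  return response.data.data.map((model) => model.id);
}

// Usage: print the ids that would appear as selectable chat/embeddings models.
listLMStudioModelIds()
  .then((ids) => console.log('LM Studio models:', ids))
  .catch((err) => console.error('LM Studio endpoint not reachable:', err.message));
```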

View File

@ -1,105 +0,0 @@
import axios from 'axios';
import { getBingSubscriptionKey } from '../../config';
interface BingAPISearchResult {
_type: string;
name: string;
url: string;
displayUrl: string;
snippet?: string;
dateLastCrawled?: string;
thumbnailUrl?: string;
contentUrl?: string;
hostPageUrl?: string;
width?: number;
height?: number;
accentColor?: string;
contentSize?: string;
datePublished?: string;
encodingFormat?: string;
hostPageDisplayUrl?: string;
id?: string;
isLicensed?: boolean;
isFamilyFriendly?: boolean;
language?: string;
mediaUrl?: string;
motionThumbnailUrl?: string;
publisher?: string;
viewCount?: number;
webSearchUrl?: string;
primaryImageOfPage?: {
thumbnailUrl?: string;
width?: number;
height?: number;
};
video?: {
allowHttpsEmbed?: boolean;
embedHtml?: string;
allowMobileEmbed?: boolean;
viewCount?: number;
duration?: string;
};
image?: {
thumbnail?: {
contentUrl?: string;
width?: number;
height?: number;
};
imageInsightsToken?: string;
imageId?: string;
};
}
export const searchBingAPI = async (query: string) => {
try {
const bingApiKey = await getBingSubscriptionKey();
const url = new URL(`https://api.cognitive.microsoft.com/bing/v7.0/search`);
url.searchParams.append('q', query);
url.searchParams.append('responseFilter', 'Webpages,Images,Videos');
const res = await axios.get(url.toString(), {
headers: {
'Ocp-Apim-Subscription-Key': bingApiKey,
Accept: 'application/json',
},
});
if (res.data.error) {
throw new Error(`Bing API Error: ${res.data.error.message}`);
}
const originalres = res.data;
// Extract web, image, and video results
const webResults = originalres.webPages?.value || [];
const imageResults = originalres.images?.value || [];
const videoResults = originalres.videos?.value || [];
const results = webResults.map((item: BingAPISearchResult) => ({
title: item.name,
url: item.url,
content: item.snippet,
img_src:
item.primaryImageOfPage?.thumbnailUrl ||
imageResults.find((img: any) => img.hostPageUrl === item.url)
?.thumbnailUrl ||
videoResults.find((vid: any) => vid.hostPageUrl === item.url)
?.thumbnailUrl,
...(item.video && {
videoData: {
duration: item.video.duration,
embedUrl: item.video.embedHtml?.match(/src="(.*?)"/)?.[1],
},
publisher: item.publisher,
datePublished: item.datePublished,
}),
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Bing API Error: ${errorMessage}`);
}
};

View File

@ -1,102 +0,0 @@
import axios from 'axios';
import { getBraveApiKey } from '../../config';
interface BraveSearchResult {
title: string;
url: string;
content?: string;
img_src?: string;
age?: string;
family_friendly?: boolean;
language?: string;
video?: {
embedUrl?: string;
duration?: string;
};
rating?: {
value: number;
scale: number;
};
products?: Array<{
name: string;
price?: string;
}>;
recipe?: {
ingredients?: string[];
cookTime?: string;
};
meta?: {
fetched?: string;
lastCrawled?: string;
};
}
export const searchBraveAPI = async (
query: string,
numResults: number = 20,
): Promise<{ results: BraveSearchResult[]; originalres: any }> => {
try {
const braveApiKey = await getBraveApiKey();
const url = new URL(`https://api.search.brave.com/res/v1/web/search`);
url.searchParams.append('q', query);
url.searchParams.append('count', numResults.toString());
const res = await axios.get(url.toString(), {
headers: {
'X-Subscription-Token': braveApiKey,
Accept: 'application/json',
},
});
if (res.data.error) {
throw new Error(`Brave API Error: ${res.data.error.message}`);
}
const originalres = res.data;
const webResults = originalres.web?.results || [];
const results: BraveSearchResult[] = webResults.map((item: any) => ({
title: item.title,
url: item.url,
content: item.description,
img_src: item.thumbnail?.src || item.deep_results?.images?.[0]?.src,
age: item.age,
family_friendly: item.family_friendly,
language: item.language,
video: item.video
? {
embedUrl: item.video.embed_url,
duration: item.video.duration,
}
: undefined,
rating: item.rating
? {
value: item.rating.value,
scale: item.rating.scale_max,
}
: undefined,
products: item.deep_results?.product_cluster?.map((p: any) => ({
name: p.name,
price: p.price,
})),
recipe: item.recipe
? {
ingredients: item.recipe.ingredients,
cookTime: item.recipe.cook_time,
}
: undefined,
meta: {
fetched: item.meta?.fetched,
lastCrawled: item.meta?.last_crawled,
},
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Brave API Error: ${errorMessage}`);
}
};

View File

@ -1,85 +0,0 @@
import axios from 'axios';
import { getGoogleApiKey, getGoogleCseId } from '../../config';
interface GooglePSESearchResult {
kind: string;
title: string;
htmlTitle: string;
link: string;
displayLink: string;
snippet?: string;
htmlSnippet?: string;
cacheId?: string;
formattedUrl: string;
htmlFormattedUrl: string;
pagemap?: {
videoobject: any;
cse_thumbnail?: Array<{
src: string;
width: string;
height: string;
}>;
metatags?: Array<{
[key: string]: string;
author?: string;
}>;
cse_image?: Array<{
src: string;
}>;
};
fileFormat?: string;
image?: {
contextLink: string;
thumbnailLink: string;
};
mime?: string;
labels?: Array<{
name: string;
displayName: string;
}>;
}
export const searchGooglePSE = async (query: string) => {
try {
const [googleApiKey, googleCseID] = await Promise.all([
getGoogleApiKey(),
getGoogleCseId(),
]);
const url = new URL(`https://www.googleapis.com/customsearch/v1`);
url.searchParams.append('q', query);
url.searchParams.append('cx', googleCseID);
url.searchParams.append('key', googleApiKey);
const res = await axios.get(url.toString());
if (res.data.error) {
throw new Error(`Google PSE Error: ${res.data.error.message}`);
}
const originalres = res.data.items;
const results = originalres.map((item: GooglePSESearchResult) => ({
title: item.title,
url: item.link,
content: item.snippet,
img_src:
item.pagemap?.cse_image?.[0]?.src ||
item.pagemap?.cse_thumbnail?.[0]?.src ||
item.image?.thumbnailLink,
...(item.pagemap?.videoobject?.[0] && {
videoData: {
duration: item.pagemap.videoobject[0].duration,
embedUrl: item.pagemap.videoobject[0].embedurl,
},
}),
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Google PSE Error: ${errorMessage}`);
}
};

View File

@ -1,79 +0,0 @@
import axios from 'axios';
import { getYacyJsonEndpoint } from '../../config';
interface YaCySearchResult {
channels: {
title: string;
description: string;
link: string;
image: {
url: string;
title: string;
link: string;
};
startIndex: string;
itemsPerPage: string;
searchTerms: string;
items: {
title: string;
link: string;
code: string;
description: string;
pubDate: string;
image?: string;
size: string;
sizename: string;
guid: string;
faviconUrl: string;
host: string;
path: string;
file: string;
urlhash: string;
ranking: string;
}[];
navigation: {
facetname: string;
displayname: string;
type: string;
min: string;
max: string;
mean: string;
elements: {
name: string;
count: string;
modifier: string;
url: string;
}[];
}[];
}[];
}
export const searchYaCy = async (query: string, numResults: number = 20) => {
try {
const yacyBaseUrl = getYacyJsonEndpoint();
const url = new URL(`${yacyBaseUrl}/yacysearch.json`);
url.searchParams.append('query', query);
url.searchParams.append('count', numResults.toString());
const res = await axios.get(url.toString());
const originalres = res.data as YaCySearchResult;
const results = originalres.channels[0].items.map((item) => ({
title: item.title,
url: item.link,
content: item.description,
img_src: item.image || null,
pubDate: item.pubDate,
host: item.host,
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`YaCy Error: ${errorMessage}`);
}
};

View File

@ -1,5 +1,5 @@
import axios from 'axios';
import { getSearxngApiEndpoint } from '../../config';
import { getSearxngApiEndpoint } from '../config';
interface SearxngSearchOptions {
categories?: string[];

View File

@ -6,6 +6,7 @@ import {
import {
getGroqApiKey,
getOllamaApiEndpoint,
getLMStudioApiEndpoint,
getAnthropicApiKey,
getGeminiApiKey,
getOpenaiApiKey,
@ -13,16 +14,6 @@ import {
getCustomOpenaiApiUrl,
getCustomOpenaiApiKey,
getCustomOpenaiModelName,
getSearchEngineBackend,
getImageSearchEngineBackend,
getVideoSearchEngineBackend,
getNewsSearchEngineBackend,
getSearxngApiEndpoint,
getGoogleApiKey,
getGoogleCseId,
getBingSubscriptionKey,
getBraveApiKey,
getYacyJsonEndpoint,
} from '../config';
import logger from '../utils/logger';
@ -64,27 +55,13 @@ router.get('/', async (_, res) => {
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
// Add search engine configuration
config['searchEngineBackends'] = {
search: getSearchEngineBackend(),
image: getImageSearchEngineBackend(),
video: getVideoSearchEngineBackend(),
news: getNewsSearchEngineBackend(),
};
config['searxngEndpoint'] = getSearxngApiEndpoint();
config['googleApiKey'] = getGoogleApiKey();
config['googleCseId'] = getGoogleCseId();
config['bingSubscriptionKey'] = getBingSubscriptionKey();
config['braveApiKey'] = getBraveApiKey();
config['yacyEndpoint'] = getYacyJsonEndpoint();
config['customOpenaiModelName'] = getCustomOpenaiModelName()
res.status(200).json(config);
} catch (err: any) {
@ -113,36 +90,15 @@ router.post('/', async (req, res) => {
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
LMSTUDIO: {
API_URL: config.lmStudioApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
SEARCH_ENGINE_BACKENDS: config.searchEngineBackends ? {
SEARCH: config.searchEngineBackends.search,
IMAGE: config.searchEngineBackends.image,
VIDEO: config.searchEngineBackends.video,
NEWS: config.searchEngineBackends.news,
} : undefined,
SEARCH_ENGINES: {
GOOGLE: {
API_KEY: config.googleApiKey,
CSE_ID: config.googleCseId,
},
SEARXNG: {
ENDPOINT: config.searxngEndpoint,
},
BING: {
SUBSCRIPTION_KEY: config.bingSubscriptionKey,
},
BRAVE: {
API_KEY: config.braveApiKey,
},
YACY: {
ENDPOINT: config.yacyEndpoint,
},
},
};
updateConfig(updatedConfig);

View File

@ -1,125 +1,42 @@
import express from 'express';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getNewsSearchEngineBackend } from '../config';
import { searchSearxng } from '../lib/searxng';
import logger from '../utils/logger';
const router = express.Router();
async function performSearch(query: string, site: string) {
const searchEngine = getNewsSearchEngineBackend();
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(query);
return googleResult.originalres.map((item) => {
const imageSources = [
item.pagemap?.cse_image?.[0]?.src,
item.pagemap?.cse_thumbnail?.[0]?.src,
item.pagemap?.metatags?.[0]?.['og:image'],
item.pagemap?.metatags?.[0]?.['twitter:image'],
item.pagemap?.metatags?.[0]?.['image'],
].filter(Boolean); // Remove undefined values
return {
title: item.title,
url: item.link,
content: item.snippet,
thumbnail: imageSources[0], // First available image
img_src: imageSources[0], // Same as thumbnail for consistency
iframe_src: null,
author: item.pagemap?.metatags?.[0]?.['og:site_name'] || site,
publishedDate:
item.pagemap?.metatags?.[0]?.['article:published_time'],
};
});
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['bing news'],
pageno: 1,
});
return searxResult.results;
}
case 'brave': {
const braveResult = await searchBraveAPI(query);
return braveResult.results.map((item) => ({
title: item.title,
url: item.url,
content: item.content,
thumbnail: item.img_src,
img_src: item.img_src,
iframe_src: null,
author: item.meta?.fetched || site,
publishedDate: item.meta?.lastCrawled,
}));
}
case 'yacy': {
const yacyResult = await searchYaCy(query);
return yacyResult.results.map((item) => ({
title: item.title,
url: item.url,
content: item.content,
thumbnail: item.img_src,
img_src: item.img_src,
iframe_src: null,
author: item?.host || site,
publishedDate: item?.pubDate,
}));
}
case 'bing': {
const bingResult = await searchBingAPI(query);
return bingResult.results.map((item) => ({
title: item.title,
url: item.url,
content: item.content,
thumbnail: item.img_src,
img_src: item.img_src,
iframe_src: null,
author: item?.publisher || site,
publishedDate: item?.datePublished,
}));
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
}
router.get('/', async (req, res) => {
try {
const queries = [
{ site: 'businessinsider.com', topic: 'AI' },
{ site: 'www.exchangewire.com', topic: 'AI' },
{ site: 'yahoo.com', topic: 'AI' },
{ site: 'businessinsider.com', topic: 'tech' },
{ site: 'www.exchangewire.com', topic: 'tech' },
{ site: 'yahoo.com', topic: 'tech' },
];
const data = (
await Promise.all(
queries.map(async ({ site, topic }) => {
try {
const query = `site:${site} ${topic}`;
return await performSearch(query, site);
} catch (error) {
logger.error(`Error searching ${site}: ${error.message}`);
return [];
}
await Promise.all([
searchSearxng('site:businessinsider.com AI', {
engines: ['bing news'],
pageno: 1,
}),
)
searchSearxng('site:www.exchangewire.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:businessinsider.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:www.exchangewire.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com tech', {
engines: ['bing news'],
pageno: 1,
}),
])
)
.map((result) => result.results)
.flat()
.sort(() => Math.random() - 0.5)
.filter((item) => item.title && item.url && item.content);
.sort(() => Math.random() - 0.5);
return res.json({ blogs: data });
} catch (err: any) {

View File

@ -5,17 +5,14 @@ import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
customOpenAIBaseURL?: string;
customOpenAIKey?: string;
}
interface ImageSearchBody {
@ -47,12 +44,21 @@ router.post('/', async (req, res) => {
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
if (
!body.chatModel?.customOpenAIBaseURL ||
!body.chatModel?.customOpenAIKey
) {
return res
.status(400)
.json({ message: 'Missing custom OpenAI base URL or key' });
}
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
modelName: body.chatModel.model,
openAIApiKey: body.chatModel.customOpenAIKey,
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
baseURL: body.chatModel.customOpenAIBaseURL,
},
}) as unknown as BaseChatModel;
} else if (

View File

@ -10,19 +10,14 @@ import {
import { searchHandlers } from '../websocket/messageHandler';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '../search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface chatModel {
provider: string;
model: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
customOpenAIKey?: string;
}
interface embeddingModel {
@ -83,14 +78,21 @@ router.post('/', async (req, res) => {
let embeddings: Embeddings | undefined;
if (body.chatModel?.provider === 'custom_openai') {
if (
!body.chatModel?.customOpenAIBaseURL ||
!body.chatModel?.customOpenAIKey
) {
return res
.status(400)
.json({ message: 'Missing custom OpenAI base URL or key' });
}
llm = new ChatOpenAI({
modelName: body.chatModel?.model || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
modelName: body.chatModel.model,
openAIApiKey: body.chatModel.customOpenAIKey,
temperature: 0.7,
configuration: {
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
baseURL: body.chatModel.customOpenAIBaseURL,
},
}) as unknown as BaseChatModel;
} else if (

View File

@ -5,17 +5,14 @@ import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
customOpenAIBaseURL?: string;
customOpenAIKey?: string;
}
interface SuggestionsBody {
@ -46,12 +43,21 @@ router.post('/', async (req, res) => {
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
if (
!body.chatModel?.customOpenAIBaseURL ||
!body.chatModel?.customOpenAIKey
) {
return res
.status(400)
.json({ message: 'Missing custom OpenAI base URL or key' });
}
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
modelName: body.chatModel.model,
openAIApiKey: body.chatModel.customOpenAIKey,
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
baseURL: body.chatModel.customOpenAIBaseURL,
},
}) as unknown as BaseChatModel;
} else if (

View File

@ -5,17 +5,14 @@ import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../chains/videoSearchAgent';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
customOpenAIBaseURL?: string;
customOpenAIKey?: string;
}
interface VideoSearchBody {
@ -47,12 +44,21 @@ router.post('/', async (req, res) => {
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
if (
!body.chatModel?.customOpenAIBaseURL ||
!body.chatModel?.customOpenAIKey
) {
return res
.status(400)
.json({ message: 'Missing custom OpenAI base URL or key' });
}
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
modelName: body.chatModel.model,
openAIApiKey: body.chatModel.customOpenAIKey,
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
baseURL: body.chatModel.customOpenAIBaseURL,
},
}) as unknown as BaseChatModel;
} else if (

View File

@ -17,12 +17,7 @@ import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { getSearchEngineBackend } from '../config';
import { searchSearxng } from '../lib/searxng';
import path from 'path';
import fs from 'fs';
import computeSimilarity from '../utils/computeSimilarity';
@ -137,7 +132,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
You are a web search summarizer, tasked with summarizing a piece of text retrieved from a web search. Your job is to summarize the
text into a detailed, 2-4 paragraph explanation that captures the main ideas and provides a comprehensive answer to the query.
If the query is \"summarize\", you should provide a detailed summary of the text. If the query is a specific question, you should answer it in the summary.
- **Journalistic tone**: The summary should sound professional and journalistic, not too casual or vague.
- **Thorough and detailed**: Ensure that every key point from the text is captured and that the summary directly answers the query.
- **Not too lengthy, but detailed**: The summary should be informative but not excessively long. Focus on providing detailed information in a concise format.
@ -208,37 +203,10 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: docs };
} else {
const searchEngine = getSearchEngineBackend();
let res;
switch (searchEngine) {
case 'searxng':
res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
break;
case 'google':
res = await searchGooglePSE(question);
break;
case 'bing':
res = await searchBingAPI(question);
break;
case 'brave':
res = await searchBraveAPI(question);
break;
case 'yacy':
res = await searchYaCy(question);
break;
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
if (!res?.results) {
throw new Error(
`No results found for search engine: ${searchEngine}`,
);
}
const res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
const documents = res.results.map(
(result) =>

View File

@ -9,11 +9,6 @@ import type { Embeddings } from '@langchain/core/embeddings';
import type { IncomingMessage } from 'http';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
export const handleConnection = async (
ws: WebSocket,
@ -53,20 +48,14 @@ export const handleConnection = async (
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
} else if (chatModelProvider == 'custom_openai') {
const customOpenaiApiKey = getCustomOpenaiApiKey();
const customOpenaiApiUrl = getCustomOpenaiApiUrl();
const customOpenaiModelName = getCustomOpenaiModelName();
if (customOpenaiApiKey && customOpenaiApiUrl && customOpenaiModelName) {
llm = new ChatOpenAI({
modelName: customOpenaiModelName,
openAIApiKey: customOpenaiApiKey,
temperature: 0.7,
configuration: {
baseURL: customOpenaiApiUrl,
},
}) as unknown as BaseChatModel;
}
llm = new ChatOpenAI({
modelName: chatModel,
openAIApiKey: searchParams.get('openAIApiKey'),
temperature: 0.7,
configuration: {
baseURL: searchParams.get('openAIBaseURL'),
},
}) as unknown as BaseChatModel;
}
if (

View File

@ -1,18 +1,27 @@
{
"compilerOptions": {
"lib": ["ESNext"],
"module": "Node16",
"moduleResolution": "Node16",
"target": "ESNext",
"outDir": "dist",
"sourceMap": false,
"esModuleInterop": true,
"experimentalDecorators": true,
"emitDecoratorMetadata": true,
"allowSyntheticDefaultImports": true,
"target": "es2018",
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"skipDefaultLibCheck": true
"strict": true,
"noEmit": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [
{
"name": "next"
}
],
"paths": {
"@/*": ["./*"]
}
},
"include": ["src"],
"exclude": ["node_modules", "**/*.spec.ts"]
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
"exclude": ["node_modules"]
}

File diff suppressed because it is too large

View File

@ -10,7 +10,7 @@ import { toast } from 'sonner';
import { useSearchParams } from 'next/navigation';
import { getSuggestions } from '@/lib/actions';
import { Settings } from 'lucide-react';
import Link from 'next/link';
import SettingsDialog from './SettingsDialog';
import NextError from 'next/error';
export type Message = {
@ -40,7 +40,6 @@ const useSocket = (
const isCleaningUpRef = useRef(false);
const MAX_RETRIES = 3;
const INITIAL_BACKOFF = 1000; // 1 second
const isConnectionErrorRef = useRef(false);
const getBackoffDelay = (retryCount: number) => {
return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
@ -59,17 +58,14 @@ const useSocket = (
let embeddingModelProvider = localStorage.getItem(
'embeddingModelProvider',
);
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
let openAIBaseURL =
chatModelProvider === 'custom_openai'
? localStorage.getItem('openAIBaseURL')
: null;
let openAIPIKey =
chatModelProvider === 'custom_openai'
? localStorage.getItem('openAIApiKey')
: null;
const providers = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/models`,
@ -98,13 +94,21 @@ const useSocket = (
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (chatModelProvider === 'custom_openai') {
toast.error(
'Seems like you are using the custom OpenAI provider, please open the settings and enter a model name to use.',
);
setError(true);
return;
} else {
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return toast.error('No chat models available');
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return toast.error('No chat models available');
}
}
if (!embeddingModel || !embeddingModelProvider) {
@ -135,7 +139,9 @@ const useSocket = (
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
(((!openAIBaseURL || !openAIPIKey) &&
chatModelProvider === 'custom_openai') ||
!chatModelProviders[chatModelProvider])
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
@ -143,11 +149,23 @@ const useSocket = (
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
if (
chatModelProvider === 'custom_openai' &&
(!openAIBaseURL || !openAIPIKey)
) {
toast.error(
'Seems like you are using the custom OpenAI provider, please open the settings and configure the API key and base URL',
);
setError(true);
return;
}
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
(!openAIBaseURL || !openAIPIKey) &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
@ -233,8 +251,6 @@ const useSocket = (
console.debug(new Date(), 'ws:connected');
}
if (data.type === 'error') {
isConnectionErrorRef.current = true;
setError(true);
toast.error(data.data);
}
});
@ -249,7 +265,7 @@ const useSocket = (
clearTimeout(timeoutId);
setIsWSReady(false);
console.debug(new Date(), 'ws:disconnected');
if (!isCleaningUpRef.current && !isConnectionErrorRef.current) {
if (!isCleaningUpRef.current) {
toast.error('Connection lost. Attempting to reconnect...');
attemptReconnect();
}
@ -579,17 +595,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
}),
);
}
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document.getElementById('search-images')?.click();
}
if (autoVideoSearch === 'true') {
document.getElementById('search-videos')?.click();
}
}
};
@ -624,15 +629,17 @@ const ChatWindow = ({ id }: { id?: string }) => {
return (
<div className="relative">
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
<Link href="/settings">
<Settings className="cursor-pointer lg:hidden" />
</Link>
<Settings
className="cursor-pointer lg:hidden"
onClick={() => setIsSettingsOpen(true)}
/>
</div>
<div className="flex flex-col items-center justify-center min-h-screen">
<p className="dark:text-white/70 text-black/70 text-sm">
Failed to connect to the server. Please try again later.
</p>
</div>
<SettingsDialog isOpen={isSettingsOpen} setIsOpen={setIsSettingsOpen} />
</div>
);
}

View File

@ -1,8 +1,8 @@
import { Settings } from 'lucide-react';
import EmptyChatMessageInput from './EmptyChatMessageInput';
import SettingsDialog from './SettingsDialog';
import { useState } from 'react';
import { File } from './ChatWindow';
import Link from 'next/link';
const EmptyChat = ({
sendMessage,
@ -29,10 +29,12 @@ const EmptyChat = ({
return (
<div className="relative">
<SettingsDialog isOpen={isSettingsOpen} setIsOpen={setIsSettingsOpen} />
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
<Link href="/settings">
<Settings className="cursor-pointer lg:hidden" />
</Link>
<Settings
className="cursor-pointer lg:hidden"
onClick={() => setIsSettingsOpen(true)}
/>
</div>
<div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-8">
<h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">

View File

@ -11,6 +11,8 @@ import {
StopCircle,
Layers3,
Plus,
Brain,
ChevronDown,
} from 'lucide-react';
import Markdown from 'markdown-to-jsx';
import Copy from './MessageActions/Copy';
@ -41,26 +43,58 @@ const MessageBox = ({
}) => {
const [parsedMessage, setParsedMessage] = useState(message.content);
const [speechMessage, setSpeechMessage] = useState(message.content);
const [thinking, setThinking] = useState<string>('');
const [answer, setAnswer] = useState<string>('');
const [isThinkingExpanded, setIsThinkingExpanded] = useState(false);
useEffect(() => {
const regex = /\[(\d+)\]/g;
const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;
if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length > 0
) {
return setParsedMessage(
message.content.replace(
regex,
(_, number) =>
`<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
),
);
// Check for thinking content, including partial tags
const match = message.content.match(thinkRegex);
if (match) {
const [_, thinkingContent, answerContent] = match;
// Set thinking content even if </think> hasn't appeared yet
if (thinkingContent) {
setThinking(thinkingContent.trim());
setIsThinkingExpanded(true); // Auto-expand when thinking starts
}
// Only set answer content if we have it (after </think>)
if (answerContent) {
setAnswer(answerContent.trim());
// Process the answer part for sources if needed
if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
setParsedMessage(
answerContent.trim().replace(
regex,
(_, number) =>
`<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
),
);
} else {
setParsedMessage(answerContent.trim());
}
setSpeechMessage(answerContent.trim().replace(regex, ''));
}
} else {
// No thinking content - process as before
if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
setParsedMessage(
message.content.replace(
regex,
(_, number) =>
`<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
),
);
} else {
setParsedMessage(message.content);
}
setSpeechMessage(message.content.replace(regex, ''));
}
setSpeechMessage(message.content.replace(regex, ''));
setParsedMessage(message.content);
}, [message.content, message.sources, message.role]);
const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
@ -68,13 +102,7 @@ const MessageBox = ({
return (
<div>
{message.role === 'user' && (
<div
className={cn(
'w-full',
messageIndex === 0 ? 'pt-16' : 'pt-8',
'break-words',
)}
>
<div className={cn('w-full', messageIndex === 0 ? 'pt-16' : 'pt-8')}>
<h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
{message.content}
</h2>
@ -98,27 +126,71 @@ const MessageBox = ({
<MessageSources sources={message.sources} />
</div>
)}
<div className="flex flex-col space-y-2">
<div className="flex flex-row items-center space-x-2">
<Disc3
className={cn(
'text-black dark:text-white',
isLast && loading ? 'animate-spin' : 'animate-none',
<div className="flex flex-col space-y-4">
{thinking && (
<div className="flex flex-col space-y-2 mb-4">
<button
onClick={() => setIsThinkingExpanded(!isThinkingExpanded)}
className="flex flex-row items-center space-x-2 group text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white transition duration-200"
>
<Brain size={20} />
<h3 className="font-medium text-xl">Reasoning</h3>
<ChevronDown
size={16}
className={cn(
"transition-transform duration-200",
isThinkingExpanded ? "rotate-180" : ""
)}
/>
</button>
{isThinkingExpanded && (
<div className="rounded-lg bg-light-secondary/50 dark:bg-dark-secondary/50 p-4">
{thinking.split('\n\n').map((paragraph, index) => {
if (!paragraph.trim()) return null;
const content = paragraph.replace(/^[•\-\d.]\s*/, '');
return (
<div key={index} className="mb-2 last:mb-0">
<details className="group [&_summary::-webkit-details-marker]:hidden">
<summary className="flex items-center cursor-pointer list-none text-sm text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white">
<span className="arrow mr-2 inline-block transition-transform duration-200 group-open:rotate-90 group-open:self-start group-open:mt-1"></span>
<p className="relative whitespace-normal line-clamp-1 group-open:line-clamp-none after:content-['...'] after:inline group-open:after:hidden transition-all duration-200 text-ellipsis overflow-hidden group-open:overflow-visible">
{content}
</p>
</summary>
</details>
</div>
);
})}
</div>
)}
size={20}
/>
<h3 className="text-black dark:text-white font-medium text-xl">
Answer
</h3>
</div>
)}
<div className="flex flex-col space-y-2">
<div className="flex flex-row items-center space-x-2">
<Disc3
className={cn(
'text-black dark:text-white',
isLast && loading ? 'animate-spin' : 'animate-none',
)}
size={20}
/>
<h3 className="text-black dark:text-white font-medium text-xl">
Answer
</h3>
</div>
<Markdown
className={cn(
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'max-w-none break-words text-black dark:text-white',
)}
>
{parsedMessage}
</Markdown>
</div>
<Markdown
className={cn(
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'max-w-none break-words text-black dark:text-white',
)}
>
{parsedMessage}
</Markdown>
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
<div className="flex flex-row items-center space-x-1">

View File

@ -110,7 +110,7 @@ const Attach = ({
<button
type="button"
onClick={() => fileInputRef.current.click()}
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
>
<input
type="file"
@ -128,7 +128,7 @@ const Attach = ({
setFiles([]);
setFileIds([]);
}}
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
>
<Trash size={14} />
<p className="text-xs">Clear</p>
@ -145,7 +145,7 @@ const Attach = ({
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
</div>
<p className="text-black/70 dark:text-white/70 text-sm">
<p className="text-white/70 text-sm">
{file.fileName.length > 25
? file.fileName.replace(/\.\w+$/, '').substring(0, 25) +
'...' +

View File

@ -82,7 +82,7 @@ const AttachSmall = ({
<button
type="button"
onClick={() => fileInputRef.current.click()}
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
>
<input
type="file"
@ -100,7 +100,7 @@ const AttachSmall = ({
setFiles([]);
setFileIds([]);
}}
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
>
<Trash size={14} />
<p className="text-xs">Clear</p>
@ -117,7 +117,7 @@ const AttachSmall = ({
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
</div>
<p className="text-black/70 dark:text-white/70 text-sm">
<p className="text-white/70 text-sm">
{file.fileName.length > 25
? file.fileName.replace(/\.\w+$/, '').substring(0, 25) +
'...' +

View File

@ -27,7 +27,6 @@ const SearchImages = ({
<>
{!loading && images === null && (
<button
id="search-images"
onClick={async () => {
setLoading(true);

View File

@ -42,7 +42,6 @@ const Searchvideos = ({
<>
{!loading && videos === null && (
<button
id="search-videos"
onClick={async () => {
setLoading(true);

View File

@ -0,0 +1,528 @@
import { cn } from '@/lib/utils';
import {
Dialog,
DialogPanel,
DialogTitle,
Transition,
TransitionChild,
} from '@headlessui/react';
import { CloudUpload, RefreshCcw, RefreshCw } from 'lucide-react';
import React, {
Fragment,
useEffect,
useState,
type SelectHTMLAttributes,
} from 'react';
import ThemeSwitcher from './theme/Switcher';
interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {}
const Input = ({ className, ...restProps }: InputProps) => {
return (
<input
{...restProps}
className={cn(
'bg-light-secondary dark:bg-dark-secondary px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg text-sm',
className,
)}
/>
);
};
interface SelectProps extends SelectHTMLAttributes<HTMLSelectElement> {
options: { value: string; label: string; disabled?: boolean }[];
}
export const Select = ({ className, options, ...restProps }: SelectProps) => {
return (
<select
{...restProps}
className={cn(
'bg-light-secondary dark:bg-dark-secondary px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg text-sm',
className,
)}
>
{options.map(({ label, value, disabled }) => {
return (
<option key={value} value={value} disabled={disabled}>
{label}
</option>
);
})}
</select>
);
};
interface SettingsType {
chatModelProviders: {
[key: string]: [Record<string, any>];
};
embeddingModelProviders: {
[key: string]: [Record<string, any>];
};
openaiApiKey: string;
groqApiKey: string;
anthropicApiKey: string;
geminiApiKey: string;
ollamaApiUrl: string;
}
const SettingsDialog = ({
isOpen,
setIsOpen,
}: {
isOpen: boolean;
setIsOpen: (isOpen: boolean) => void;
}) => {
const [config, setConfig] = useState<SettingsType | null>(null);
const [chatModels, setChatModels] = useState<Record<string, any>>({});
const [embeddingModels, setEmbeddingModels] = useState<Record<string, any>>(
{},
);
const [selectedChatModelProvider, setSelectedChatModelProvider] = useState<
string | null
>(null);
const [selectedChatModel, setSelectedChatModel] = useState<string | null>(
null,
);
const [selectedEmbeddingModelProvider, setSelectedEmbeddingModelProvider] =
useState<string | null>(null);
const [selectedEmbeddingModel, setSelectedEmbeddingModel] = useState<
string | null
>(null);
const [customOpenAIApiKey, setCustomOpenAIApiKey] = useState<string>('');
const [customOpenAIBaseURL, setCustomOpenAIBaseURL] = useState<string>('');
const [isLoading, setIsLoading] = useState(false);
const [isUpdating, setIsUpdating] = useState(false);
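// Load the current configuration and available models from the backend each time the dialog opens.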
useEffect(() => {
if (isOpen) {
const fetchConfig = async () => {
setIsLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
headers: {
'Content-Type': 'application/json',
},
});
const data = (await res.json()) as SettingsType;
setConfig(data);
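// Prefer selections previously saved in localStorage; otherwise fall back to the first provider/model reported by the backend.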
const chatModelProvidersKeys = Object.keys(
data.chatModelProviders || {},
);
const embeddingModelProvidersKeys = Object.keys(
data.embeddingModelProviders || {},
);
const defaultChatModelProvider =
chatModelProvidersKeys.length > 0 ? chatModelProvidersKeys[0] : '';
const defaultEmbeddingModelProvider =
embeddingModelProvidersKeys.length > 0
? embeddingModelProvidersKeys[0]
: '';
const chatModelProvider =
localStorage.getItem('chatModelProvider') ||
defaultChatModelProvider ||
'';
const chatModel =
localStorage.getItem('chatModel') ||
(data.chatModelProviders &&
data.chatModelProviders[chatModelProvider]?.length > 0
? data.chatModelProviders[chatModelProvider][0].name
: undefined) ||
'';
const embeddingModelProvider =
localStorage.getItem('embeddingModelProvider') ||
defaultEmbeddingModelProvider ||
'';
const embeddingModel =
localStorage.getItem('embeddingModel') ||
(data.embeddingModelProviders &&
data.embeddingModelProviders[embeddingModelProvider]?.[0].name) ||
'';
setSelectedChatModelProvider(chatModelProvider);
setSelectedChatModel(chatModel);
setSelectedEmbeddingModelProvider(embeddingModelProvider);
setSelectedEmbeddingModel(embeddingModel);
setCustomOpenAIApiKey(localStorage.getItem('openAIApiKey') || '');
setCustomOpenAIBaseURL(localStorage.getItem('openAIBaseURL') || '');
setChatModels(data.chatModelProviders || {});
setEmbeddingModels(data.embeddingModelProviders || {});
setIsLoading(false);
};
fetchConfig();
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isOpen]);
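// Persist the chosen providers/models (and custom OpenAI credentials) in localStorage, POST the edited config to the backend, then reload so the new settings take effect.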
const handleSubmit = async () => {
setIsUpdating(true);
try {
await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(config),
});
localStorage.setItem('chatModelProvider', selectedChatModelProvider!);
localStorage.setItem('chatModel', selectedChatModel!);
localStorage.setItem(
'embeddingModelProvider',
selectedEmbeddingModelProvider!,
);
localStorage.setItem('embeddingModel', selectedEmbeddingModel!);
localStorage.setItem('openAIApiKey', customOpenAIApiKey!);
localStorage.setItem('openAIBaseURL', customOpenAIBaseURL!);
} catch (err) {
console.log(err);
} finally {
setIsUpdating(false);
setIsOpen(false);
window.location.reload();
}
};
return (
<Transition appear show={isOpen} as={Fragment}>
<Dialog
as="div"
className="relative z-50"
onClose={() => setIsOpen(false)}
>
<TransitionChild
as={Fragment}
enter="ease-out duration-300"
enterFrom="opacity-0"
enterTo="opacity-100"
leave="ease-in duration-200"
leaveFrom="opacity-100"
leaveTo="opacity-0"
>
<div className="fixed inset-0 bg-white/50 dark:bg-black/50" />
</TransitionChild>
<div className="fixed inset-0 overflow-y-auto">
<div className="flex min-h-full items-center justify-center p-4 text-center">
<TransitionChild
as={Fragment}
enter="ease-out duration-200"
enterFrom="opacity-0 scale-95"
enterTo="opacity-100 scale-100"
leave="ease-in duration-100"
leaveFrom="opacity-100 scale-200"
leaveTo="opacity-0 scale-95"
>
<DialogPanel className="w-full max-w-md transform rounded-2xl bg-light-secondary dark:bg-dark-secondary border border-light-200 dark:border-dark-200 p-6 text-left align-middle shadow-xl transition-all">
<DialogTitle className="text-xl font-medium leading-6 dark:text-white">
Settings
</DialogTitle>
{config && !isLoading && (
<div className="flex flex-col space-y-4 mt-6">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Theme
</p>
<ThemeSwitcher />
</div>
{config.chatModelProviders && (
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Chat Model Provider
</p>
<Select
value={selectedChatModelProvider ?? undefined}
onChange={(e) => {
setSelectedChatModelProvider(e.target.value);
if (e.target.value === 'custom_openai') {
setSelectedChatModel('');
} else {
setSelectedChatModel(
config.chatModelProviders[e.target.value][0]
.name,
);
}
}}
options={Object.keys(config.chatModelProviders).map(
(provider) => ({
value: provider,
label:
provider.charAt(0).toUpperCase() +
provider.slice(1),
}),
)}
/>
</div>
)}
{selectedChatModelProvider &&
selectedChatModelProvider != 'custom_openai' && (
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Chat Model
</p>
<Select
value={selectedChatModel ?? undefined}
onChange={(e) =>
setSelectedChatModel(e.target.value)
}
options={(() => {
const chatModelProvider =
config.chatModelProviders[
selectedChatModelProvider
];
return chatModelProvider
? chatModelProvider.length > 0
? chatModelProvider.map((model) => ({
value: model.name,
label: model.displayName,
}))
: [
{
value: '',
label: 'No models available',
disabled: true,
},
]
: [
{
value: '',
label:
'Invalid provider, please check backend logs',
disabled: true,
},
];
})()}
/>
</div>
)}
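{/* The custom_openai provider takes a free-form model name, API key and base URL instead of a fixed model list */}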
{selectedChatModelProvider &&
selectedChatModelProvider === 'custom_openai' && (
<>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Model name
</p>
<Input
type="text"
placeholder="Model name"
defaultValue={selectedChatModel!}
onChange={(e) =>
setSelectedChatModel(e.target.value)
}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Custom OpenAI API Key
</p>
<Input
type="text"
placeholder="Custom OpenAI API Key"
defaultValue={customOpenAIApiKey!}
onChange={(e) =>
setCustomOpenAIApiKey(e.target.value)
}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Custom OpenAI Base URL
</p>
<Input
type="text"
placeholder="Custom OpenAI Base URL"
defaultValue={customOpenAIBaseURL!}
onChange={(e) =>
setCustomOpenAIBaseURL(e.target.value)
}
/>
</div>
</>
)}
{/* Embedding models */}
{config.embeddingModelProviders && (
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Embedding Model Provider
</p>
<Select
value={selectedEmbeddingModelProvider ?? undefined}
onChange={(e) => {
setSelectedEmbeddingModelProvider(e.target.value);
setSelectedEmbeddingModel(
config.embeddingModelProviders[e.target.value][0]
.name,
);
}}
options={Object.keys(
config.embeddingModelProviders,
).map((provider) => ({
label:
provider.charAt(0).toUpperCase() +
provider.slice(1),
value: provider,
}))}
/>
</div>
)}
{selectedEmbeddingModelProvider && (
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Embedding Model
</p>
<Select
value={selectedEmbeddingModel ?? undefined}
onChange={(e) =>
setSelectedEmbeddingModel(e.target.value)
}
options={(() => {
const embeddingModelProvider =
config.embeddingModelProviders[
selectedEmbeddingModelProvider
];
return embeddingModelProvider
? embeddingModelProvider.length > 0
? embeddingModelProvider.map((model) => ({
label: model.displayName,
value: model.name,
}))
: [
{
label: 'No embedding models available',
value: '',
disabled: true,
},
]
: [
{
label:
'Invalid provider, please check backend logs',
value: '',
disabled: true,
},
];
})()}
/>
</div>
)}
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
OpenAI API Key
</p>
<Input
type="text"
placeholder="OpenAI API Key"
defaultValue={config.openaiApiKey}
onChange={(e) =>
setConfig({
...config,
openaiApiKey: e.target.value,
})
}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Ollama API URL
</p>
<Input
type="text"
placeholder="Ollama API URL"
defaultValue={config.ollamaApiUrl}
onChange={(e) =>
setConfig({
...config,
ollamaApiUrl: e.target.value,
})
}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
GROQ API Key
</p>
<Input
type="text"
placeholder="GROQ API Key"
defaultValue={config.groqApiKey}
onChange={(e) =>
setConfig({
...config,
groqApiKey: e.target.value,
})
}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Anthropic API Key
</p>
<Input
type="text"
placeholder="Anthropic API key"
defaultValue={config.anthropicApiKey}
onChange={(e) =>
setConfig({
...config,
anthropicApiKey: e.target.value,
})
}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Gemini API Key
</p>
<Input
type="text"
placeholder="Gemini API key"
defaultValue={config.geminiApiKey}
onChange={(e) =>
setConfig({
...config,
geminiApiKey: e.target.value,
})
}
/>
</div>
</div>
)}
{isLoading && (
<div className="w-full flex items-center justify-center mt-6 text-black/70 dark:text-white/70 py-6">
<RefreshCcw className="animate-spin" />
</div>
)}
<div className="w-full mt-6 space-y-2">
<p className="text-xs text-black/50 dark:text-white/50">
We&apos;ll refresh the page after updating the settings.
</p>
<button
onClick={handleSubmit}
className="bg-[#24A0ED] flex flex-row items-center space-x-2 text-white disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#ececec21] rounded-full px-4 py-2"
disabled={isLoading || isUpdating}
>
{isUpdating ? (
<RefreshCw size={20} className="animate-spin" />
) : (
<CloudUpload size={20} />
)}
</button>
</div>
</DialogPanel>
</TransitionChild>
</div>
</div>
</Dialog>
</Transition>
);
};
export default SettingsDialog;

View File

@ -6,6 +6,7 @@ import Link from 'next/link';
import { useSelectedLayoutSegments } from 'next/navigation';
import React, { useState, type ReactNode } from 'react';
import Layout from './Layout';
import SettingsDialog from './SettingsDialog';
const VerticalIconContainer = ({ children }: { children: ReactNode }) => {
return (
@ -66,9 +67,15 @@ const Sidebar = ({ children }: { children: React.ReactNode }) => {
))}
</VerticalIconContainer>
<Link href="/settings">
<Settings className="cursor-pointer" />
</Link>
<Settings
onClick={() => setIsSettingsOpen(!isSettingsOpen)}
className="cursor-pointer"
/>
<SettingsDialog
isOpen={isSettingsOpen}
setIsOpen={setIsSettingsOpen}
/>
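{/* The settings icon now toggles this in-app dialog instead of navigating to a separate /settings page */}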
</div>
</div>

View File

@ -1,7 +1,8 @@
'use client';
import { useTheme } from 'next-themes';
import { SunIcon, MoonIcon, MonitorIcon } from 'lucide-react';
import { useCallback, useEffect, useState } from 'react';
import Select from '../ui/Select';
import { Select } from '../SettingsDialog';
type Theme = 'dark' | 'light' | 'system';

View File

@ -1,28 +0,0 @@
import { cn } from '@/lib/utils';
import { SelectHTMLAttributes } from 'react';
interface SelectProps extends SelectHTMLAttributes<HTMLSelectElement> {
options: { value: string; label: string; disabled?: boolean }[];
}
export const Select = ({ className, options, ...restProps }: SelectProps) => {
return (
<select
{...restProps}
className={cn(
'bg-light-secondary dark:bg-dark-secondary px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg text-sm',
className,
)}
>
{options.map(({ label, value, disabled }) => {
return (
<option key={value} value={value} disabled={disabled}>
{label}
</option>
);
})}
</select>
);
};
export default Select;

View File

@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.10.0-rc3",
"version": "1.10.0-rc2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {