Compare commits

...

100 Commits

Author SHA1 Message Date
OTYAK
e20d5ecc01 Merge branch 'ItzCrazyKns:master' into master 2025-04-11 16:05:41 +01:00
sjiampojamarn
41b258e4d8 Set speech message before return 2025-04-08 23:17:52 -07:00
OTYAK
18533d58c2 Merge branch 'ItzCrazyKns:master' into master 2025-04-08 10:41:33 +01:00
OTYAK
54c71e33e0 feat(Tavily): update sample configuration for Tavily integration 2025-04-08 10:41:00 +01:00
ItzCrazyKns
da1123d84b feat(groq): update model name 2025-04-07 23:30:51 +05:30
ItzCrazyKns
627775c430 feat(groq): remove maverick (not being run yet) 2025-04-07 23:29:51 +05:30
ItzCrazyKns
245573efca feat(groq): update model list 2025-04-07 23:23:18 +05:30
OTYAK
2c56aa3cb3 feat(tavily): integrate Tavily search engine with configuration and UI support 2025-04-07 16:41:54 +01:00
ItzCrazyKns
a85f762c58 feat(package): bump version 2025-04-07 10:27:04 +05:30
ItzCrazyKns
3ddcceda0a feat(gemini-provider): update embedding models 2025-04-07 10:26:29 +05:30
ItzCrazyKns
e226645bc7 feat(app): lint & beautify 2025-04-06 13:48:58 +05:30
ItzCrazyKns
5447530ece Merge branch 'feat/deepseek-provider' 2025-04-06 13:48:10 +05:30
ItzCrazyKns
ed6d46a440 Merge branch 'pr/719' 2025-04-06 13:47:57 +05:30
ItzCrazyKns
588e68e93e feat(providers): add deepseek provider 2025-04-06 13:37:43 +05:30
ItzCrazyKns
c4440327db Merge pull request #720 from OmarElKadri/master
feat(search): add optional systemInstructions to API request body
2025-04-06 10:34:29 +05:30
OTYAK
64e2d457cc feat(search): add optional systemInstructions to API request body 2025-04-05 19:06:18 +01:00
ItzCrazyKns
bf705afc21 feat(message-box): change styles, lint & beautify 2025-04-05 22:32:56 +05:30
singleparadox
2e4433a6b3 feat(message-box): support [1,2,3,4] citation format instead of just [1][2][3] 2025-04-05 15:24:45 +00:00
ItzCrazyKns
09661ae11d feat(prompts): fix typo, closes #715 2025-04-02 13:02:28 +05:30
ItzCrazyKns
a8d410bc2f Merge pull request #716 from ItzCrazyKns/feat/system-instructions
Feat/system instructions
2025-04-01 15:59:18 +05:30
ItzCrazyKns
7d52fbb368 feat(settings): add system instructions 2025-04-01 15:50:24 +05:30
ItzCrazyKns
4b8e0ea1aa feat(chat-window): handle system instructions 2025-04-01 15:50:05 +05:30
ItzCrazyKns
5b1055e8c9 feat(routes): add system instructions 2025-04-01 15:49:36 +05:30
ItzCrazyKns
4b2a7916fd feat(docker-build): fix image tag errors 2025-03-30 22:51:59 +05:30
ItzCrazyKns
97e64aa65e Merge branch 'pr/703' 2025-03-30 21:12:27 +05:30
ItzCrazyKns
90e303f737 feat(search): lint & beautify, update content type 2025-03-30 21:12:04 +05:30
ItzCrazyKns
7955d8e408 Merge pull request #705 from ottsch/add-gemini-2.5
feat(models): Update Gemini chat models
2025-03-29 21:53:02 +05:30
ottsch
b285cb4323 Update Gemini chat models 2025-03-28 17:07:11 +01:00
OTYAK
5d60ab1139 feat(api): Switch to newline-delimited JSON streaming instead of SSE 2025-03-27 13:04:09 +01:00
OTYAK
9095996356 Merge branch 'ItzCrazyKns:master' into master 2025-03-27 13:01:09 +01:00
ItzCrazyKns
310c8a75fd feat(routes): fix typo, closes #692 2025-03-27 11:36:58 +05:30
OTYAK
191d1dc25f refactor(api): clean up comments and improve abort handling in search route 2025-03-26 11:32:46 +01:00
OTYAK
d3b2f8983d feat(api): add streaming support to search route 2025-03-26 11:28:05 +01:00
ItzCrazyKns
27286465a3 feat(package): bump version 2025-03-26 13:34:09 +05:30
ItzCrazyKns
defc677932 feat(providers): update gemini & anthropic provider 2025-03-25 22:01:24 +05:30
ItzCrazyKns
45df9dc5bf feat(readme): update networking guide 2025-03-21 11:27:12 +05:30
ItzCrazyKns
06db95d7c0 feat(dockerfile): fix onnx issues 2025-03-21 11:25:28 +05:30
ItzCrazyKns
74f7eaed6e feat(workflow): fix build errors 2025-03-20 13:43:29 +05:30
ItzCrazyKns
dddd944a18 feat(workflow): update docker build 2025-03-20 13:22:43 +05:30
ItzCrazyKns
7eccd4d75b Merge pull request #679 from ItzCrazyKns/feat/remove-backend
feat(app): fix build errors
2025-03-20 12:48:27 +05:30
ItzCrazyKns
62e6c24840 feat(app): fix build errors 2025-03-20 12:47:54 +05:30
ItzCrazyKns
04a0342b52 Merge pull request #678 from ItzCrazyKns/feat/remove-backend
Feat/remove backend
2025-03-20 12:42:18 +05:30
ItzCrazyKns
5c016127cb feat(package): bump version 2025-03-20 12:41:07 +05:30
ItzCrazyKns
8b552010f9 feat(docs): update docs 2025-03-20 12:33:15 +05:30
ItzCrazyKns
97804e7b4d feat(config): remove unused vars 2025-03-20 12:30:06 +05:30
ItzCrazyKns
33b895b75e feat(app): add search API 2025-03-20 12:29:52 +05:30
ItzCrazyKns
048de2cb74 feat(docs): update docs 2025-03-20 12:29:31 +05:30
ItzCrazyKns
274e6ca88c feat(sidebar): remove unused state 2025-03-20 11:49:00 +05:30
ItzCrazyKns
f628b6e416 feat(groq): remove deprecated model 2025-03-20 11:48:44 +05:30
ItzCrazyKns
cf7144db96 feat(providers): add HF transformers 2025-03-20 11:48:26 +05:30
ItzCrazyKns
ffa793056d feat(chains): remove think tags 2025-03-20 11:47:54 +05:30
ItzCrazyKns
584d02b92a feat(app): add thinking model support 2025-03-20 10:56:03 +05:30
ItzCrazyKns
008c7cbec0 feat(chat-window): remove debugging code, 2025-03-20 09:47:32 +05:30
ItzCrazyKns
4d1ee79b8d feat(package): migrate db when built 2025-03-20 09:47:12 +05:30
ItzCrazyKns
ea638279e5 feat(docker): use standalone build 2025-03-20 09:46:50 +05:30
ItzCrazyKns
403d13eb50 feat(package): update scripts 2025-03-19 16:34:55 +05:30
ItzCrazyKns
217736d05a feat(app): remove backend 2025-03-19 16:23:27 +05:30
ItzCrazyKns
8a24572cd2 feat(app): add upload functionality 2025-03-19 15:32:32 +05:30
ItzCrazyKns
649c68f292 feat(ui): fix type errors 2025-03-19 13:42:28 +05:30
ItzCrazyKns
bab5dba6e1 feat(app): port history saving features 2025-03-19 13:42:15 +05:30
ItzCrazyKns
c24edac16d feat(app): add chat functionality 2025-03-19 13:41:52 +05:30
ItzCrazyKns
3150c21f17 feat(icons): fix type errors 2025-03-19 13:41:01 +05:30
ItzCrazyKns
c46fd7a9c8 feat(utils): add files utils, remove logger, fix API url 2025-03-19 13:40:35 +05:30
ItzCrazyKns
bab32e8d70 feat(app): add suggestions route 2025-03-19 13:40:10 +05:30
ItzCrazyKns
1130746f5d feat(app): add image & video search functionality 2025-03-19 13:38:40 +05:30
ItzCrazyKns
d1e9361665 feat(routes): add discover route 2025-03-19 13:37:54 +05:30
ItzCrazyKns
3bf2337697 feat(app): add db & schema 2025-03-19 13:37:01 +05:30
ItzCrazyKns
ee6e197ec0 feat(app): lint & beautify 2025-03-18 11:29:04 +05:30
ItzCrazyKns
32f26bb4e8 feat(app): add groq, gemini & anthropic provider 2025-03-18 11:28:47 +05:30
ItzCrazyKns
4cb20542a5 feat(config): update file path, add post endpoint 2025-03-18 10:33:32 +05:30
ItzCrazyKns
97f6196d9b feat(app): add GET config route 2025-03-18 10:25:09 +05:30
ItzCrazyKns
6c227cab6f feat(providers): move providers to UI 2025-03-18 10:24:51 +05:30
ItzCrazyKns
e9e34ddff9 feat(ui): add meta search agent 2025-03-18 10:24:33 +05:30
ItzCrazyKns
e29a08dc46 feat(ui): add necessary utils 2025-03-18 10:24:16 +05:30
ItzCrazyKns
5c313e9bed feat(ui): update packages, add config, add searxng 2025-03-18 10:23:59 +05:30
ItzCrazyKns
6b5bd9d79b feat(prompts): move to UI 2025-03-18 10:23:21 +05:30
ItzCrazyKns
64d2a467b0 Merge pull request #672 from sjiampojamarn/scrolling
Only set scrollIntoView for user msg.
2025-03-17 12:03:05 +05:30
sjiampojamarn
9a2c4fe3b6 Only set scrollIntoView for user msg. 2025-03-16 22:15:58 -07:00
ItzCrazyKns
060c68a900 feat(message-box): lint & beautify 2025-03-14 22:05:07 +05:30
ItzCrazyKns
e6b87f89ec feat(sample-config): add custom openai model name 2025-03-08 20:08:27 +05:30
ItzCrazyKns
89b5229ce9 Merge pull request #663 from ericdachen/master
Update Readme
2025-03-05 11:11:07 +05:30
ItzCrazyKns
7756340dd9 Update README.md 2025-03-05 11:09:19 +05:30
ItzCrazyKns
bbd2e9c359 feat(readme): update warp banner 2025-03-05 11:05:25 +05:30
ItzCrazyKns
a32eb1dda3 feat(readme): lint & beautify, update anchor URL 2025-03-05 10:55:02 +05:30
Eric Chen
aa834f7f04 Update README.md 2025-03-04 14:45:10 -05:00
Eric Chen
064c0fbe42 Update README.md 2025-03-04 12:16:10 -05:00
Eric Chen
bf4cf8eaeb Update README.md 2025-03-04 12:14:17 -05:00
ItzCrazyKns
a24992a3db Merge pull request #655 from ShortCipher5/patch-1
chore: Add Sealos 1-click deployment
2025-03-01 21:56:01 +05:30
ShortCipher5
d584067bb1 Update README.md 2025-02-27 23:26:45 -08:00
ItzCrazyKns
df4350f966 Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-02-26 10:40:34 +05:30
ItzCrazyKns
652ca2fdf4 Merge pull request #649 from QuietlyChan/fix/light-theme-ui-bug
fix(ui): improve dark mode text color for attachment buttons
2025-02-26 10:36:41 +05:30
QuietlyChan
216576128d fix(ui): update attachment text color for light and dark modes 2025-02-25 19:26:58 +08:00
QuietlyChan
bb3f180583 fix(ui): improve dark mode text color for attachment buttons 2025-02-25 17:26:33 +08:00
ItzCrazyKns
4d24d73161 Merge pull request #631 from user1007017/patch-1
Update README.md grammatical error
2025-02-20 10:37:33 +05:30
wellCh4n
2e166c217b fix(MessageBox): break too long message title 2025-02-19 10:34:51 +08:00
ItzCrazyKns
4c73caadf6 feat(custom-openai): save live changes 2025-02-17 16:24:41 +05:30
user1007017
5f0b87f4a9 Update README.md 2025-02-15 19:06:46 +01:00
ItzCrazyKns
115e6b2a71 Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-02-15 12:52:30 +05:30
ItzCrazyKns
a5c79c92ed feat(settings): add embedding provider settings 2025-02-15 12:52:27 +05:30
ItzCrazyKns
db3cea446e Update UPDATING.md 2025-02-15 12:33:43 +05:30
127 changed files with 6000 additions and 7082 deletions

View File

@@ -8,18 +8,12 @@ on:
types: [published]
jobs:
build-and-push:
build-amd64:
runs-on: ubuntu-latest
strategy:
matrix:
service: [backend, app]
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
@@ -36,38 +30,109 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push Docker image for ${{ matrix.service }}
- name: Build and push AMD64 Docker image
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:main \
-t itzcrazykns1337/${IMAGE_NAME}:amd64 \
--push .
- name: Build and push release Docker image for ${{ matrix.service }}
- name: Build and push AMD64 release Docker image
if: github.event_name == 'release'
run: |
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--push .
build-arm64:
runs-on: ubuntu-24.04-arm
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
install: true
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push ARM64 Docker image
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:arm64 \
--push .
- name: Build and push ARM64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--push .
manifest:
needs: [build-amd64, build-arm64]
runs-on: ubuntu-latest
steps:
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Create and push multi-arch manifest for main
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
--amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
- name: Create and push multi-arch manifest for releases
if: github.event_name == 'release'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}

.gitignore vendored (6 lines changed)
View File

@@ -4,9 +4,9 @@ npm-debug.log
yarn-error.log
# Build output
/.next/
/out/
/dist/
.next/
out/
dist/
# IDE/Editor specific
.vscode/

View File

@@ -6,7 +6,6 @@ const config = {
endOfLine: 'auto',
singleQuote: true,
tabWidth: 2,
semi: true,
};
module.exports = config;

View File

@@ -1,32 +1,43 @@
# How to Contribute to Perplexica
Hey there, thanks for deciding to contribute to Perplexica. Anything you help with will support the development of Perplexica and will make it better. Let's walk you through the key aspects to ensure your contributions are effective and in harmony with the project's setup.
Thanks for your interest in contributing to Perplexica! Your help makes this project better. This guide explains how to contribute effectively.
Perplexica is a modern AI chat application with advanced search capabilities.
## Project Structure
Perplexica's design consists of two main domains:
Perplexica's codebase is organized as follows:
- **Frontend (`ui` directory)**: This is a Next.js application holding all user interface components. It's a self-contained environment that manages everything the user interacts with.
- **Backend (root and `src` directory)**: The backend logic is situated in the `src` folder, but the root directory holds the main `package.json` for backend dependency management.
- All of the focus modes are created using the Meta Search Agent class present in `src/search/metaSearchAgent.ts`. The main logic behind Perplexica lies there.
- **UI Components and Pages**:
- **Components (`src/components`)**: Reusable UI components.
- **Pages and Routes (`src/app`)**: Next.js app directory structure with page components.
- Main app routes include: home (`/`), chat (`/c`), discover (`/discover`), library (`/library`), and settings (`/settings`).
- **API Routes (`src/app/api`)**: API endpoints implemented with Next.js API routes.
- `/api/chat`: Handles chat interactions.
- `/api/search`: Provides direct access to Perplexica's search capabilities.
- Other endpoints for models, files, and suggestions.
- **Backend Logic (`src/lib`)**: Contains all the backend functionality including search, database, and API logic.
- The search functionality is present inside `src/lib/search` directory.
- All of the focus modes are implemented using the Meta Search Agent class in `src/lib/search/metaSearchAgent.ts`.
- Database functionality is in `src/lib/db`.
- Chat model and embedding model providers are managed in `src/lib/providers`.
- Prompt templates and LLM chain definitions are in `src/lib/prompts` and `src/lib/chains` respectively.
## API Documentation
Perplexica exposes several API endpoints for programmatic access, including:
- **Search API**: Access Perplexica's advanced search capabilities directly via the `/api/search` endpoint. For detailed documentation, see `docs/api/search.md`.
## Setting Up Your Environment
Before diving into coding, setting up your local environment is key. Here's what you need to do:
### Backend
1. In the root directory, locate the `sample.config.toml` file.
2. Rename it to `config.toml` and fill in the necessary configuration fields specific to the backend.
3. Run `npm install` to install dependencies.
4. Run `npm run db:push` to set up the local sqlite.
5. Use `npm run dev` to start the backend in development mode.
### Frontend
1. Navigate to the `ui` folder and repeat the process of renaming `.env.example` to `.env`, making sure to provide the frontend-specific variables.
2. Execute `npm install` within the `ui` directory to get the frontend dependencies ready.
3. Launch the frontend development server with `npm run dev`.
2. Rename it to `config.toml` and fill in the necessary configuration fields.
3. Run `npm install` to install all dependencies.
4. Run `npm run db:push` to set up the local sqlite database.
5. Use `npm run dev` to start the application in development mode.
**Please note**: Docker configurations are present for setting up production environments, whereas `npm run dev` is used for development purposes.

View File

@@ -1,7 +1,22 @@
# 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
<div align="center" markdown="1">
<sup>Special thanks to:</sup>
<br>
<br>
<a href="https://www.warp.dev/perplexica">
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/775dd593-9b5f-40f1-bf48-479faff4c27b">
</a>
### [Warp, the AI Devtool that lives in your terminal](https://www.warp.dev/perplexica)
[Available for MacOS, Linux, & Windows](https://www.warp.dev/perplexica)
</div>
<hr/>
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)
@@ -44,7 +59,7 @@ Want to know more about its architecture and how it works? You can read it [here
- **Normal Mode:** Processes your query and performs a web search.
- **Focus Modes:** Special modes to better answer specific types of questions. Perplexica currently has 6 focus modes:
- **All Mode:** Searches the entire web to find the best results.
- **Writing Assistant Mode:** Helpful for writing tasks that does not require searching the web.
- **Writing Assistant Mode:** Helpful for writing tasks that do not require searching the web.
- **Academic Search Mode:** Finds articles and papers, ideal for academic research.
- **YouTube Search Mode:** Finds YouTube videos based on the search query.
- **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
@@ -94,14 +109,13 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
1. Install SearXNG and allow `JSON` format in the SearXNG settings.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. Rename the `.env.example` file to `.env` in the `ui` folder and fill in all necessary fields.
4. After populating the configuration and environment files, run `npm i` in both the `ui` folder and the root directory.
5. Install the dependencies and then execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
3. After populating the configuration, run `npm i`.
4. Once the dependencies are installed, execute `npm run build`.
5. Finally, start the app by running `npm run start`.
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information, like exposing it to your network, etc.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
### Ollama Connection Errors
@@ -139,10 +153,11 @@ For more details, check out the full documentation [here](https://github.com/Itz
## Expose Perplexica to network
You can access Perplexica over your home network by following our networking guide [here](https://github.com/ItzCrazyKns/Perplexica/blob/master/docs/installation/NETWORKING.md).
Perplexica runs on Next.js and handles all API requests. It works right away on the same network and stays accessible even with port forwarding.
## One-Click Deployment
[![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
## Upcoming Features

View File

@@ -1,15 +1,27 @@
FROM node:20.18.0-alpine
ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
FROM node:20.18.0-slim AS builder
WORKDIR /home/perplexica
COPY ui /home/perplexica/
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn install --frozen-lockfile
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
RUN mkdir -p /home/perplexica/data
RUN yarn build
CMD ["yarn", "start"]
FROM node:20.18.0-slim
WORKDIR /home/perplexica
COPY --from=builder /home/perplexica/public ./public
COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data
RUN mkdir /home/perplexica/uploads
CMD ["node", "server.js"]

View File

@@ -1,17 +0,0 @@
FROM node:18-slim
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build
CMD ["yarn", "start"]

View File

@@ -9,41 +9,21 @@ services:
- perplexica-network
restart: unless-stopped
perplexica-backend:
build:
context: .
dockerfile: backend.dockerfile
image: itzcrazykns1337/perplexica-backend:main
environment:
- SEARXNG_API_URL=http://searxng:8080
depends_on:
- searxng
ports:
- 3001:3001
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
- ./config.toml:/home/perplexica/config.toml
extra_hosts:
- 'host.docker.internal:host-gateway'
networks:
- perplexica-network
restart: unless-stopped
perplexica-frontend:
app:
image: itzcrazykns1337/perplexica:main
build:
context: .
dockerfile: app.dockerfile
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
image: itzcrazykns1337/perplexica-frontend:main
depends_on:
- perplexica-backend
environment:
- SEARXNG_API_URL=http://searxng:8080
ports:
- 3000:3000
networks:
- perplexica-network
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
- ./config.toml:/home/perplexica/config.toml
restart: unless-stopped
networks:

View File

@@ -6,9 +6,9 @@ Perplexica's Search API makes it easy to use our AI-powered search engine. You
## Endpoint
### **POST** `http://localhost:3001/api/search`
### **POST** `http://localhost:3000/api/search`
**Note**: Replace `3001` with any other port if you've changed the default PORT
**Note**: Replace `3000` with any other port if you've changed the default PORT
### Request
@@ -20,11 +20,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
{
"chatModel": {
"provider": "openai",
"model": "gpt-4o-mini"
"name": "gpt-4o-mini"
},
"embeddingModel": {
"provider": "openai",
"model": "text-embedding-3-large"
"name": "text-embedding-3-large"
},
"optimizationMode": "speed",
"focusMode": "webSearch",
@@ -32,24 +32,26 @@ The API accepts a JSON object in the request body, where you define the focus mo
"history": [
["human", "Hi, how are you?"],
["assistant", "I am doing well, how can I help you today?"]
]
],
"systemInstructions": "Focus on providing technical details about Perplexica's architecture.",
"stream": false
}
```
### Request Parameters
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request to `http://localhost:3000/api/models`. Make sure to use the key value (for example, "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
- `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- Optional fields for custom OpenAI configuration:
- `customOpenAIBaseURL`: If youre using a custom OpenAI instance, provide the base URL.
- `customOpenAIKey`: The API key for a custom OpenAI instance.
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request to `http://localhost:3000/api/models`. Make sure to use the key value (for example, "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- `provider`: The provider for the embedding model (e.g., `openai`).
- `model`: The specific embedding model (e.g., `text-embedding-3-large`).
- `name`: The specific embedding model (e.g., `text-embedding-3-large`).
- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
@@ -62,6 +64,8 @@ The API accepts a JSON object in the request body, where you define the focus mo
- **`query`** (string, required): The search query or question.
- **`systemInstructions`** (string, optional): Custom instructions provided by the user to guide the AI's response. These instructions are treated as user preferences and have lower priority than the system's core instructions. For example, you can specify a particular writing style, format, or focus area.
- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
```json
@@ -71,11 +75,13 @@ The API accepts a JSON object in the request body, where you define the focus mo
]
```
- **`stream`** (boolean, optional): When set to `true`, enables streaming responses. Default is `false`.
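For illustration, here is a minimal TypeScript sketch of a non-streaming request. It assumes Perplexica is reachable at `http://localhost:3000` and uses only the fields documented above; the client code itself is not part of Perplexica:

```typescript
// Hypothetical client for POST /api/search with stream: false.
const res = await fetch('http://localhost:3000/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    focusMode: 'webSearch', // required
    query: 'What is Perplexica?', // required
    optimizationMode: 'speed', // optional
    stream: false, // optional, defaults to false
  }),
});

// The standard response carries the full answer and its sources in one object.
const { message, sources } = await res.json();
console.log(message);
console.log(`${sources.length} sources`);
```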
### Response
The response from the API includes both the final message and the sources used to generate that message.
#### Example Response
#### Standard Response (stream: false)
```json
{
@@ -100,6 +106,28 @@ The response from the API includes both the final message and the sources used t
}
```
#### Streaming Response (stream: true)
When streaming is enabled, the API returns a stream of newline-delimited JSON objects. Each line contains a complete, valid JSON object. The response has Content-Type: application/json.
Example of streamed response objects:
```
{"type":"init","data":"Stream connected"}
{"type":"sources","data":[{"pageContent":"...","metadata":{"title":"...","url":"..."}},...]}
{"type":"response","data":"Perplexica is an "}
{"type":"response","data":"innovative, open-source "}
{"type":"response","data":"AI-powered search engine..."}
{"type":"done"}
```
Clients should process each line as a separate JSON object. The different message types include:
- **`init`**: Initial connection message
- **`sources`**: All sources used for the response
- **`response`**: Chunks of the generated answer text
- **`done`**: Indicates the stream is complete
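As a sketch (not part of Perplexica itself), a TypeScript client could consume the stream by buffering bytes, splitting on newlines, and dispatching on the `type` field:

```typescript
// Hypothetical NDJSON consumer for POST /api/search with stream: true.
const res = await fetch('http://localhost:3000/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    focusMode: 'webSearch',
    query: 'What is Perplexica?',
    stream: true,
  }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
let buffer = '';

const handle = (event: { type: string; data?: any }) => {
  if (event.type === 'response') process.stdout.write(event.data);
  else if (event.type === 'sources') console.log(`\n${event.data.length} sources`);
  else if (event.type === 'done') console.log('\n[stream complete]');
};

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  buffer += decoder.decode(value, { stream: true });
  // Each complete line is one JSON object; keep any partial trailing line buffered.
  const lines = buffer.split('\n');
  buffer = lines.pop()!;
  for (const line of lines) {
    if (line.trim()) handle(JSON.parse(line));
  }
}
if (buffer.trim()) handle(JSON.parse(buffer)); // flush a final unterminated line
```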
### Fields in the Response
- **`message`** (string): The search result, generated based on the query and focus mode.

View File

@@ -4,7 +4,7 @@ Curious about how Perplexica works? Don't worry, we'll cover it here. Before we
We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:
1. The message is sent via WS to the backend server where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
1. The message is sent to the `/api/chat` route where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
2. The chain is now invoked. First, the message is passed to another chain that predicts (using the chat history and the question) whether sources and a web search are needed. If they are, it generates a web search query (in accordance with the chat history) that we'll take up later. If not, the chain ends there, and the answer generator chain, also known as the response generator, is started.
3. The query returned by the first chain is passed to SearXNG to search the web for information.
4. The information retrieved at this point comes from a keyword-based search. We then convert both the information and the query into embeddings and perform a similarity search to find the sources most relevant to answering the query.
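To make step 4 concrete, here is a rough TypeScript sketch of similarity-based re-ranking. The `Embed` function stands in for whichever embedding model is configured, and none of these names come from Perplexica's actual code:

```typescript
// Hypothetical re-ranking of keyword-search results by cosine similarity.
type Doc = { pageContent: string; metadata: { title: string; url: string } };
type Embed = (texts: string[]) => Promise<number[][]>; // caller supplies the embedding model

function cosine(a: number[], b: number[]): number {
  let dot = 0;
  let na = 0;
  let nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb));
}

async function rerank(embed: Embed, query: string, docs: Doc[], topK = 5): Promise<Doc[]> {
  // Embed the query and every document in one batch, then sort by similarity.
  const [queryVec, ...docVecs] = await embed([query, ...docs.map((d) => d.pageContent)]);
  return docs
    .map((doc, i) => ({ doc, score: cosine(queryVec, docVecs[i]) }))
    .sort((a, b) => b.score - a.score)
    .slice(0, topK)
    .map((x) => x.doc);
}
```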

View File

@@ -1,109 +0,0 @@
# Expose Perplexica to a network
This guide will show you how to make Perplexica available over a network. Follow these steps to allow computers on the same network to interact with Perplexica. Choose the instructions that match the operating system you are using.
## Windows
1. Open PowerShell as Administrator
2. Navigate to the directory containing the `docker-compose.yaml` file
3. Stop and remove the existing Perplexica containers and images:
```bash
docker compose down --rmi all
```
4. Open the `docker-compose.yaml` file in a text editor like Notepad++
5. Replace `127.0.0.1` with the IP address of the server Perplexica is running on in these two lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and close the `docker-compose.yaml` file
7. Rebuild and restart the Perplexica container:
```bash
docker compose up -d --build
```
## macOS
1. Open the Terminal application
2. Navigate to the directory with the `docker-compose.yaml` file:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove existing containers and images:
```bash
docker compose down --rmi all
```
4. Open `docker-compose.yaml` in a text editor like Sublime Text:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP in these lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```
## Linux
1. Open the terminal
2. Navigate to the `docker-compose.yaml` directory:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove containers and images:
```bash
docker compose down --rmi all
```
4. Edit `docker-compose.yaml`:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```

View File

@@ -7,34 +7,40 @@ To update Perplexica to the latest version, follow these steps:
1. Clone the latest version of Perplexica from GitHub:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
git clone https://github.com/ItzCrazyKns/Perplexica.git
```
2. Navigate to the Project Directory.
2. Navigate to the project directory.
3. Pull latest images from registry.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Pull the latest images from the registry.
```bash
docker compose pull
```
4. Update and Recreate containers.
5. Update and recreate the containers.
```bash
docker compose up -d
```
5. Once the command completes running go to http://localhost:3000 and verify the latest changes.
6. Once the command completes, go to http://localhost:3000 and verify the latest changes.
## For non Docker users
## For non-Docker users
1. Clone the latest version of Perplexica from GitHub:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
git clone https://github.com/ItzCrazyKns/Perplexica.git
```
2. Navigate to the Project Directory
3. Execute `npm i` in both the `ui` folder and the root directory.
4. Once packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
5. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. After populating the configuration, run `npm i`.
5. Once the dependencies are installed, execute `npm run build`.
6. Finally, start the app by running `npm run start`.
---

View File

@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
export default defineConfig({
dialect: 'sqlite',
schema: './src/db/schema.ts',
schema: './src/lib/db/schema.ts',
out: './drizzle',
dbCredentials: {
url: './data/db.sqlite',

next-env.d.ts vendored (new file, 5 lines)
View File

@@ -0,0 +1,5 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

View File

@@ -1,5 +1,6 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
output: 'standalone',
images: {
remotePatterns: [
{
@@ -7,6 +8,7 @@ const nextConfig = {
},
],
},
serverExternalPackages: ['pdf-parse'],
};
export default nextConfig;

View File

@@ -1,53 +1,65 @@
{
"name": "perplexica-backend",
"version": "1.10.0-rc3",
"name": "perplexica-frontend",
"version": "1.10.2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
"start": "npm run db:push && node dist/app.js",
"build": "tsc",
"dev": "nodemon --ignore uploads/ src/app.ts ",
"db:push": "drizzle-kit push sqlite",
"format": "prettier . --check",
"format:write": "prettier . --write"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.10",
"@types/cors": "^2.8.17",
"@types/express": "^4.17.21",
"@types/html-to-text": "^9.0.4",
"@types/multer": "^1.4.12",
"@types/pdf-parse": "^1.1.4",
"@types/readable-stream": "^4.0.11",
"@types/ws": "^8.5.12",
"drizzle-kit": "^0.22.7",
"nodemon": "^3.1.0",
"prettier": "^3.2.5",
"ts-node": "^10.9.2",
"typescript": "^5.4.3"
"dev": "next dev",
"build": "npm run db:push && next build",
"start": "next start",
"lint": "next lint",
"format:write": "prettier . --write",
"db:push": "drizzle-kit push"
},
"dependencies": {
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@langchain/anthropic": "^0.2.3",
"@langchain/community": "^0.2.16",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.15",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/google-genai": "^0.1.12",
"@langchain/openai": "^0.0.25",
"@langchain/google-genai": "^0.0.23",
"@xenova/transformers": "^2.17.1",
"axios": "^1.6.8",
"better-sqlite3": "^11.0.0",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
"axios": "^1.8.3",
"better-sqlite3": "^11.9.1",
"clsx": "^2.1.0",
"compute-cosine-similarity": "^1.1.0",
"compute-dot": "^1.1.0",
"cors": "^2.8.5",
"dotenv": "^16.4.5",
"drizzle-orm": "^0.31.2",
"express": "^4.19.2",
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"langchain": "^0.1.30",
"mammoth": "^1.8.0",
"multer": "^1.4.5-lts.1",
"lucide-react": "^0.363.0",
"markdown-to-jsx": "^7.7.2",
"next": "^15.2.2",
"next-themes": "^0.3.0",
"pdf-parse": "^1.1.1",
"winston": "^3.13.0",
"ws": "^8.17.1",
"react": "^18",
"react-dom": "^18",
"react-text-to-speech": "^0.14.5",
"react-textarea-autosize": "^8.5.3",
"sonner": "^1.4.41",
"tailwind-merge": "^2.2.2",
"winston": "^3.17.0",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",
"@types/html-to-text": "^9.0.4",
"@types/node": "^20",
"@types/pdf-parse": "^1.1.4",
"@types/react": "^18",
"@types/react-dom": "^18",
"autoprefixer": "^10.0.1",
"drizzle-kit": "^0.30.5",
"eslint": "^8",
"eslint-config-next": "14.1.4",
"postcss": "^8",
"prettier": "^3.2.5",
"tailwindcss": "^3.3.0",
"typescript": "^5"
}
}

View File

Binary image changed (1.3 KiB before and after).

View File

Binary image changed (629 B before and after).

View File

@@ -1,5 +1,4 @@
[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
@@ -18,9 +17,17 @@ API_KEY = ""
[MODELS.CUSTOM_OPENAI]
API_KEY = ""
API_URL = ""
MODEL_NAME = ""
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[MODELS.DEEPSEEK]
API_KEY = ""
[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL
SEARXNG = "" # SearxNG API URL - http://localhost:32768
TAVILY = "" # Tavily API key
[SEARCH]
ENGINE = "searxng" # "searxng" or "tavily"

View File

@@ -1,38 +0,0 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});

src/app/api/chat/route.ts (new file, 306 lines)
View File

@@ -0,0 +1,306 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
type Message = {
messageId: string;
chatId: string;
content: string;
};
type ChatModel = {
provider: string;
name: string;
};
type EmbeddingModel = {
provider: string;
name: string;
};
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
systemInstructions: string;
};
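// Bridges the search emitter to the HTTP response: every chunk is written out as
// one newline-delimited JSON object, and the assembled answer (with its sources)
// is saved to the messages table once the stream ends.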
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
) => {
let receivedMessage = '';
let sources: any[] = [];
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
receivedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
sources = parsedData.data;
}
});
stream.on('end', () => {
writer.write(
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
}) + '\n',
),
);
writer.close();
db.insert(messagesSchema)
.values({
content: receivedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
});
stream.on('error', (data) => {
const parsedData = JSON.parse(data);
writer.write(
encoder.encode(
JSON.stringify({
type: 'error',
data: parsedData.data,
}),
),
);
writer.close();
});
};
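// Creates the chat record on first message and stores the human message; if the
// message already exists (e.g. a regenerated answer), every later message in the
// chat is deleted so the saved history matches the client's view.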
const handleHistorySave = async (
message: Message,
humanMessageId: string,
focusMode: string,
files: string[],
) => {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, message.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: message.chatId,
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: message.content,
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, message.chatId),
),
)
.execute();
}
};
export const POST = async (req: Request) => {
try {
const body = (await req.json()) as Body;
const { message } = body;
if (message.content === '') {
return Response.json(
{
message: 'Please provide a message to process',
},
{ status: 400 },
);
}
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.name || Object.keys(chatModelProvider)[0]
];
const embeddingProvider =
embeddingModelProviders[
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
];
const embeddingModel =
embeddingProvider[
body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
];
let llm: BaseChatModel | undefined;
let embedding = embeddingModel.model;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
if (!embedding) {
return Response.json(
{ error: 'Invalid embedding model' },
{ status: 400 },
);
}
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const handler = searchHandlers[body.focusMode];
if (!handler) {
return Response.json(
{
message: 'Invalid focus mode',
},
{ status: 400 },
);
}
const stream = await handler.searchAndAnswer(
message.content,
history,
llm,
embedding,
body.optimizationMode,
body.files,
body.systemInstructions,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
headers: {
'Content-Type': 'text/event-stream',
Connection: 'keep-alive',
'Cache-Control': 'no-cache, no-transform',
},
});
} catch (err) {
console.error('An error occurred while processing chat request:', err);
return Response.json(
{ message: 'An error occurred while processing chat request' },
{ status: 500 },
);
}
};

View File

@@ -0,0 +1,69 @@
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
export const GET = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, id),
});
return Response.json(
{
chat: chatExists,
messages: chatMessages,
},
{ status: 200 },
);
} catch (err) {
console.error('Error in getting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
export const DELETE = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
await db.delete(chats).where(eq(chats.id, id)).execute();
await db.delete(messages).where(eq(messages.chatId, id)).execute();
return Response.json(
{ message: 'Chat deleted successfully' },
{ status: 200 },
);
} catch (err) {
console.error('Error in deleting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

View File

@@ -0,0 +1,15 @@
import db from '@/lib/db';
export const GET = async (req: Request) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return Response.json({ chats: chats }, { status: 200 });
} catch (err) {
console.error('Error in getting chats: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

src/app/api/config/route.ts (new file, 124 lines)
View File

@@ -0,0 +1,124 @@
import {
getAnthropicApiKey,
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
getGeminiApiKey,
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
getDeepseekApiKey,
getSearchEngine,
getTavilyApiKey,
updateConfig,
} from '@/lib/config';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const config: Record<string, any> = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: chatModelProviders[provider][model].displayName,
};
});
}
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: embeddingModelProviders[provider][model].displayName,
};
});
}
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['deepseekApiKey'] = getDeepseekApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
config['searchEngine'] = getSearchEngine();
config['tavilyApiKey'] = getTavilyApiKey();
return Response.json({ ...config }, { status: 200 });
} catch (err) {
console.error('An error occurred while getting config:', err);
return Response.json(
{ message: 'An error occurred while getting config' },
{ status: 500 },
);
}
};
export const POST = async (req: Request) => {
try {
const config = await req.json();
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
DEEPSEEK: {
API_KEY: config.deepseekApiKey,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
SEARCH: {
ENGINE: config.searchEngine,
},
API_ENDPOINTS: {
TAVILY: config.tavilyApiKey || '',
},
};
updateConfig(updatedConfig);
return Response.json({ message: 'Config updated' }, { status: 200 });
} catch (err) {
console.error('An error occurred while updating config:', err);
return Response.json(
{ message: 'An error occurred while updating config' },
{ status: 500 },
);
}
};

View File

@@ -0,0 +1,61 @@
import { searchSearxng } from '../../../lib/searchEngines/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
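// In the GET handler below, i % articleWebsites.length and i % topics.length
// together enumerate every site/topic pair: with 3 active sites and 2 topics
// (coprime counts), the 6 iterations cover all 6 combinations exactly once.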
export const GET = async (req: Request) => {
try {
const data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
).results;
}),
])
)
.map((result) => result)
.flat()
.sort(() => Math.random() - 0.5);
return Response.json(
{
blogs: data,
},
{
status: 200,
},
);
} catch (err) {
console.error(`An error occurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',
},
{
status: 500,
},
);
}
};

View File

@@ -0,0 +1,83 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching images: ${err}`);
return Response.json(
{ message: 'An error occurred while searching images' },
{ status: 500 },
);
}
};

View File

@@ -0,0 +1,47 @@
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete (chatModelProviders[provider][model] as { model?: unknown })
.model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete (embeddingModelProviders[provider][model] as { model?: unknown })
.model;
});
});
return Response.json(
{
chatModelProviders,
embeddingModelProviders,
},
{
status: 200,
},
);
} catch (err) {
console.error('An error occurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',
},
{
status: 500,
},
);
}
};

src/app/api/search/route.ts (new file, 270 lines)
View File

@@ -0,0 +1,270 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '@/lib/search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
interface chatModel {
provider: string;
name: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
}
interface embeddingModel {
provider: string;
name: string;
}
interface ChatRequestBody {
optimizationMode: 'speed' | 'balanced';
focusMode: string;
chatModel?: chatModel;
embeddingModel?: embeddingModel;
query: string;
history: Array<[string, string]>;
stream?: boolean;
systemInstructions?: string;
}
export const POST = async (req: Request) => {
try {
const body: ChatRequestBody = await req.json();
if (!body.focusMode || !body.query) {
return Response.json(
{ message: 'Missing focus mode or query' },
{ status: 400 },
);
}
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
body.stream = body.stream || false;
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.name ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
const embeddingModel =
body.embeddingModel?.name ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.name || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
return Response.json(
{ message: 'Invalid model selected' },
{ status: 400 },
);
}
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
return Response.json({ message: 'Invalid focus mode' }, { status: 400 });
}
const emitter = await searchHandler.searchAndAnswer(
body.query,
history,
llm,
embeddings,
body.optimizationMode,
[],
body.systemInstructions || '',
);
if (!body.stream) {
return new Promise(
(
resolve: (value: Response) => void,
reject: (value: Response) => void,
) => {
let message = '';
let sources: any[] = [];
emitter.on('data', (data: string) => {
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
} catch (error) {
reject(
Response.json(
{ message: 'Error parsing data' },
{ status: 500 },
),
);
}
});
emitter.on('end', () => {
resolve(Response.json({ message, sources }, { status: 200 }));
});
emitter.on('error', (error: any) => {
reject(
Response.json(
{ message: 'Search error', error },
{ status: 500 },
),
);
});
},
);
}
const encoder = new TextEncoder();
const abortController = new AbortController();
const { signal } = abortController;
const stream = new ReadableStream({
start(controller) {
let sources: any[] = [];
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'init',
data: 'Stream connected',
}) + '\n',
),
);
signal.addEventListener('abort', () => {
emitter.removeAllListeners();
try {
controller.close();
} catch (error) {}
});
emitter.on('data', (data: string) => {
if (signal.aborted) return;
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'response',
data: parsedData.data,
}) + '\n',
),
);
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'sources',
data: sources,
}) + '\n',
),
);
}
} catch (error) {
controller.error(error);
}
});
emitter.on('end', () => {
if (signal.aborted) return;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'done',
}) + '\n',
),
);
controller.close();
});
emitter.on('error', (error: any) => {
if (signal.aborted) return;
controller.error(error);
});
},
cancel() {
abortController.abort();
},
});
return new Response(stream, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache, no-transform',
Connection: 'keep-alive',
},
});
} catch (err: any) {
console.error(`Error in getting search results: ${err.message}`);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
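
A short usage sketch against this route in non-streaming mode. The request fields mirror `ChatRequestBody` above; the focus mode key `webSearch` is assumed for illustration, and omitting `chatModel`/`embeddingModel` falls back to the first configured providers:

```ts
// Minimal non-streaming call: resolves to one JSON object with the
// final answer and its sources.
const searchOnce = async () => {
  const res = await fetch('/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      focusMode: 'webSearch', // assumed focus mode key
      query: 'What is Perplexica?',
      history: [['human', 'Hi there']], // [role, content] tuples
      optimizationMode: 'balanced',
      stream: false,
    }),
  });
  const { message, sources } = await res.json();
  return { message, sources };
};
```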

@ -0,0 +1,81 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
);
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error occurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error occurred while generating suggestions' },
{ status: 500 },
);
}
};
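
A hedged sketch of calling this route. Note the history format differs from /api/search: this handler expects role-tagged message objects, not tuples:

```ts
// chatHistory here uses { role, content } objects, unlike the
// [role, content] tuples that /api/search expects.
const fetchSuggestions = async () => {
  const res = await fetch('/api/suggestions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatHistory: [
        { role: 'user', content: 'Tell me about SearxNG' },
        { role: 'assistant', content: 'SearxNG is a metasearch engine.' },
      ],
    }),
  });
  const { suggestions } = await res.json(); // suggested follow-up questions
  return suggestions;
};
```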

@ -0,0 +1,134 @@
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
interface FileRes {
fileName: string;
fileExtension: string;
fileId: string;
}
const uploadDir = path.join(process.cwd(), 'uploads');
if (!fs.existsSync(uploadDir)) {
fs.mkdirSync(uploadDir, { recursive: true });
}
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
export async function POST(req: Request) {
try {
const formData = await req.formData();
const files = formData.getAll('files') as File[];
const embedding_model = formData.get('embedding_model');
const embedding_model_provider = formData.get('embedding_model_provider');
if (!embedding_model || !embedding_model_provider) {
return NextResponse.json(
{ message: 'Missing embedding model or provider' },
{ status: 400 },
);
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel =
embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
let embeddingsModel =
embeddingModels[provider as string]?.[embeddingModel as string]?.model;
if (!embeddingsModel) {
return NextResponse.json(
{ message: 'Invalid embedding model selected' },
{ status: 400 },
);
}
const processedFiles: FileRes[] = [];
await Promise.all(
files.map(async (file: any) => {
const fileExtension = file.name.split('.').pop();
if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
return NextResponse.json(
{ message: 'File type not supported' },
{ status: 400 },
);
}
const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
const filePath = path.join(uploadDir, uniqueFileName);
const buffer = Buffer.from(await file.arrayBuffer());
fs.writeFileSync(filePath, new Uint8Array(buffer));
let docs: any[] = [];
if (fileExtension === 'pdf') {
const loader = new PDFLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'docx') {
const loader = new DocxLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'txt') {
const text = fs.readFileSync(filePath, 'utf-8');
docs = [
new Document({ pageContent: text, metadata: { title: file.name } }),
];
}
const splitted = await splitter.splitDocuments(docs);
const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(
extractedDataPath,
JSON.stringify({
title: file.name,
contents: splitted.map((doc) => doc.pageContent),
}),
);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsDataPath = filePath.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(
embeddingsDataPath,
JSON.stringify({
title: file.name,
embeddings,
}),
);
processedFiles.push({
fileName: file.name,
fileExtension: fileExtension,
fileId: uniqueFileName.replace(/\.\w+$/, ''),
});
}),
);
return NextResponse.json({
files: processedFiles,
});
} catch (error) {
console.error('Error uploading file:', error);
return NextResponse.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
}
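
A browser-side sketch of the multipart request this handler expects. The field names match the `formData.get(...)` reads above; the provider and model values are placeholders:

```ts
// Upload a document for indexing; returns the processed file metadata.
const uploadFile = async (file: File) => {
  const data = new FormData();
  data.append('files', file); // a File from an <input type="file">
  data.append('embedding_model_provider', 'openai'); // placeholder
  data.append('embedding_model', 'text-embedding-3-small'); // placeholder
  const res = await fetch('/api/uploads', { method: 'POST', body: data });
  const { files } = await res.json(); // [{ fileName, fileExtension, fileId }]
  return files;
};
```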

@ -0,0 +1,83 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ videos }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching videos: ${err}`);
return Response.json(
{ message: 'An error occurred while searching videos' },
{ status: 500 },
);
}
};

@ -0,0 +1,9 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
};
export default Page;

@ -19,7 +19,7 @@ const Page = () => {
useEffect(() => {
const fetchData = async () => {
try {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
const res = await fetch(`/api/discover`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

(binary image asset changed; 25 KiB before and after)

@ -21,7 +21,7 @@ const Page = () => {
const fetchChats = async () => {
setLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
const res = await fetch(`/api/chats`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

@ -20,9 +20,12 @@ interface SettingsType {
anthropicApiKey: string;
geminiApiKey: string;
ollamaApiUrl: string;
deepseekApiKey: string;
customOpenaiApiKey: string;
customOpenaiApiUrl: string;
customOpenaiModelName: string;
searchEngine: string;
tavilyApiKey?: string;
}
interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
@ -54,6 +57,38 @@ const Input = ({ className, isSaving, onSave, ...restProps }: InputProps) => {
);
};
interface TextareaProps extends React.InputHTMLAttributes<HTMLTextAreaElement> {
isSaving?: boolean;
onSave?: (value: string) => void;
}
const Textarea = ({
className,
isSaving,
onSave,
...restProps
}: TextareaProps) => {
return (
<div className="relative">
<textarea
placeholder="Any special instructions for the LLM"
className="placeholder:text-sm text-sm w-full flex items-center justify-between p-3 bg-light-secondary dark:bg-dark-secondary rounded-lg hover:bg-light-200 dark:hover:bg-dark-200 transition-colors"
rows={4}
onBlur={(e) => onSave?.(e.target.value)}
{...restProps}
/>
{isSaving && (
<div className="absolute right-3 top-3">
<Loader2
size={16}
className="animate-spin text-black/70 dark:text-white/70"
/>
</div>
)}
</div>
);
};
const Select = ({
className,
options,
@ -111,12 +146,14 @@ const Page = () => {
const [isLoading, setIsLoading] = useState(false);
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [systemInstructions, setSystemInstructions] = useState<string>('');
const [searchEngine, setSearchEngine] = useState<string>('searxng');
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
useEffect(() => {
const fetchConfig = async () => {
setIsLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
const res = await fetch(`/api/config`, {
headers: {
'Content-Type': 'application/json',
},
@ -172,6 +209,9 @@ const Page = () => {
localStorage.getItem('autoVideoSearch') === 'true',
);
setSystemInstructions(localStorage.getItem('systemInstructions')!);
setSearchEngine(localStorage.getItem('searchEngine') || 'searxng');
setIsLoading(false);
};
@ -187,16 +227,13 @@ const Page = () => {
[key]: value,
} as SettingsType;
const response = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/config`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(updatedConfig),
const response = await fetch(`/api/config`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify(updatedConfig),
});
if (!response.ok) {
throw new Error('Failed to update config');
@ -208,7 +245,7 @@ const Page = () => {
key.toLowerCase().includes('api') ||
key.toLowerCase().includes('url')
) {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
const res = await fetch(`/api/config`, {
headers: {
'Content-Type': 'application/json',
},
@ -223,11 +260,11 @@ const Page = () => {
setChatModels(data.chatModelProviders || {});
setEmbeddingModels(data.embeddingModelProviders || {});
const currentProvider = selectedChatModelProvider;
const newProviders = Object.keys(data.chatModelProviders || {});
const currentChatProvider = selectedChatModelProvider;
const newChatProviders = Object.keys(data.chatModelProviders || {});
if (!currentProvider && newProviders.length > 0) {
const firstProvider = newProviders[0];
if (!currentChatProvider && newChatProviders.length > 0) {
const firstProvider = newChatProviders[0];
const firstModel = data.chatModelProviders[firstProvider]?.[0]?.name;
if (firstModel) {
@ -237,11 +274,11 @@ const Page = () => {
localStorage.setItem('chatModel', firstModel);
}
} else if (
currentProvider &&
currentChatProvider &&
(!data.chatModelProviders ||
!data.chatModelProviders[currentProvider] ||
!Array.isArray(data.chatModelProviders[currentProvider]) ||
data.chatModelProviders[currentProvider].length === 0)
!data.chatModelProviders[currentChatProvider] ||
!Array.isArray(data.chatModelProviders[currentChatProvider]) ||
data.chatModelProviders[currentChatProvider].length === 0)
) {
const firstValidProvider = Object.entries(
data.chatModelProviders || {},
@ -267,6 +304,55 @@ const Page = () => {
}
}
const currentEmbeddingProvider = selectedEmbeddingModelProvider;
const newEmbeddingProviders = Object.keys(
data.embeddingModelProviders || {},
);
if (!currentEmbeddingProvider && newEmbeddingProviders.length > 0) {
const firstProvider = newEmbeddingProviders[0];
const firstModel =
data.embeddingModelProviders[firstProvider]?.[0]?.name;
if (firstModel) {
setSelectedEmbeddingModelProvider(firstProvider);
setSelectedEmbeddingModel(firstModel);
localStorage.setItem('embeddingModelProvider', firstProvider);
localStorage.setItem('embeddingModel', firstModel);
}
} else if (
currentEmbeddingProvider &&
(!data.embeddingModelProviders ||
!data.embeddingModelProviders[currentEmbeddingProvider] ||
!Array.isArray(
data.embeddingModelProviders[currentEmbeddingProvider],
) ||
data.embeddingModelProviders[currentEmbeddingProvider].length === 0)
) {
const firstValidProvider = Object.entries(
data.embeddingModelProviders || {},
).find(
([_, models]) => Array.isArray(models) && models.length > 0,
)?.[0];
if (firstValidProvider) {
setSelectedEmbeddingModelProvider(firstValidProvider);
setSelectedEmbeddingModel(
data.embeddingModelProviders[firstValidProvider][0].name,
);
localStorage.setItem('embeddingModelProvider', firstValidProvider);
localStorage.setItem(
'embeddingModel',
data.embeddingModelProviders[firstValidProvider][0].name,
);
} else {
setSelectedEmbeddingModelProvider(null);
setSelectedEmbeddingModel(null);
localStorage.removeItem('embeddingModelProvider');
localStorage.removeItem('embeddingModel');
}
}
setConfig(data);
}
@ -278,6 +364,16 @@ const Page = () => {
localStorage.setItem('chatModelProvider', value);
} else if (key === 'chatModel') {
localStorage.setItem('chatModel', value);
} else if (key === 'embeddingModelProvider') {
localStorage.setItem('embeddingModelProvider', value);
} else if (key === 'embeddingModel') {
localStorage.setItem('embeddingModel', value);
} else if (key === 'systemInstructions') {
localStorage.setItem('systemInstructions', value);
} else if (key === 'searchEngine') {
localStorage.setItem('searchEngine', value);
} else if (key === 'tavilyApiKey') {
localStorage.setItem('tavilyApiKey', value);
}
} catch (err) {
console.error('Failed to save:', err);
@ -420,6 +516,45 @@ const Page = () => {
/>
</Switch>
</div>
<div className="flex flex-col space-y-1 mt-2">
<p className="text-black/70 dark:text-white/70 text-sm">
Search Engine
</p>
<Select
value={searchEngine}
onChange={(e) => {
const value = e.target.value;
setSearchEngine(value);
saveConfig('searchEngine', value);
}}
options={[
{ value: 'searxng', label: 'SearxNG' },
...(config.tavilyApiKey ? [{ value: 'tavily', label: 'Tavily' }] : []),
]}
/>
<p className="text-xs text-black/60 dark:text-white/60 mt-1">
Select which search engine to use for web searches
</p>
{searchEngine === 'tavily' && !config.tavilyApiKey && (
<p className="text-xs text-red-500 mt-1">
Tavily API key is required to use this search engine
</p>
)}
</div>
</div>
</SettingsSection>
<SettingsSection title="System Instructions">
<div className="flex flex-col space-y-4">
<Textarea
value={systemInstructions}
isSaving={savingStates['systemInstructions']}
onChange={(e) => {
setSystemInstructions(e.target.value);
}}
onSave={(value) => saveConfig('systemInstructions', value)}
/>
</div>
</SettingsSection>
@ -436,7 +571,6 @@ const Page = () => {
const value = e.target.value;
setSelectedChatModelProvider(value);
saveConfig('chatModelProvider', value);
// Auto-select first model of new provider
const firstModel =
config.chatModelProviders[value]?.[0]?.name;
if (firstModel) {
@ -511,12 +645,16 @@ const Page = () => {
<Input
type="text"
placeholder="Model name"
defaultValue={config.customOpenaiModelName}
onChange={(e) =>
setConfig({
...config,
value={config.customOpenaiModelName}
isSaving={savingStates['customOpenaiModelName']}
onChange={(e: React.ChangeEvent<HTMLInputElement>) => {
setConfig((prev) => ({
...prev!,
customOpenaiModelName: e.target.value,
})
}));
}}
onSave={(value) =>
saveConfig('customOpenaiModelName', value)
}
/>
</div>
@ -527,12 +665,16 @@ const Page = () => {
<Input
type="text"
placeholder="Custom OpenAI API Key"
defaultValue={config.customOpenaiApiKey}
onChange={(e) =>
setConfig({
...config,
value={config.customOpenaiApiKey}
isSaving={savingStates['customOpenaiApiKey']}
onChange={(e: React.ChangeEvent<HTMLInputElement>) => {
setConfig((prev) => ({
...prev!,
customOpenaiApiKey: e.target.value,
})
}));
}}
onSave={(value) =>
saveConfig('customOpenaiApiKey', value)
}
/>
</div>
@ -543,17 +685,96 @@ const Page = () => {
<Input
type="text"
placeholder="Custom OpenAI Base URL"
defaultValue={config.customOpenaiApiUrl}
onChange={(e) =>
setConfig({
...config,
value={config.customOpenaiApiUrl}
isSaving={savingStates['customOpenaiApiUrl']}
onChange={(e: React.ChangeEvent<HTMLInputElement>) => {
setConfig((prev) => ({
...prev!,
customOpenaiApiUrl: e.target.value,
})
}));
}}
onSave={(value) =>
saveConfig('customOpenaiApiUrl', value)
}
/>
</div>
</div>
)}
{config.embeddingModelProviders && (
<div className="flex flex-col space-y-4 mt-4 pt-4 border-t border-light-200 dark:border-dark-200">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Embedding Model Provider
</p>
<Select
value={selectedEmbeddingModelProvider ?? undefined}
onChange={(e) => {
const value = e.target.value;
setSelectedEmbeddingModelProvider(value);
saveConfig('embeddingModelProvider', value);
const firstModel =
config.embeddingModelProviders[value]?.[0]?.name;
if (firstModel) {
setSelectedEmbeddingModel(firstModel);
saveConfig('embeddingModel', firstModel);
}
}}
options={Object.keys(config.embeddingModelProviders).map(
(provider) => ({
value: provider,
label:
provider.charAt(0).toUpperCase() +
provider.slice(1),
}),
)}
/>
</div>
{selectedEmbeddingModelProvider && (
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Embedding Model
</p>
<Select
value={selectedEmbeddingModel ?? undefined}
onChange={(e) => {
const value = e.target.value;
setSelectedEmbeddingModel(value);
saveConfig('embeddingModel', value);
}}
options={(() => {
const embeddingModelProvider =
config.embeddingModelProviders[
selectedEmbeddingModelProvider
];
return embeddingModelProvider
? embeddingModelProvider.length > 0
? embeddingModelProvider.map((model) => ({
value: model.name,
label: model.displayName,
}))
: [
{
value: '',
label: 'No models available',
disabled: true,
},
]
: [
{
value: '',
label:
'Invalid provider, please check backend logs',
disabled: true,
},
];
})()}
/>
</div>
)}
</div>
)}
</SettingsSection>
<SettingsSection title="API Keys">
@ -652,6 +873,51 @@ const Page = () => {
onSave={(value) => saveConfig('geminiApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Deepseek API Key
</p>
<Input
type="text"
placeholder="Deepseek API Key"
value={config.deepseekApiKey}
isSaving={savingStates['deepseekApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
deepseekApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('deepseekApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1 mt-4 pt-4 border-t border-light-200 dark:border-dark-200">
<p className="text-black/90 dark:text-white/90 font-medium">Search Engine API Keys</p>
<p className="text-sm text-black/60 dark:text-white/60 mt-0.5">
API keys for search engines used in the application
</p>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Tavily API Key
</p>
<Input
type="text"
placeholder="Tavily API key"
value={config.tavilyApiKey || ''}
isSaving={savingStates['tavilyApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
tavilyApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('tavilyApiKey', value)}
/>
</div>
</div>
</SettingsSection>
</div>

@ -48,11 +48,17 @@ const Chat = ({
});
useEffect(() => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
};
if (messages.length === 1) {
document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
}
if (messages[messages.length - 1]?.role == 'user') {
scroll();
}
}, [messages]);
return (

@ -29,280 +29,154 @@ export interface File {
fileId: string;
}
const useSocket = (
url: string,
setIsWSReady: (ready: boolean) => void,
setError: (error: boolean) => void,
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
const wsRef = useRef<WebSocket | null>(null);
const reconnectTimeoutRef = useRef<NodeJS.Timeout>();
const retryCountRef = useRef(0);
const isCleaningUpRef = useRef(false);
const MAX_RETRIES = 3;
const INITIAL_BACKOFF = 1000; // 1 second
const isConnectionErrorRef = useRef(false);
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const getBackoffDelay = (retryCount: number) => {
return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
};
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
useEffect(() => {
const connectWs = async () => {
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
return toast.error('No chat models available');
}
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem(
'embeddingModelProvider',
);
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/models`,
{
headers: {
'Content-Type': 'application/json',
},
},
).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return toast.error('No chat models available');
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
const wsURL = new URL(url);
const searchParams = new URLSearchParams({});
searchParams.append('chatModel', chatModel!);
searchParams.append('chatModelProvider', chatModelProvider);
if (chatModelProvider === 'custom_openai') {
searchParams.append(
'openAIApiKey',
localStorage.getItem('openAIApiKey')!,
);
searchParams.append(
'openAIBaseURL',
localStorage.getItem('openAIBaseURL')!,
);
}
searchParams.append('embeddingModel', embeddingModel!);
searchParams.append('embeddingModelProvider', embeddingModelProvider);
wsURL.search = searchParams.toString();
const ws = new WebSocket(wsURL.toString());
wsRef.current = ws;
const timeoutId = setTimeout(() => {
if (ws.readyState !== 1) {
toast.error(
'Failed to connect to the server. Please try again later.',
);
}
}, 10000);
ws.addEventListener('message', (e) => {
const data = JSON.parse(e.data);
if (data.type === 'signal' && data.data === 'open') {
const interval = setInterval(() => {
if (ws.readyState === 1) {
setIsWSReady(true);
setError(false);
if (retryCountRef.current > 0) {
toast.success('Connection restored.');
}
retryCountRef.current = 0;
clearInterval(interval);
}
}, 5);
clearTimeout(timeoutId);
console.debug(new Date(), 'ws:connected');
}
if (data.type === 'error') {
isConnectionErrorRef.current = true;
setError(true);
toast.error(data.data);
}
});
ws.onerror = () => {
clearTimeout(timeoutId);
setIsWSReady(false);
toast.error('WebSocket connection error.');
};
ws.onclose = () => {
clearTimeout(timeoutId);
setIsWSReady(false);
console.debug(new Date(), 'ws:disconnected');
if (!isCleaningUpRef.current && !isConnectionErrorRef.current) {
toast.error('Connection lost. Attempting to reconnect...');
attemptReconnect();
}
};
} catch (error) {
console.debug(new Date(), 'ws:error', error);
setIsWSReady(false);
attemptReconnect();
}
};
const attemptReconnect = () => {
retryCountRef.current += 1;
if (retryCountRef.current > MAX_RETRIES) {
console.debug(new Date(), 'ws:max_retries');
setError(true);
toast.error(
'Unable to connect to server after multiple attempts. Please refresh the page to try again.',
);
return;
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
const backoffDelay = getBackoffDelay(retryCountRef.current);
console.debug(
new Date(),
`ws:retry attempt=${retryCountRef.current}/${MAX_RETRIES} delay=${backoffDelay}ms`,
);
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
reconnectTimeoutRef.current = setTimeout(() => {
connectWs();
}, backoffDelay);
};
connectWs();
return () => {
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
isCleaningUpRef.current = true;
console.debug(new Date(), 'ws:cleanup');
}
};
}, [url, setIsWSReady, setError]);
return wsRef.current;
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
const loadMessages = async (
@ -315,15 +189,12 @@ const loadMessages = async (
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
);
});
if (res.status === 404) {
setNotFound(true);
@ -373,15 +244,32 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
const [isWSReady, setIsWSReady] = useState(false);
const ws = useSocket(
process.env.NEXT_PUBLIC_WS_URL!,
setIsWSReady,
setHasError,
);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
@ -399,8 +287,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [notFound, setNotFound] = useState(false);
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
useEffect(() => {
if (
chatId &&
@ -426,16 +312,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
return () => {
if (ws?.readyState === 1) {
ws.close();
console.debug(new Date(), 'ws:cleanup');
}
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const messagesRef = useRef<Message[]>([]);
useEffect(() => {
@ -443,18 +319,18 @@ const ChatWindow = ({ id }: { id?: string }) => {
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isWSReady) {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isWSReady]);
}, [isMessagesLoaded, isConfigReady]);
const sendMessage = async (message: string, messageId?: string) => {
if (loading) return;
if (!ws || ws.readyState !== WebSocket.OPEN) {
toast.error('Cannot send message while disconnected');
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
@ -467,21 +343,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
ws.send(
JSON.stringify({
type: 'message',
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: [...chatHistory, ['human', message]],
}),
);
setMessages((prevMessages) => [
...prevMessages,
{
@ -493,9 +354,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
},
]);
const messageHandler = async (e: MessageEvent) => {
const data = JSON.parse(e.data);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
@ -558,11 +417,25 @@ const ChatWindow = ({ id }: { id?: string }) => {
['assistant', recievedMessage],
]);
ws?.removeEventListener('message', messageHandler);
setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
@ -579,21 +452,63 @@ const ChatWindow = ({ id }: { id?: string }) => {
}),
);
}
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document.getElementById('search-images')?.click();
}
if (autoVideoSearch === 'true') {
document.getElementById('search-videos')?.click();
}
}
};
ws?.addEventListener('message', messageHandler);
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
};
const rewrite = (messageId: string) => {
@ -614,11 +529,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
};
useEffect(() => {
if (isReady && initialMessage && ws?.readyState === 1) {
if (isReady && initialMessage && isConfigReady) {
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [ws?.readyState, isReady, initialMessage, isWSReady]);
}, [isConfigReady, isReady, initialMessage]);
if (hasError) {
return (
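
The read loop above clears its `partialChunk` buffer only after an entire batch parses, so a network chunk that splits one JSON line causes the whole buffer to be re-parsed on the next read. A common alternative, sketched here against the same newline-delimited format, retains only the incomplete trailing line:

```ts
// NDJSON consumption that buffers just the (possibly partial) last line.
const readNdjson = async (
  body: ReadableStream<Uint8Array>,
  onMessage: (msg: any) => void,
) => {
  const reader = body.getReader();
  const decoder = new TextDecoder('utf-8');
  let buffer = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep the incomplete trailing line
    for (const line of lines) {
      if (line.trim()) onMessage(JSON.parse(line));
    }
  }
  if (buffer.trim()) onMessage(JSON.parse(buffer)); // flush any final line
};
```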

@ -29,15 +29,12 @@ const DeleteChat = ({
const handleDelete = async () => {
setLoading(true);
try {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
},
const res = await fetch(`/api/chats/${chatId}`, {
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
},
);
});
if (res.status != 200) {
throw new Error('Failed to delete chat');

@ -12,13 +12,18 @@ import {
Layers3,
Plus,
} from 'lucide-react';
import Markdown from 'markdown-to-jsx';
import Markdown, { MarkdownToJSX } from 'markdown-to-jsx';
import Copy from './MessageActions/Copy';
import Rewrite from './MessageActions/Rewrite';
import MessageSources from './MessageSources';
import SearchImages from './SearchImages';
import SearchVideos from './SearchVideos';
import { useSpeech } from 'react-text-to-speech';
import ThinkBox from './ThinkBox';
const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
return <ThinkBox content={children as string} />;
};
const MessageBox = ({
message,
@ -43,32 +48,83 @@ const MessageBox = ({
const [speechMessage, setSpeechMessage] = useState(message.content);
useEffect(() => {
const citationRegex = /\[([^\]]+)\]/g;
const regex = /\[(\d+)\]/g;
let processedMessage = message.content;
if (message.role === 'assistant' && message.content.includes('<think>')) {
const openThinkTag = processedMessage.match(/<think>/g)?.length || 0;
const closeThinkTag = processedMessage.match(/<\/think>/g)?.length || 0;
if (openThinkTag > closeThinkTag) {
processedMessage += '</think> <a> </a>'; // The extra <a> </a> prevents the think component from looking bad
}
}
if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length > 0
) {
return setParsedMessage(
message.content.replace(
regex,
(_, number) =>
`<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
setParsedMessage(
processedMessage.replace(
citationRegex,
(_, capturedContent: string) => {
const numbers = capturedContent
.split(',')
.map((numStr) => numStr.trim());
const linksHtml = numbers
.map((numStr) => {
const number = parseInt(numStr);
if (isNaN(number) || number <= 0) {
return `[${numStr}]`;
}
const source = message.sources?.[number - 1];
const url = source?.metadata?.url;
if (url) {
return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
} else {
return `[${numStr}]`;
}
})
.join('');
return linksHtml;
},
),
);
setSpeechMessage(message.content.replace(regex, ''));
return;
}
setSpeechMessage(message.content.replace(regex, ''));
setParsedMessage(message.content);
setParsedMessage(processedMessage);
}, [message.content, message.sources, message.role]);
const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
const markdownOverrides: MarkdownToJSX.Options = {
overrides: {
think: {
component: ThinkTagProcessor,
},
},
};
return (
<div>
{message.role === 'user' && (
<div className={cn('w-full', messageIndex === 0 ? 'pt-16' : 'pt-8')}>
<div
className={cn(
'w-full',
messageIndex === 0 ? 'pt-16' : 'pt-8',
'break-words',
)}
>
<h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
{message.content}
</h2>
@ -105,11 +161,13 @@ const MessageBox = ({
Answer
</h3>
</div>
<Markdown
className={cn(
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'max-w-none break-words text-black dark:text-white',
)}
options={markdownOverrides}
>
{parsedMessage}
</Markdown>
@ -187,10 +245,12 @@ const MessageBox = ({
<SearchImages
query={history[messageIndex - 1].content}
chatHistory={history.slice(0, messageIndex - 1)}
messageId={message.messageId}
/>
<SearchVideos
chatHistory={history.slice(0, messageIndex - 1)}
query={history[messageIndex - 1].content}
messageId={message.messageId}
/>
</div>
</div>
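
The useEffect above folds grouped citations like [1,2,3] and singletons like [1] into source links. The same transformation as a standalone pure function, assuming the source metadata shape used in this component:

```ts
// Rewrite [1]- and [2,3]-style citations as links into `sources`,
// leaving unknown or non-numeric references as plain text.
const linkCitations = (
  text: string,
  sources: { metadata: { url?: string } }[],
) =>
  text.replace(/\[([^\]]+)\]/g, (_, captured: string) =>
    captured
      .split(',')
      .map((part) => {
        const numStr = part.trim();
        const n = parseInt(numStr);
        const url =
          !isNaN(n) && n > 0 ? sources[n - 1]?.metadata?.url : undefined;
        return url
          ? `<a href="${url}" target="_blank">${numStr}</a>`
          : `[${numStr}]`;
      })
      .join(''),
  );
```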

@ -41,7 +41,7 @@ const Attach = ({
data.append('embedding_model_provider', embeddingModelProvider!);
data.append('embedding_model', embeddingModel!);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
const res = await fetch(`/api/uploads`, {
method: 'POST',
body: data,
});
@ -110,7 +110,7 @@ const Attach = ({
<button
type="button"
onClick={() => fileInputRef.current.click()}
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
>
<input
type="file"
@ -128,7 +128,7 @@ const Attach = ({
setFiles([]);
setFileIds([]);
}}
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
>
<Trash size={14} />
<p className="text-xs">Clear</p>
@ -145,7 +145,7 @@ const Attach = ({
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
</div>
<p className="text-white/70 text-sm">
<p className="text-black/70 dark:text-white/70 text-sm">
{file.fileName.length > 25
? file.fileName.replace(/\.\w+$/, '').substring(0, 25) +
'...' +

@ -39,7 +39,7 @@ const AttachSmall = ({
data.append('embedding_model_provider', embeddingModelProvider!);
data.append('embedding_model', embeddingModel!);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
const res = await fetch(`/api/uploads`, {
method: 'POST',
body: data,
});
@ -82,7 +82,7 @@ const AttachSmall = ({
<button
type="button"
onClick={() => fileInputRef.current.click()}
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
>
<input
type="file"
@ -100,7 +100,7 @@ const AttachSmall = ({
setFiles([]);
setFileIds([]);
}}
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
className="flex flex-row items-center space-x-1 text-black/70 dark:text-white/70 hover:text-black hover:dark:text-white transition duration-200"
>
<Trash size={14} />
<p className="text-xs">Clear</p>
@ -117,7 +117,7 @@ const AttachSmall = ({
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
</div>
<p className="text-white/70 text-sm">
<p className="text-black/70 dark:text-white/70 text-sm">
{file.fileName.length > 25
? file.fileName.replace(/\.\w+$/, '').substring(0, 25) +
'...' +

@ -45,25 +45,13 @@ const focusModes = [
key: 'youtubeSearch',
title: 'Youtube',
description: 'Search and watch videos',
icon: (
<SiYoutube
className="h-5 w-auto mr-0.5"
onPointerEnterCapture={undefined}
onPointerLeaveCapture={undefined}
/>
),
icon: <SiYoutube className="h-5 w-auto mr-0.5" />,
},
{
key: 'redditSearch',
title: 'Reddit',
description: 'Search for discussions and opinions',
icon: (
<SiReddit
className="h-5 w-auto mr-0.5"
onPointerEnterCapture={undefined}
onPointerLeaveCapture={undefined}
/>
),
icon: <SiReddit className="h-5 w-auto mr-0.5" />,
},
];

@ -69,11 +69,15 @@ const MessageSources = ({ sources }: { sources: Document[] }) => {
<div className="flex flex-row items-center space-x-1">
{sources.slice(3, 6).map((source, i) => {
return source.metadata.url === 'File' ? (
<div className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full">
<div
key={i}
className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full"
>
<File size={12} className="text-white/70" />
</div>
) : (
<img
key={i}
src={`https://s2.googleusercontent.com/s2/favicons?domain_url=${source.metadata.url}`}
width={16}
height={16}

@ -14,9 +14,11 @@ type Image = {
const SearchImages = ({
query,
chatHistory,
messageId,
}: {
query: string;
chatHistory: Message[];
messageId: string;
}) => {
const [images, setImages] = useState<Image[] | null>(null);
const [loading, setLoading] = useState(false);
@ -27,7 +29,7 @@ const SearchImages = ({
<>
{!loading && images === null && (
<button
id="search-images"
id={`search-images-${messageId}`}
onClick={async () => {
setLoading(true);
@ -37,27 +39,24 @@ const SearchImages = ({
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/images`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
const res = await fetch(`/api/images`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
});
const data = await res.json();

@ -27,9 +27,11 @@ declare module 'yet-another-react-lightbox' {
const Searchvideos = ({
query,
chatHistory,
messageId,
}: {
query: string;
chatHistory: Message[];
messageId: string;
}) => {
const [videos, setVideos] = useState<Video[] | null>(null);
const [loading, setLoading] = useState(false);
@ -42,7 +44,7 @@ const Searchvideos = ({
<>
{!loading && videos === null && (
<button
id="search-videos"
id={`search-videos-${messageId}`}
onClick={async () => {
setLoading(true);
@ -52,27 +54,24 @@ const Searchvideos = ({
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/videos`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
const res = await fetch(`/api/videos`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
});
const data = await res.json();

@ -16,8 +16,6 @@ const VerticalIconContainer = ({ children }: { children: ReactNode }) => {
const Sidebar = ({ children }: { children: React.ReactNode }) => {
const segments = useSelectedLayoutSegments();
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
const navLinks = [
{
icon: Home,

@ -0,0 +1,43 @@
'use client';
import { useState } from 'react';
import { cn } from '@/lib/utils';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
content: string;
}
const ThinkBox = ({ content }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(false);
return (
<div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
<button
onClick={() => setIsExpanded(!isExpanded)}
className="w-full flex items-center justify-between px-4 py-1 text-black/90 dark:text-white/90 hover:bg-light-200 dark:hover:bg-dark-200 transition duration-200"
>
<div className="flex items-center space-x-2">
<BrainCircuit
size={20}
className="text-[#9C27B0] dark:text-[#CE93D8]"
/>
<p className="font-medium text-sm">Thinking Process</p>
</div>
{isExpanded ? (
<ChevronUp size={18} className="text-black/70 dark:text-white/70" />
) : (
<ChevronDown size={18} className="text-black/70 dark:text-white/70" />
)}
</button>
{isExpanded && (
<div className="px-4 py-3 text-black/80 dark:text-white/80 text-sm border-t border-light-200 dark:border-dark-200 bg-light-100/50 dark:bg-dark-100/50 whitespace-pre-wrap">
{content}
</div>
)}
</div>
);
};
export default ThinkBox;

@ -7,7 +7,7 @@ export const getSuggestions = async (chatHisory: Message[]) => {
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/suggestions`, {
const res = await fetch(`/api/suggestions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',

@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searxng';
import { searchSearxng } from '../searchEngines/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
@ -36,6 +36,12 @@ type ImageSearchChainInput = {
query: string;
};
interface ImageSearchResult {
img_src: string;
url: string;
title: string;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
@ -52,11 +58,13 @@ const createImageSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images = [];
const images: ImageSearchResult[] = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {

@ -1,5 +1,5 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';

View File

@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searxng';
import { searchSearxng } from '../searchEngines/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
@ -36,6 +36,13 @@ type VideoSearchChainInput = {
query: string;
};
interface VideoSearchResult {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
@ -52,11 +59,13 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos = [];
const videos: VideoSearchResult[] = [];
res.results.forEach((result) => {
if (

View File

@ -6,7 +6,6 @@ const configFileName = 'config.toml';
interface Config {
GENERAL: {
PORT: number;
SIMILARITY_MEASURE: string;
KEEP_ALIVE: string;
};
@ -26,6 +25,9 @@ interface Config {
OLLAMA: {
API_URL: string;
};
DEEPSEEK: {
API_KEY: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
@ -34,6 +36,10 @@ interface Config {
};
API_ENDPOINTS: {
SEARXNG: string;
TAVILY: string;
};
SEARCH: {
ENGINE: string;
};
}
@ -43,11 +49,9 @@ type RecursivePartial<T> = {
const loadConfig = () =>
toml.parse(
fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
) as any as Config;
export const getPort = () => loadConfig().GENERAL.PORT;
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
@ -64,8 +68,16 @@ export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
export const getTavilyApiKey = () =>
process.env.TAVILY_API_KEY || loadConfig().API_ENDPOINTS.TAVILY;
export const getSearchEngine = () =>
process.env.SEARCH_ENGINE || loadConfig().SEARCH?.ENGINE || 'searxng';
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@ -109,9 +121,8 @@ const mergeConfigs = (current: any, update: any): any => {
export const updateConfig = (config: RecursivePartial<Config>) => {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(__dirname, `../${configFileName}`),
path.join(path.join(process.cwd(), `${configFileName}`)),
toml.stringify(mergedConfig),
);
};
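Two behavioural changes are worth noting here: the config file is now resolved against `process.cwd()` rather than the compiled file's directory (so the server must be started from the project root, where `config.toml` lives), and the active search engine is resolved with an explicit precedence chain. A condensed sketch of that chain, mirroring `getSearchEngine()` above:

```ts
// Resolution order: environment variable first, then the [SEARCH]
// table in config.toml, then the 'searxng' default.
const resolveSearchEngine = (): string =>
  process.env.SEARCH_ENGINE || loadConfig().SEARCH?.ENGINE || 'searxng';
```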

View File

@ -1,8 +1,9 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';
import path from 'path';
const sqlite = new Database('data/db.sqlite');
const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
const db = drizzle(sqlite, {
schema: schema,
});

View File

@ -28,7 +28,7 @@ export class HuggingFaceTransformersEmbeddings
timeout?: number;
private pipelinePromise: Promise<any>;
private pipelinePromise: Promise<any> | undefined;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});

View File

@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {
constructor(args?: LineOutputParserArgs) {
super();
this.key = args.key ?? this.key;
this.key = args?.key ?? this.key;
}
static lc_name() {

View File

@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
constructor(args?: LineListOutputParserArgs) {
super();
this.key = args.key ?? this.key;
this.key = args?.key ?? this.key;
}
static lc_name() {
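Both line parsers get the same one-character fix: `args` is an optional parameter, so `args.key` threw a `TypeError` whenever a parser was constructed without arguments. A quick sketch of what the optional chaining buys (construction as in this repository):

```ts
// Fine before and after the fix:
const withKey = new LineListOutputParser({ key: 'questions' });

// Previously crashed with "Cannot read properties of undefined
// (reading 'key')"; now falls back to the class's default key:
const withDefault = new LineListOutputParser();
```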

View File

@ -51,6 +51,10 @@ export const academicSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.
### User instructions
These instructions are shared with you by the user, not by the system. You must follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -51,6 +51,10 @@ export const redditSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
### User instructions
These instructions are shared with you by the user, not by the system. You must follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -1,6 +1,6 @@
export const webSearchRetrieverPrompt = `
You are an AI question rephraser. You will be given a conversation and a follow-up question, and you will have to rephrase the follow-up question so it is a standalone question that can be used by another LLM to search the web for information to answer it.
If it is a smple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, then you need to return \`not_needed\` as the response (this is because the LLM won't need to search the web to find information on this topic).
If the user asks a question about some URL or wants you to summarize a PDF or a webpage (via URL), you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF, you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
@ -92,6 +92,10 @@ export const webSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
### User instructions
These instructions are shared with you by the user, not by the system. You must follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -51,6 +51,10 @@ export const wolframAlphaSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
### User instructions
These instructions are shared with you by the user, not by the system. You must follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -7,6 +7,10 @@ You have to cite the answer using [number] notation. You must cite the sentences
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
### User instructions
These instructions are shared with you by the user, not by the system. You must follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
<context>
{context}
</context>
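Each of these prompts now carries a `{systemInstructions}` placeholder alongside `{context}`. Both are ordinary LangChain template variables, so they are filled at format time; a minimal sketch with illustrative values (the real values are wired in by the answering chain shown further down):

```ts
import { PromptTemplate } from '@langchain/core/prompts';

const template = PromptTemplate.fromTemplate(
  'Answer using the context below.\n' +
    '### User instructions\n{systemInstructions}\n' +
    '<context>\n{context}\n</context>',
);

// format() resolves every placeholder; a missing variable throws.
const render = async () =>
  template.format({
    systemInstructions: 'Answer in French.',
    context: '1. Example search result...',
  });
```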

View File

@ -51,6 +51,10 @@ export const youtubeSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Youtube'; this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript.
### User instructions
These instructions are shared with you by the user, not by the system. You must follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -1,6 +1,38 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { getAnthropicApiKey } from '../../config';
import logger from '../../utils/logger';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
export const loadAnthropicChatModels = async () => {
const anthropicApiKey = getAnthropicApiKey();
@ -8,52 +40,22 @@ export const loadAnthropicChatModels = async () => {
if (!anthropicApiKey) return {};
try {
const chatModels = {
'claude-3-5-sonnet-20241022': {
displayName: 'Claude 3.5 Sonnet',
const chatModels: Record<string, ChatModel> = {};
anthropicChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatAnthropic({
apiKey: anthropicApiKey,
modelName: model.key,
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-sonnet-20241022',
}),
},
'claude-3-5-haiku-20241022': {
displayName: 'Claude 3.5 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-haiku-20241022',
}),
},
'claude-3-opus-20240229': {
displayName: 'Claude 3 Opus',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-opus-20240229',
}),
},
'claude-3-sonnet-20240229': {
displayName: 'Claude 3 Sonnet',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-sonnet-20240229',
}),
},
'claude-3-haiku-20240307': {
displayName: 'Claude 3 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-haiku-20240307',
}),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Anthropic models: ${err}`);
console.error(`Error loading Anthropic models: ${err}`);
return {};
}
};

View File

@ -0,0 +1,44 @@
import { ChatOpenAI } from '@langchain/openai';
import { getDeepseekApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const deepseekChatModels: Record<string, string>[] = [
{
displayName: 'Deepseek Chat (Deepseek V3)',
key: 'deepseek-chat',
},
{
displayName: 'Deepseek Reasoner (Deepseek R1)',
key: 'deepseek-reasoner',
},
];
export const loadDeepseekChatModels = async () => {
const deepseekApiKey = getDeepseekApiKey();
if (!deepseekApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
deepseekChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: deepseekApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.deepseek.com',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
console.error(`Error loading Deepseek models: ${err}`);
return {};
}
};
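DeepSeek exposes an OpenAI-compatible API, which is why the provider is a thin `ChatOpenAI` wrapper with a custom `baseURL` rather than a dedicated client. A usage sketch, assuming `DEEPSEEK.API_KEY` is set in `config.toml`:

```ts
import { loadDeepseekChatModels } from './deepseek';

const demo = async () => {
  const models = await loadDeepseekChatModels();
  const reasoner = models['deepseek-reasoner'];
  if (!reasoner) return; // no API key configured

  // BaseChatModel.invoke accepts a plain string prompt.
  const reply = await reasoner.model.invoke('Explain cosine similarity.');
  console.log(reply.content);
};
```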

View File

@ -2,8 +2,52 @@ import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../../config';
import logger from '../../utils/logger';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.5 Pro Experimental',
key: 'gemini-2.5-pro-exp-03-25',
},
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Flash Thinking Experimental',
key: 'gemini-2.0-flash-thinking-exp-01-21',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 004',
key: 'models/text-embedding-004',
},
{
displayName: 'Embedding 001',
key: 'models/embedding-001',
},
];
export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey();
@ -11,75 +55,47 @@ export const loadGeminiChatModels = async () => {
if (!geminiApiKey) return {};
try {
const chatModels = {
'gemini-1.5-flash': {
displayName: 'Gemini 1.5 Flash',
const chatModels: Record<string, ChatModel> = {};
geminiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-flash-8b': {
displayName: 'Gemini 1.5 Flash 8B',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash-8b',
modelName: model.key,
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-pro': {
displayName: 'Gemini 1.5 Pro',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-pro',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-exp': {
displayName: 'Gemini 2.0 Flash Exp',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-exp',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-thinking-exp-01-21': {
displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-thinking-exp-01-21',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Gemini models: ${err}`);
console.error(`Error loading Gemini models: ${err}`);
return {};
}
};
export const loadGeminiEmbeddingsModels = async () => {
export const loadGeminiEmbeddingModels = async () => {
const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {};
try {
const embeddingModels = {
'text-embedding-004': {
displayName: 'Text Embedding',
const embeddingModels: Record<string, EmbeddingModel> = {};
geminiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new GoogleGenerativeAIEmbeddings({
apiKey: geminiApiKey,
modelName: 'text-embedding-004',
}),
},
};
modelName: model.key,
}) as unknown as Embeddings,
};
});
return embeddingModels;
} catch (err) {
logger.error(`Error loading Gemini embeddings model: ${err}`);
console.error(`Error loading Gemini embeddings models: ${err}`);
return {};
}
};

View File

@ -1,6 +1,86 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
/* {
displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
}, */
{
displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
key: 'meta-llama/llama-4-scout-17b-16e-instruct',
},
];
export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey();
@ -8,129 +88,25 @@ export const loadGroqChatModels = async () => {
if (!groqApiKey) return {};
try {
const chatModels = {
'llama-3.3-70b-versatile': {
displayName: 'Llama 3.3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.3-70b-versatile',
temperature: 0.7,
},
{
const chatModels: Record<string, ChatModel> = {};
groqChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-3b-preview': {
displayName: 'Llama 3.2 3B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-3b-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-11b-vision-preview': {
displayName: 'Llama 3.2 11B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-11b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-90b-vision-preview': {
displayName: 'Llama 3.2 90B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-90b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.1-8b-instant': {
displayName: 'Llama 3.1 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.1-8b-instant',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-8b-8192': {
displayName: 'LLaMA3 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-70b-8192': {
displayName: 'LLaMA3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'mixtral-8x7b-32768': {
displayName: 'Mixtral 8x7B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'gemma2-9b-it': {
displayName: 'Gemma2 9B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma2-9b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Groq models: ${err}`);
console.error(`Error loading Groq models: ${err}`);
return {};
}
};

View File

@ -1,33 +1,53 @@
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../../config';
} from '../config';
import { ChatOpenAI } from '@langchain/openai';
import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
import { loadGroqChatModels } from './groq';
import { loadAnthropicChatModels } from './anthropic';
import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadDeepseekChatModels } from './deepseek';
const chatModelProviders = {
export interface ChatModel {
displayName: string;
model: BaseChatModel;
}
export interface EmbeddingModel {
displayName: string;
model: Embeddings;
}
export const chatModelProviders: Record<
string,
() => Promise<Record<string, ChatModel>>
> = {
openai: loadOpenAIChatModels,
groq: loadGroqChatModels,
ollama: loadOllamaChatModels,
groq: loadGroqChatModels,
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
deepseek: loadDeepseekChatModels,
};
const embeddingModelProviders = {
openai: loadOpenAIEmbeddingsModels,
local: loadTransformersEmbeddingsModels,
ollama: loadOllamaEmbeddingsModels,
gemini: loadGeminiEmbeddingsModels,
export const embeddingModelProviders: Record<
string,
() => Promise<Record<string, EmbeddingModel>>
> = {
openai: loadOpenAIEmbeddingModels,
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels,
};
export const getAvailableChatModelProviders = async () => {
const models = {};
const models: Record<string, Record<string, ChatModel>> = {};
for (const provider in chatModelProviders) {
const providerModels = await chatModelProviders[provider]();
@ -52,7 +72,7 @@ export const getAvailableChatModelProviders = async () => {
configuration: {
baseURL: customOpenAiApiUrl,
},
}),
}) as unknown as BaseChatModel,
},
}
: {}),
@ -62,7 +82,7 @@ export const getAvailableChatModelProviders = async () => {
};
export const getAvailableEmbeddingModelProviders = async () => {
const models = {};
const models: Record<string, Record<string, EmbeddingModel>> = {};
for (const provider in embeddingModelProviders) {
const providerModels = await embeddingModelProviders[provider]();
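With the registries now typed as `Record<string, () => Promise<Record<string, ChatModel>>>`, consumers can enumerate providers generically. A small sketch of how the aggregated result might be listed (the logging format is illustrative):

```ts
import { getAvailableChatModelProviders } from './index';

const listModels = async () => {
  const providers = await getAvailableChatModelProviders();

  // Providers without an API key return {} from their loader, so only
  // configured providers contribute entries here.
  for (const [provider, models] of Object.entries(providers)) {
    for (const [key, { displayName }] of Object.entries(models)) {
      console.log(`${provider}/${key} -> ${displayName}`);
    }
  }
};
```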

View File

@ -1,74 +1,73 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
export const loadOllamaChatModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
const keepAlive = getKeepAlive();
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaEndpoint) return {};
if (!ollamaApiEndpoint) return {};
try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = response.data;
const { models } = res.data;
const chatModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.model] = {
displayName: model.name,
model: new ChatOllama({
baseUrl: ollamaEndpoint,
baseUrl: ollamaApiEndpoint,
model: model.model,
temperature: 0.7,
keepAlive: keepAlive,
keepAlive: getKeepAlive(),
}),
};
return acc;
}, {});
});
return chatModels;
} catch (err) {
logger.error(`Error loading Ollama models: ${err}`);
console.error(`Error loading Ollama models: ${err}`);
return {};
}
};
export const loadOllamaEmbeddingsModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaEndpoint) return {};
if (!ollamaApiEndpoint) return {};
try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = response.data;
const { models } = res.data;
const embeddingsModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model: any) => {
embeddingModels[model.model] = {
displayName: model.name,
model: new OllamaEmbeddings({
baseUrl: ollamaEndpoint,
baseUrl: ollamaApiEndpoint,
model: model.model,
}),
};
});
return acc;
}, {});
return embeddingsModels;
return embeddingModels;
} catch (err) {
logger.error(`Error loading Ollama embeddings model: ${err}`);
console.error(`Error loading Ollama embeddings models: ${err}`);
return {};
}
};

View File

@ -1,89 +1,90 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
export const loadOpenAIChatModels = async () => {
const openAIApiKey = getOpenaiApiKey();
const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {};
if (!openaiApiKey) return {};
try {
const chatModels = {
'gpt-3.5-turbo': {
displayName: 'GPT-3.5 Turbo',
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-3.5-turbo',
openAIApiKey: openaiApiKey,
modelName: model.key,
temperature: 0.7,
}),
},
'gpt-4': {
displayName: 'GPT-4',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
},
'gpt-4-turbo': {
displayName: 'GPT-4 turbo',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
},
'gpt-4o': {
displayName: 'GPT-4 omni',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o',
temperature: 0.7,
}),
},
'gpt-4o-mini': {
displayName: 'GPT-4 omni mini',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o-mini',
temperature: 0.7,
}),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading OpenAI models: ${err}`);
console.error(`Error loading OpenAI models: ${err}`);
return {};
}
};
export const loadOpenAIEmbeddingsModels = async () => {
const openAIApiKey = getOpenaiApiKey();
export const loadOpenAIEmbeddingModels = async () => {
const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {};
if (!openaiApiKey) return {};
try {
const embeddingModels = {
'text-embedding-3-small': {
displayName: 'Text Embedding 3 Small',
const embeddingModels: Record<string, EmbeddingModel> = {};
openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-small',
}),
},
'text-embedding-3-large': {
displayName: 'Text Embedding 3 Large',
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
},
};
openAIApiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
});
return embeddingModels;
} catch (err) {
logger.error(`Error loading OpenAI embeddings model: ${err}`);
console.error(`Error loading OpenAI embeddings models: ${err}`);
return {};
}
};

View File

@ -1,4 +1,3 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const loadTransformersEmbeddingsModels = async () => {
@ -26,7 +25,7 @@ export const loadTransformersEmbeddingsModels = async () => {
return embeddingModels;
} catch (err) {
logger.error(`Error loading Transformers embeddings model: ${err}`);
console.error(`Error loading Transformers embeddings model: ${err}`);
return {};
}
};

59
src/lib/search/index.ts Normal file
View File

@ -0,0 +1,59 @@
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import prompts from '../prompts';
export const searchHandlers: Record<string, MetaSearchAgent> = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};
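Each focus mode maps to a pre-configured `MetaSearchAgent`, so an API route can select one by key and call it directly. A hedged sketch of that call (the emitter's event names and the exact request shape are assumptions based on the signatures in this diff):

```ts
import { searchHandlers } from '@/lib/search';
import type { BaseMessage } from '@langchain/core/messages';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';

const runSearch = async (
  focusMode: string,
  query: string,
  history: BaseMessage[],
  llm: BaseChatModel,
  embeddings: Embeddings,
  systemInstructions = '',
) => {
  const handler = searchHandlers[focusMode] ?? searchHandlers['webSearch'];

  const emitter = await handler.searchAndAnswer(
    query,
    history,
    llm,
    embeddings,
    'balanced',         // optimizationMode
    [],                 // fileIds
    systemInstructions, // new parameter added in this change set
  );

  emitter.on('data', (chunk: string) => {
    /* stream the chunk to the client; event name assumed */
  });
};
```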

View File

@ -13,18 +13,19 @@ import {
} from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document';
import { searchSearxng } from '../lib/searxng';
import path from 'path';
import fs from 'fs';
import { searchTavily } from '../searchEngines/tavily';
import { searchSearxng } from '../searchEngines/searxng';
import { getSearchEngine } from '../config';
import path from 'node:path';
import fs from 'node:fs';
import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { IterableReadableStream } from '@langchain/core/utils/stream';
export interface MetaSearchAgentType {
searchAndAnswer: (
@ -34,6 +35,7 @@ export interface MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) => Promise<eventEmitter>;
}
@ -90,7 +92,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
question = 'summarize';
}
let docs = [];
let docs: Document[] = [];
const linkDocs = await getDocumentsFromLinks({ links });
@ -203,25 +205,44 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: docs };
} else {
const res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
question = question.replace(/<think>.*?<\/think>/g, '');
const documents = res.results.map(
(result) =>
new Document({
pageContent:
result.content ||
(this.config.activeEngines.includes('youtube')
? result.title
: '') /* Todo: Implement transcript grabbing using Youtubei (source: https://www.npmjs.com/package/youtubei) */,
metadata: {
title: result.title,
url: result.url,
...(result.img_src && { img_src: result.img_src }),
},
}),
const searchEngine = getSearchEngine();
let res;
if (searchEngine === 'tavily') {
res = await searchTavily(question, {
search_depth: 'basic',
max_results: 15,
include_images: true,
});
} else {
// Default to SearxNG
res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
}
let documents: Document[] = [];
documents = documents.concat(
res.results.map(
(result) =>
new Document({
pageContent:
result.content ||
(this.config.activeEngines.includes('youtube')
? result.title
: ''),
metadata: {
title: result.title,
url: result.url,
...(result.img_src ? { img_src: result.img_src } : {}),
},
}),
)
);
return { query: question, docs: documents };
@ -235,9 +256,11 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds: string[],
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
systemInstructions: string,
) {
return RunnableSequence.from([
RunnableMap.from({
systemInstructions: () => systemInstructions,
query: (input: BasicChainInput) => input.query,
chat_history: (input: BasicChainInput) => input.chat_history,
date: () => new Date().toISOString(),
@ -311,7 +334,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));
const fileSimilaritySearchObject = content.contents.map(
(c: string, i) => {
(c: string, i: number) => {
return {
fileName: content.title,
content: c,
@ -414,6 +437,8 @@ class MetaSearchAgent implements MetaSearchAgentType {
return sortedDocs;
}
return [];
}
private processDocs(docs: Document[]) {
@ -426,7 +451,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
}
private async handleStream(
stream: IterableReadableStream<StreamEvent>,
stream: AsyncGenerator<StreamEvent, any, any>,
emitter: eventEmitter,
) {
for await (const event of stream) {
@ -465,6 +490,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) {
const emitter = new eventEmitter();
@ -473,6 +499,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds,
embeddings,
optimizationMode,
systemInstructions,
);
const stream = answeringChain.streamEvents(
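The thread from the public API down to the prompt is: `searchAndAnswer` receives `systemInstructions`, passes it to `createAnsweringChain`, which exposes it to the prompt through a constant entry in the `RunnableMap`. A stripped-down sketch of that pattern (the names mirror the code above; everything else is illustrative):

```ts
import { RunnableMap, RunnableSequence } from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';

const buildChain = (systemInstructions: string) =>
  RunnableSequence.from([
    RunnableMap.from({
      // Constant for the whole run, so a plain closure suffices.
      systemInstructions: () => systemInstructions,
      query: (input: { query: string }) => input.query,
    }),
    PromptTemplate.fromTemplate('{systemInstructions}\n\nQ: {query}'),
  ]);
```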

View File

@ -30,11 +30,12 @@ export const searchSearxng = async (
if (opts) {
Object.keys(opts).forEach((key) => {
if (Array.isArray(opts[key])) {
url.searchParams.append(key, opts[key].join(','));
const value = opts[key as keyof SearxngSearchOptions];
if (Array.isArray(value)) {
url.searchParams.append(key, value.join(','));
return;
}
url.searchParams.append(key, opts[key]);
url.searchParams.append(key, value as string);
});
}
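This is a strict-mode TypeScript fix rather than a behaviour change: indexing `opts` with a plain `string` is an implicit-`any` error, so the key is narrowed to `keyof SearxngSearchOptions` first. The same idea in isolation, with a trimmed-down version of the options interface:

```ts
interface SearxngSearchOptions {
  language?: string;
  engines?: string[];
}

const appendOpts = (url: URL, opts: SearxngSearchOptions) => {
  Object.keys(opts).forEach((key) => {
    // Narrow the key so the index access is well-typed.
    const value = opts[key as keyof SearxngSearchOptions];
    if (Array.isArray(value)) {
      url.searchParams.append(key, value.join(','));
      return;
    }
    if (value !== undefined) url.searchParams.append(key, value);
  });
};
```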

View File

@ -0,0 +1,79 @@
import axios from 'axios';
import { getTavilyApiKey } from '../config';
interface TavilySearchOptions {
topic?: 'general' | 'news';
search_depth?: 'basic' | 'advanced';
chunks_per_source?: number;
max_results?: number;
time_range?: 'day' | 'week' | 'month' | 'year' | 'd' | 'w' | 'm' | 'y';
days?: number;
include_answer?: boolean | 'basic' | 'advanced';
include_raw_content?: boolean;
include_images?: boolean;
include_image_descriptions?: boolean;
include_domains?: string[];
exclude_domains?: string[];
}
interface TavilySearchResult {
title: string;
url: string;
content: string;
score: number;
raw_content?: string;
}
interface TavilySearchResponse {
query: string;
answer?: string;
images?: Array<{
url: string;
description?: string;
}>;
results: TavilySearchResult[];
response_time: string;
}
export const searchTavily = async (
query: string,
opts?: TavilySearchOptions,
) => {
const tavilyApiKey = getTavilyApiKey();
if (!tavilyApiKey) {
throw new Error('Tavily API key is not configured');
}
const url = 'https://api.tavily.com/search';
const response = await axios.post<TavilySearchResponse>(
url,
{
query,
...opts,
},
{
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${tavilyApiKey}`,
},
}
);
const results = response.data.results;
// Convert Tavily results to match the format expected by the rest of the application
const formattedResults = results.map(result => ({
title: result.title,
url: result.url,
content: result.content,
img_src: undefined, // Tavily doesn't provide image URLs in the standard response
}));
return {
results: formattedResults,
suggestions: [], // Tavily doesn't provide suggestions, so return empty array
answer: response.data.answer, // Include the AI-generated answer if available
};
};
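A usage sketch for the new engine wrapper (requires `TAVILY_API_KEY` in the environment or `TAVILY` under `[API_ENDPOINTS]` in `config.toml`; the query string is illustrative and the import path follows the `@/lib` alias used elsewhere in this diff):

```ts
import { searchTavily } from '@/lib/searchEngines/tavily';

const demo = async () => {
  const { results, answer } = await searchTavily('latest TypeScript release', {
    search_depth: 'basic',
    max_results: 5,
  });

  results.forEach((r) => console.log(r.title, '->', r.url));
  // `answer` is only populated when include_answer is requested.
  if (answer) console.log('Tavily answer:', answer);
};
```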

src/lib/types/compute-dot.d.ts vendored Normal file
View File

@ -0,0 +1,5 @@
declare function computeDot(vectorA: number[], vectorB: number[]): number;
declare module 'compute-dot' {
export default computeDot;
}

View File

@ -6,7 +6,7 @@ const computeSimilarity = (x: number[], y: number[]): number => {
const similarityMeasure = getSimilarityMeasure();
if (similarityMeasure === 'cosine') {
return cosineSimilarity(x, y);
return cosineSimilarity(x, y) as number;
} else if (similarityMeasure === 'dot') {
return dot(x, y);
}

View File

@ -3,7 +3,6 @@ import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
import logger from './logger';
export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splitter = new RecursiveCharacterTextSplitter();
@ -79,12 +78,13 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
docs.push(...linkDocs);
} catch (err) {
logger.error(
`Error at generating documents from links: ${err.message}`,
console.error(
'An error occurred while getting documents from links: ',
err,
);
docs.push(
new Document({
pageContent: `Failed to retrieve content from the link: ${err.message}`,
pageContent: `Failed to retrieve content from the link: ${err}`,
metadata: {
title: 'Failed to retrieve content',
url: link,

Some files were not shown because too many files have changed in this diff.