Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-06-17 15:28:37 +00:00)

Compare commits: 297 commits (feat/ollam ... feat/singl)
Binary file not shown. Before: 151 KiB. After: 641 KiB.
@@ -1,5 +0,0 @@
PORT=3001
OLLAMA_URL=http://localhost:11434 # url of the ollama server
SIMILARITY_MEASURE=cosine # cosine or dot
SEARXNG_API_URL= # no need to fill this if using docker
MODEL_NAME=llama2
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 2 changes)
@@ -4,7 +4,6 @@ about: Create an issue to help us fix bugs
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
@@ -12,6 +11,7 @@ A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:

1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
.github/ISSUE_TEMPLATE/custom.md (vendored, 3 changes)
@@ -4,7 +4,4 @@ about: Describe this issue template's purpose here.
title: ''
labels: ''
assignees: ''

---
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 1 change)
@@ -4,7 +4,6 @@ about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
.github/workflows/docker-build.yaml (vendored, new file, 73 lines)
@@ -0,0 +1,73 @@
name: Build & Push Docker Images

on:
  push:
    branches:
      - master
  release:
    types: [published]

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        service: [backend, app]
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          install: true

      - name: Log in to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Extract version from release tag
        if: github.event_name == 'release'
        id: version
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV

      - name: Build and push Docker image for ${{ matrix.service }}
        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
        run: |
          docker buildx create --use
          if [[ "${{ matrix.service }}" == "backend" ]]; then \
            DOCKERFILE=backend.dockerfile; \
            IMAGE_NAME=perplexica-backend; \
          else \
            DOCKERFILE=app.dockerfile; \
            IMAGE_NAME=perplexica-frontend; \
          fi
          docker buildx build --platform linux/amd64,linux/arm64 \
            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
            --cache-to=type=inline \
            -f $DOCKERFILE \
            -t itzcrazykns1337/${IMAGE_NAME}:main \
            --push .

      - name: Build and push release Docker image for ${{ matrix.service }}
        if: github.event_name == 'release'
        run: |
          docker buildx create --use
          if [[ "${{ matrix.service }}" == "backend" ]]; then \
            DOCKERFILE=backend.dockerfile; \
            IMAGE_NAME=perplexica-backend; \
          else \
            DOCKERFILE=app.dockerfile; \
            IMAGE_NAME=perplexica-frontend; \
          fi
          docker buildx build --platform linux/amd64,linux/arm64 \
            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
            --cache-to=type=inline \
            -f $DOCKERFILE \
            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
            --push .
.gitignore (vendored, 8 changes)
@@ -6,6 +6,7 @@ yarn-error.log
# Build output
/.next/
/out/
/dist/

# IDE/Editor specific
.vscode/
@@ -19,6 +20,9 @@ yarn-error.log
.env.test.local
.env.production.local

# Config files
config.toml

# Log files
logs/
*.log
@@ -29,3 +33,7 @@ logs/
# Miscellaneous
.DS_Store
Thumbs.db

# Db
db.sqlite
/searxng
.prettierignore (new file, 41 lines)
@@ -0,0 +1,41 @@
# Ignore all files in the node_modules directory
node_modules

# Ignore all files in the .next directory (Next.js build output)
.next

# Ignore all files in the .out directory (TypeScript build output)
.out

# Ignore all files in the .cache directory (Prettier cache)
.cache

# Ignore all files in the .vscode directory (Visual Studio Code settings)
.vscode

# Ignore all files in the .idea directory (IntelliJ IDEA settings)
.idea

# Ignore all files in the dist directory (build output)
dist

# Ignore all files in the build directory (build output)
build

# Ignore all files in the coverage directory (test coverage reports)
coverage

# Ignore all files with the .log extension
*.log

# Ignore all files with the .tmp extension
*.tmp

# Ignore all files with the .swp extension
*.swp

# Ignore all files with the .DS_Store extension (macOS specific)
.DS_Store

# Ignore all files in uploads directory
uploads
@@ -9,18 +9,17 @@ Perplexica's design consists of two main domains:

- **Frontend (`ui` directory)**: This is a Next.js application holding all user interface components. It's a self-contained environment that manages everything the user interacts with.
- **Backend (root and `src` directory)**: The backend logic is situated in the `src` folder, but the root directory holds the main `package.json` for backend dependency management.

Both the root directory (for backend configurations outside `src`) and the `ui` folder come with an `.env.example` file. These are templates for environment variables that you need to set up manually for the application to run correctly.

## Setting Up Your Environment

Before diving into coding, setting up your local environment is key. Here's what you need to do:

### Backend

1. In the root directory, locate the `.env.example` file.
2. Rename it to `.env` and fill in the necessary environment variables specific to the backend.
1. In the root directory, locate the `sample.config.toml` file.
2. Rename it to `config.toml` and fill in the necessary configuration fields specific to the backend.
3. Run `npm install` to install dependencies.
4. Use `npm run dev` to start the backend in development mode.
4. Run `npm run db:push` to set up the local SQLite database.
5. Use `npm run dev` to start the backend in development mode.

### Frontend
README.md (131 changes)
@@ -1,6 +1,6 @@
# 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->




## Table of Contents <!-- omit in toc -->

@@ -10,34 +10,44 @@
- [Installation](#installation)
  - [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
  - [Non-Docker Installation](#non-docker-installation)
  - [Ollama Connection Errors](#ollama-connection-errors)
- [Using as a Search Engine](#using-as-a-search-engine)
- [Using Perplexica's API](#using-perplexicas-api)
- [Expose Perplexica to a network](#expose-perplexica-to-network)
- [One-Click Deployment](#one-click-deployment)
- [Upcoming Features](#upcoming-features)
- [Support Us](#support-us)
  - [Donations](#donations)
- [Contribution](#contribution)
- [Acknowledgements](#acknowledgements)
- [Help and Support](#help-and-support)

## Overview

Perplexica is an open-source AI-powered search engine that goes deep into the internet to find answers. Inspired by Perplexity AI, it's an open-source option that not only searches the web but also understands your questions. It uses advanced machine learning algorithms like similarity searching and embeddings to refine results, and it provides clear answers with cited sources.

Using SearxNG to stay current and fully open source, Perplexica ensures you always get the most up-to-date information without compromising your privacy.

Want to know more about its architecture and how it works? You can read it [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/README.md).

## Preview



## Features

- **Local LLMs**: You can make use of local LLMs such as Llama3 and Mixtral using Ollama.
- **Two Main Modes:**
  - **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Unlike normal search, which relies only on the context returned by SearxNG, it visits the top matches and tries to find sources relevant to the user's query directly from the page.
  - **Normal Mode:** Processes your query and performs a web search.
- **Focus Modes:** Special modes to better answer specific types of questions. Perplexica currently has 6 focus modes:

  1. **All Mode:** Searches the entire web to find the best results.
  2. **Writing Assistant Mode:** Helpful for writing tasks that do not require searching the web.
  3. **Academic Search Mode:** Finds articles and papers, ideal for academic research.
  4. **YouTube Search Mode:** Finds YouTube videos based on the search query.
  5. **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
  6. **Reddit Search Mode:** Searches Reddit for discussions and opinions related to the query.

- **Current Information:** Some search tools might give you outdated information because they use data from crawling bots, convert it into embeddings, and store it in an index (essentially converting the web into embeddings, which is quite expensive). Unlike them, Perplexica uses SearxNG, a metasearch engine, to fetch results, rerank them, and extract the most relevant sources, ensuring you always get the latest information without the overhead of daily data updates.
  - **All Mode:** Searches the entire web to find the best results.
  - **Writing Assistant Mode:** Helpful for writing tasks that do not require searching the web.
  - **Academic Search Mode:** Finds articles and papers, ideal for academic research.
  - **YouTube Search Mode:** Finds YouTube videos based on the search query.
  - **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
  - **Reddit Search Mode:** Searches Reddit for discussions and opinions related to the query.
- **Current Information:** Some search tools might give you outdated information because they use data from crawling bots, convert it into embeddings, and store it in an index. Unlike them, Perplexica uses SearxNG, a metasearch engine, to fetch results, rerank them, and extract the most relevant sources, ensuring you always get the latest information without the overhead of daily data updates.
- **API**: Integrate Perplexica into your existing applications and make use of its capabilities.

It has many more features like image and video search. Some of the planned features are mentioned in [upcoming features](#upcoming-features).

@@ -51,54 +61,115 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.

2. Clone the Perplexica repository:

   ```bash
   git clone -b feat/ollama-support https://github.com/ItzCrazyKns/Perplexica.git
   git clone https://github.com/ItzCrazyKns/Perplexica.git
   ```

3. After cloning, navigate to the directory containing the project files.

4. Rename the `.env.example` file to `.env`. For Docker setups, you need only fill in the following fields:
4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:

   - `OLLAMA_URL` (the URL where Ollama is running; it is filled by default, but you need to replace it if your Ollama URL is different)
   - `MODEL_NAME` (filled by default; you can change it if you want to use a different model)
   - `SIMILARITY_MEASURE` (filled by default; you can leave it as is if you are unsure about it)
   - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models.**
   - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's.**
   - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models.**
   - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models.**

     **Note**: You can change these after starting Perplexica from the settings dialog.

   - `SIMILARITY_MEASURE`: The similarity measure to use (filled by default; you can leave it as is if you are unsure about it).

5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute:

   ```bash
   docker compose up
   docker compose up -d
   ```

6. Wait a few minutes for the setup to complete. You can access Perplexica at http://localhost:3000 in your web browser.

   **Note**: Once the terminal is stopped, Perplexica will also stop. To restart it, you will need to open Docker Desktop and run Perplexica again.
   **Note**: After the containers are built, you can start Perplexica directly from Docker without having to open a terminal.

### Non-Docker Installation

For setups without Docker:

1. Follow the initial steps to clone the repository and rename the `.env.example` file to `.env` in the root directory. You will need to fill in all the fields in this file.
2. Additionally, rename the `.env.example` file to `.env` in the `ui` folder and complete all fields.
3. The non-Docker setup requires manual configuration of both the backend and frontend.
1. Install SearXNG and allow `JSON` format in the SearXNG settings.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. Rename the `.env.example` file to `.env` in the `ui` folder and fill in all necessary fields.
4. After populating the configuration and environment files, run `npm i` in both the `ui` folder and the root directory.
5. Install the dependencies and then execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.

**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.

See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information, like exposing it to your network, etc.

### Ollama Connection Errors

If you're encountering an Ollama connection error, it is likely due to the backend being unable to connect to Ollama's API. To fix this issue you can:

1. **Check your Ollama API URL:** Ensure that the API URL is correctly set in the settings menu.
2. **Update the API URL based on your OS:**

   - **Windows:** Use `http://host.docker.internal:11434`
   - **Mac:** Use `http://host.docker.internal:11434`
   - **Linux:** Use `http://<private_ip_of_host>:11434`

   Adjust the port number if you're using a different one.

3. **Linux users - expose Ollama to the network:**

   - Inside `/etc/systemd/system/ollama.service`, add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama with `systemctl restart ollama`. For more information, see the [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux).

   - Ensure that the port (default is 11434) is not blocked by your firewall.

## Using as a Search Engine

If you wish to use Perplexica as an alternative to traditional search engines like Google or Bing, or if you want to add a shortcut for quick access from your browser's search bar, follow these steps:

1. Open your browser's settings.
2. Navigate to the 'Search Engines' section.
3. Add a new site search with the following URL: `http://localhost:3000/?q=%s`. Replace `localhost` with your IP address or domain name, and `3000` with the port number if Perplexica is not hosted locally.
4. Click the add button. Now, you can use Perplexica directly from your browser's search bar.

## Using Perplexica's API

Perplexica also provides an API for developers looking to integrate its powerful search engine into their own applications. You can run searches, use multiple models, and get answers to your queries.

For more details, check out the full documentation [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/API/SEARCH.md).

## Expose Perplexica to network

You can access Perplexica over your home network by following our networking guide [here](https://github.com/ItzCrazyKns/Perplexica/blob/master/docs/installation/NETWORKING.md).

## One-Click Deployment

[](https://repocloud.io/details/?app_id=267)

## Upcoming Features

- [ ] Finalizing Copilot Mode
- [ ] Adding support for multiple local LLMs and LLM providers such as Anthropic, Google, etc.
- [ ] Adding Discover and History Saving features
- [x] Add settings page
- [x] Adding support for local LLMs
- [x] History Saving features
- [x] Introducing various Focus Modes
- [x] Adding API support
- [x] Adding Discover
- [ ] Finalizing Copilot Mode

## Support Us

If you find Perplexica useful, consider giving us a star on GitHub. This helps more people discover Perplexica and supports the development of new features. Your support is appreciated.
If you find Perplexica useful, consider giving us a star on GitHub. This helps more people discover Perplexica and supports the development of new features. Your support is greatly appreciated.

### Donations

We also accept donations to help sustain our project. If you would like to contribute, you can use the following options to donate. Thank you for your support!

| Ethereum                                                |
| ------------------------------------------------------- |
| Address: `0xB025a84b2F269570Eb8D4b05DEdaA41D8525B6DD`   |

## Contribution

Perplexica is built on the idea that AI and large language models should be easy for everyone to use. If you find bugs or have ideas, please share them via GitHub Issues. For more information on contributing to Perplexica, you can read the [CONTRIBUTING.md](CONTRIBUTING.md) file to learn more about Perplexica and how you can contribute to it.

## Acknowledgements
## Help and Support

Inspired by Perplexity AI, Perplexica aims to provide a similar service but always up-to-date and fully open source, thanks to SearxNG.
If you have any questions or feedback, please feel free to reach out to us. You can create an issue on GitHub or join our Discord server. There, you can connect with other users, share your experiences and reviews, and receive more personalized help. [Click here](https://discord.gg/EFwsmQDgAu) to join the Discord server. To discuss matters outside of regular support, feel free to contact me on Discord at `itzcrazykns`.

If you have any queries you can reach me via my Discord - `itzcrazykns`. Thanks for checking out Perplexica.
Thank you for exploring Perplexica, the AI-powered search engine designed to enhance your search experience. We are constantly working to improve Perplexica and expand its capabilities. We value your feedback and contributions, which help us make Perplexica even better. Don't forget to check back for updates and new features!
@@ -1,7 +1,7 @@
FROM node:alpine
FROM node:20.18.0-alpine

ARG NEXT_PUBLIC_WS_URL
ARG NEXT_PUBLIC_API_URL
ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}

@@ -9,7 +9,7 @@ WORKDIR /home/perplexica

COPY ui /home/perplexica/

RUN yarn install
RUN yarn install --frozen-lockfile
RUN yarn build

CMD ["yarn", "start"]
@@ -1,17 +1,17 @@
FROM node:alpine

ARG SEARXNG_API_URL
ENV SEARXNG_API_URL=${SEARXNG_API_URL}
FROM node:18-slim

WORKDIR /home/perplexica

COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY .env /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/

RUN yarn install
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads

RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build

CMD ["yarn", "start"]
data/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
*
!.gitignore
@@ -1,28 +1,34 @@
services:
  searxng:
    build:
      context: .
      dockerfile: searxng.dockerfile
    expose:
      - 4000
    image: docker.io/searxng/searxng:latest
    volumes:
      - ./searxng:/etc/searxng:rw
    ports:
      - 4000:8080
    networks:
      - perplexica-network
    restart: unless-stopped

  perplexica-backend:
    build:
      context: .
      dockerfile: backend.dockerfile
      args:
    image: itzcrazykns1337/perplexica-backend:main
    environment:
      - SEARXNG_API_URL=http://searxng:8080
    depends_on:
      - searxng
    expose:
      - 3001
    ports:
      - 3001:3001
    volumes:
      - backend-dbstore:/home/perplexica/data
      - uploads:/home/perplexica/uploads
      - ./config.toml:/home/perplexica/config.toml
    extra_hosts:
      - 'host.docker.internal:host-gateway'
    networks:
      - perplexica-network
    restart: unless-stopped

  perplexica-frontend:
    build:
@@ -31,14 +37,18 @@ services:
      args:
        - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
        - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
    image: itzcrazykns1337/perplexica-frontend:main
    depends_on:
      - perplexica-backend
    expose:
      - 3000
    ports:
      - 3000:3000
    networks:
      - perplexica-network
    restart: unless-stopped

networks:
  perplexica-network:

volumes:
  backend-dbstore:
  uploads:
docs/API/SEARCH.md (new file, 117 lines)
@@ -0,0 +1,117 @@
# Perplexica Search API Documentation

## Overview

Perplexica's Search API makes it easy to use our AI-powered search engine. You can run different types of searches, pick the models you want to use, and get the most recent information. The sections below describe Perplexica's Search API in detail.

## Endpoint

### **POST** `http://localhost:3001/api/search`

**Note**: Replace `3001` with any other port if you've changed the default PORT.

### Request

The API accepts a JSON object in the request body, where you define the focus mode, chat models, embedding models, and your query.

#### Request Body Structure

```json
{
  "chatModel": {
    "provider": "openai",
    "model": "gpt-4o-mini"
  },
  "embeddingModel": {
    "provider": "openai",
    "model": "text-embedding-3-large"
  },
  "optimizationMode": "speed",
  "focusMode": "webSearch",
  "query": "What is Perplexica",
  "history": [
    ["human", "Hi, how are you?"],
    ["assistant", "I am doing well, how can I help you today?"]
  ]
}
```

### Request Parameters

- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details, you can send a GET request to `http://localhost:3001/api/models`. Make sure to use the key value (for example, "gpt-4o-mini" instead of the display name "GPT 4 omni mini").

  - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
  - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
  - Optional fields for custom OpenAI configuration:
    - `customOpenAIBaseURL`: If you're using a custom OpenAI instance, provide the base URL.
    - `customOpenAIKey`: The API key for a custom OpenAI instance.

- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details, you can send a GET request to `http://localhost:3001/api/models`. Make sure to use the key value (for example, "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").

  - `provider`: The provider for the embedding model (e.g., `openai`).
  - `model`: The specific embedding model (e.g., `text-embedding-3-large`).

- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:

  - `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`.

- **`optimizationMode`** (string, optional): Specifies the optimization mode to control the balance between performance and quality. Available modes:

  - `speed`: Prioritize speed and return the fastest answer.
  - `balanced`: Provide a balanced answer with good speed and reasonable quality.

- **`query`** (string, required): The search query or question.

- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:

  ```json
  [
    ["human", "What is Perplexica?"],
    ["assistant", "Perplexica is an AI-powered search engine..."]
  ]
  ```

### Response

The response from the API includes both the final message and the sources used to generate that message.

#### Example Response

```json
{
  "message": "Perplexica is an innovative, open-source AI-powered search engine designed to enhance the way users search for information online. Here are some key features and characteristics of Perplexica:\n\n- **AI-Powered Technology**: It utilizes advanced machine learning algorithms to not only retrieve information but also to understand the context and intent behind user queries, providing more relevant results [1][5].\n\n- **Open-Source**: Being open-source, Perplexica offers flexibility and transparency, allowing users to explore its functionalities without the constraints of proprietary software [3][10].",
  "sources": [
    {
      "pageContent": "Perplexica is an innovative, open-source AI-powered search engine designed to enhance the way users search for information online.",
      "metadata": {
        "title": "What is Perplexica, and how does it function as an AI-powered search ...",
        "url": "https://askai.glarity.app/search/What-is-Perplexica--and-how-does-it-function-as-an-AI-powered-search-engine"
      }
    },
    {
      "pageContent": "Perplexica is an open-source AI-powered search tool that dives deep into the internet to find precise answers.",
      "metadata": {
        "title": "Sahar Mor's Post",
        "url": "https://www.linkedin.com/posts/sahar-mor_a-new-open-source-project-called-perplexica-activity-7204489745668694016-ncja"
      }
    }
    ....
  ]
}
```

### Fields in the Response

- **`message`** (string): The search result, generated based on the query and focus mode.
- **`sources`** (array): A list of sources that were used to generate the search result. Each source includes:
  - `pageContent`: A snippet of the relevant content from the source.
  - `metadata`: Metadata about the source, including:
    - `title`: The title of the webpage.
    - `url`: The URL of the webpage.

### Error Handling

If an error occurs during the search process, the API will return an appropriate error message with an HTTP status code.

- **400**: If the request is malformed or missing required fields (e.g., no focus mode or query).
- **500**: If an internal server error occurs during the search.
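To put the request and response shapes above together, here is a minimal client sketch in TypeScript (the backend's own language). It assumes the backend is running on the default port 3001; the `SearchResponse` interface and the `search` helper are illustrative names, not part of the repository.

```typescript
// Minimal sketch of calling the Search API from TypeScript (Node 18+,
// which ships a global fetch). Endpoint and body fields follow the
// documentation above; the helper itself is hypothetical.

interface SearchResponse {
  message: string;
  sources: {
    pageContent: string;
    metadata: { title: string; url: string };
  }[];
}

async function search(query: string): Promise<SearchResponse> {
  const res = await fetch('http://localhost:3001/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatModel: { provider: 'openai', model: 'gpt-4o-mini' },
      embeddingModel: { provider: 'openai', model: 'text-embedding-3-large' },
      optimizationMode: 'balanced',
      focusMode: 'webSearch',
      query,
    }),
  });

  if (!res.ok) {
    // The API returns 400 for malformed requests and 500 for internal errors.
    throw new Error(`Search failed with status ${res.status}`);
  }

  return (await res.json()) as SearchResponse;
}

search('What is Perplexica').then((result) => console.log(result.message));
```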
docs/architecture/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
## Perplexica's Architecture

Perplexica's architecture consists of the following key components:

1. **User Interface**: A web-based interface that allows users to interact with Perplexica for searching images, videos, and much more.
2. **Agent/Chains**: These components predict Perplexica's next actions, understand user queries, and decide whether a web search is necessary.
3. **SearXNG**: A metasearch engine used by Perplexica to search the web for sources.
4. **LLMs (Large Language Models)**: Utilized by agents and chains for tasks like understanding content, writing responses, and citing sources. Examples include Claude, GPTs, etc.
5. **Embedding Models**: To improve the accuracy of search results, embedding models re-rank the results using similarity search algorithms such as cosine similarity and dot product distance.

For a more detailed explanation of how these components work together, see [WORKING.md](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/WORKING.md).
docs/architecture/WORKING.md (new file, 19 lines)
@@ -0,0 +1,19 @@
## How does Perplexica work?

Curious about how Perplexica works? Don't worry, we'll cover it here. Before we begin, make sure you've read about the architecture of Perplexica to ensure you understand what it's made up of. Haven't read it? You can read it [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/README.md).

We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:

1. The message is sent via WS to the backend server, where it invokes the chain. The chain depends on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
2. The chain is now invoked; first, the message is passed to another chain, which predicts (using the chat history and the question) whether sources and a web search are needed. If they are, it generates a query (in accordance with the chat history) for searching the web, which we'll take up later. If not, the chain ends there, and then the answer generator chain, also known as the response generator, is started.
3. The query returned by the first chain is passed to SearXNG to search the web for information.
4. The information retrieved at this stage comes from keyword-based search. We then convert both the information and the query into embeddings and perform a similarity search to find the most relevant sources for answering the query (see the sketch after this list).
5. After all this is done, the sources are passed to the response generator. This chain takes all the chat history, the query, and the sources, and generates a response that is streamed to the UI.
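To make step 4 concrete, here is an illustrative TypeScript sketch of the re-ranking idea: embeddings are compared with cosine similarity (or dot product, depending on the configured `SIMILARITY_MEASURE`) and the highest-scoring sources are kept. This is a standalone approximation, not the repository's actual `computeSimilarity` utility.

```typescript
// Cosine similarity between two embedding vectors of equal length.
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

// Rank document embeddings against the query embedding, highest first,
// returning the document indices in relevance order.
function rankBySimilarity(
  queryEmbedding: number[],
  docEmbeddings: number[][],
): number[] {
  return docEmbeddings
    .map((doc, index) => ({
      index,
      score: cosineSimilarity(queryEmbedding, doc),
    }))
    .sort((a, b) => b.score - a.score)
    .map((entry) => entry.index);
}
```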
### How are the answers cited?

The LLMs are prompted to do so. We've prompted them so well that they cite the answers themselves, and using some UI magic, we display it to the user.

### Image and Video Search

Image and video searches are conducted in a similar manner. A query is always generated first, then we search the web for images and videos that match the query. These results are then returned to the user.
docs/installation/NETWORKING.md (new file, 109 lines)
@@ -0,0 +1,109 @@
# Expose Perplexica to a network

This guide will show you how to make Perplexica available over a network. Follow these steps to allow computers on the same network to interact with Perplexica. Choose the instructions that match the operating system you are using.

## Windows

1. Open PowerShell as Administrator

2. Navigate to the directory containing the `docker-compose.yaml` file

3. Stop and remove the existing Perplexica containers and images:

   ```
   docker compose down --rmi all
   ```

4. Open the `docker-compose.yaml` file in a text editor like Notepad++

5. Replace `127.0.0.1` with the IP address of the server Perplexica is running on in these two lines:

   ```
   args:
     - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
     - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
   ```

6. Save and close the `docker-compose.yaml` file

7. Rebuild and restart the Perplexica container:

   ```
   docker compose up -d --build
   ```

## macOS

1. Open the Terminal application

2. Navigate to the directory with the `docker-compose.yaml` file:

   ```
   cd /path/to/docker-compose.yaml
   ```

3. Stop and remove existing containers and images:

   ```
   docker compose down --rmi all
   ```

4. Open `docker-compose.yaml` in a text editor like Sublime Text:

   ```
   nano docker-compose.yaml
   ```

5. Replace `127.0.0.1` with the server IP in these lines:

   ```
   args:
     - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
     - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
   ```

6. Save and exit the editor

7. Rebuild and restart Perplexica:

   ```
   docker compose up -d --build
   ```

## Linux

1. Open the terminal

2. Navigate to the `docker-compose.yaml` directory:

   ```
   cd /path/to/docker-compose.yaml
   ```

3. Stop and remove containers and images:

   ```
   docker compose down --rmi all
   ```

4. Edit `docker-compose.yaml`:

   ```
   nano docker-compose.yaml
   ```

5. Replace `127.0.0.1` with the server IP:

   ```
   args:
     - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
     - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
   ```

6. Save and exit the editor

7. Rebuild and restart Perplexica:

   ```
   docker compose up -d --build
   ```
docs/installation/UPDATING.md
Normal file
40
docs/installation/UPDATING.md
Normal file
@ -0,0 +1,40 @@
|
||||
# Update Perplexica to the latest version
|
||||
|
||||
To update Perplexica to the latest version, follow these steps:
|
||||
|
||||
## For Docker users
|
||||
|
||||
1. Clone the latest version of Perplexica from GitHub:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/ItzCrazyKns/Perplexica.git
|
||||
```
|
||||
|
||||
2. Navigate to the Project Directory.
|
||||
|
||||
3. Pull latest images from registry.
|
||||
|
||||
```bash
|
||||
docker compose pull
|
||||
```
|
||||
|
||||
4. Update and Recreate containers.
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
5. Once the command completes running go to http://localhost:3000 and verify the latest changes.
|
||||
|
||||
## For non Docker users
|
||||
|
||||
1. Clone the latest version of Perplexica from GitHub:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/ItzCrazyKns/Perplexica.git
|
||||
```
|
||||
|
||||
2. Navigate to the Project Directory
|
||||
3. Execute `npm i` in both the `ui` folder and the root directory.
|
||||
4. Once packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
|
||||
5. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
|
drizzle.config.ts (new file, 10 lines)
@@ -0,0 +1,10 @@
import { defineConfig } from 'drizzle-kit';

export default defineConfig({
  dialect: 'sqlite',
  schema: './src/db/schema.ts',
  out: './drizzle',
  dbCredentials: {
    url: './data/db.sqlite',
  },
});
package.json (27 changes)
@@ -1,33 +1,52 @@
{
  "name": "perplexica-backend",
  "version": "1.0.0",
  "version": "1.9.3",
  "license": "MIT",
  "author": "ItzCrazyKns",
  "scripts": {
    "start": "node --env-file=.env dist/app.js",
    "start": "npm run db:push && node dist/app.js",
    "build": "tsc",
    "dev": "nodemon -r dotenv/config src/app.ts",
    "dev": "nodemon --ignore uploads/ src/app.ts ",
    "db:push": "drizzle-kit push sqlite",
    "format": "prettier . --check",
    "format:write": "prettier . --write"
  },
  "devDependencies": {
    "@types/better-sqlite3": "^7.6.10",
    "@types/cors": "^2.8.17",
    "@types/express": "^4.17.21",
    "@types/html-to-text": "^9.0.4",
    "@types/multer": "^1.4.12",
    "@types/pdf-parse": "^1.1.4",
    "@types/readable-stream": "^4.0.11",
    "@types/ws": "^8.5.12",
    "drizzle-kit": "^0.22.7",
    "nodemon": "^3.1.0",
    "prettier": "^3.2.5",
    "ts-node": "^10.9.2",
    "typescript": "^5.4.3"
  },
  "dependencies": {
    "@iarna/toml": "^2.2.5",
    "@langchain/anthropic": "^0.2.3",
    "@langchain/community": "^0.2.16",
    "@langchain/openai": "^0.0.25",
    "@xenova/transformers": "^2.17.1",
    "axios": "^1.6.8",
    "better-sqlite3": "^11.0.0",
    "compute-cosine-similarity": "^1.1.0",
    "compute-dot": "^1.1.0",
    "cors": "^2.8.5",
    "dotenv": "^16.4.5",
    "drizzle-orm": "^0.31.2",
    "express": "^4.19.2",
    "html-to-text": "^9.0.5",
    "langchain": "^0.1.30",
    "ws": "^8.16.0",
    "mammoth": "^1.8.0",
    "multer": "^1.4.5-lts.1",
    "pdf-parse": "^1.1.1",
    "winston": "^3.13.0",
    "ws": "^8.17.1",
    "zod": "^3.22.4"
  }
}
sample.config.toml (new file, 13 lines)
@@ -0,0 +1,13 @@
[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1, use "-1m")

[API_KEYS]
OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef

[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL
OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434
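Since `@iarna/toml` is listed among the backend's dependencies in `package.json` above, the backend presumably parses this file along the following lines. This is a hedged sketch: the `Config` interface mirrors the fields of `sample.config.toml`, but the loader itself is hypothetical, not the repository's actual code.

```typescript
// Hypothetical sketch of reading config.toml with @iarna/toml
// (a listed backend dependency). Field names match sample.config.toml;
// the loadConfig helper is illustrative only.
import fs from 'fs';
import toml from '@iarna/toml';

interface Config {
  GENERAL: { PORT: number; SIMILARITY_MEASURE: string; KEEP_ALIVE: string };
  API_KEYS: { OPENAI: string; GROQ: string; ANTHROPIC: string };
  API_ENDPOINTS: { SEARXNG: string; OLLAMA: string };
}

const loadConfig = (): Config =>
  toml.parse(fs.readFileSync('config.toml', 'utf-8')) as unknown as Config;

const config = loadConfig();
console.log(`Backend will listen on port ${config.GENERAL.PORT}`);
```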
searxng-settings.yml (2380 changes): file diff suppressed because it is too large.
@@ -1,3 +0,0 @@
FROM searxng/searxng

COPY searxng-settings.yml /etc/searxng/settings.yml
searxng/limiter.toml (new file, 3 lines)
@@ -0,0 +1,3 @@
[botdetection.ip_limit]
# activate link_token method in the ip_limit method
link_token = true
searxng/settings.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
use_default_settings: true

general:
  instance_name: 'searxng'

search:
  autocomplete: 'google'
  formats:
    - html
    - json

server:
  secret_key: 'a2fb23f1b02e6ee83875b09826990de0f6bd908b6638e8c10277d415f6ab852b' # Is overwritten by ${SEARXNG_SECRET}

engines:
  - name: wolframalpha
    disabled: false
searxng/uwsgi.ini (new file, 50 lines)
@@ -0,0 +1,50 @@
[uwsgi]
# Who will run the code
uid = searxng
gid = searxng

# Number of workers (usually CPU count)
# default value: %k (= number of CPU core, see Dockerfile)
workers = %k

# Number of threads per worker
# default value: 4 (see Dockerfile)
threads = 4

# The right granted on the created socket
chmod-socket = 666

# Plugin to use and interpreter config
single-interpreter = true
master = true
plugin = python3
lazy-apps = true
enable-threads = 4

# Module to import
module = searx.webapp

# Virtualenv and python path
pythonpath = /usr/local/searxng/
chdir = /usr/local/searxng/searx/

# automatically set processes name to something meaningful
auto-procname = true

# Disable request logging for privacy
disable-logging = true
log-5xx = true

# Set the max size of a request (request-body excluded)
buffer-size = 8192

# No keep alive
# See https://github.com/searx/searx-docker/issues/24
add-header = Connection: close

# uwsgi serves the static files
static-map = /static=/usr/local/searxng/searx/static
# expires set to one day
static-expires = /* 86400
static-gzip-all = True
offload-threads = 4
@ -1,260 +0,0 @@
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import {
|
||||
PromptTemplate,
|
||||
ChatPromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
} from '@langchain/core/prompts';
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicAcademicSearchRetrieverPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
|
||||
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
|
||||
|
||||
Example:
|
||||
1. Follow up question: How does stable diffusion work?
|
||||
Rephrased: Stable diffusion working
|
||||
|
||||
2. Follow up question: What is linear algebra?
|
||||
Rephrased: Linear algebra
|
||||
|
||||
3. Follow up question: What is the third law of thermodynamics?
|
||||
Rephrased: Third law of thermodynamics
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
const basicAcademicSearchResponsePrompt = `
|
||||
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Acadedemic', this means you will be searching for academic papers and articles on the web.
|
||||
|
||||
Generate a response that is informative and relevant to the user's query based on provided context (the context consits of search results containg a brief description of the content of that page).
|
||||
You must use this context to answer the user's query in the best way possible. Use an unbaised and journalistic tone in your response. Do not repeat the text.
|
||||
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
|
||||
Your responses should be medium to long in length be informative and relevant to the user's query. You can use markdowns to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
|
||||
You have to cite the answer using [number] notation. You must cite the sentences with their relevent context number. You must cite each and every part of the answer so the user can know where the information is coming from.
|
||||
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
|
||||
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
|
||||
|
||||
Aything inside the following \`context\` HTML block provided below is for your knowledge returned by the search engine and is not shared by the user. You have to answer question on the basis of it and cite the relevant information from it but you do not have to
|
||||
talk about the context in your response.
|
||||
|
||||
<context>
|
||||
{context}
|
||||
</context>
|
||||
|
||||
If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
|
||||
Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
|
||||
`;
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const handleStream = async (
|
||||
stream: AsyncGenerator<StreamEvent, any, unknown>,
|
||||
emitter: eventEmitter,
|
||||
) => {
|
||||
for await (const event of stream) {
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalSourceRetriever'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'sources', data: event.data.output }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_stream' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'response', data: event.data.chunk }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit('end');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const processDocs = async (docs: Document[]) => {
|
||||
return docs
|
||||
.map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
|
||||
.join('\n');
|
||||
};
|
||||
|
||||
const rerankDocs = async ({
|
||||
query,
|
||||
docs,
|
||||
}: {
|
||||
query: string;
|
||||
docs: Document[];
|
||||
}) => {
|
||||
if (docs.length === 0) {
|
||||
return docs;
|
||||
}
|
||||
|
||||
const docsWithContent = docs.filter(
|
||||
(doc) => doc.pageContent && doc.pageContent.length > 0,
|
||||
);
|
||||
|
||||
const docEmbeddings = await embeddings.embedDocuments(
|
||||
docsWithContent.map((doc) => doc.pageContent),
|
||||
);
|
||||
|
||||
const queryEmbedding = await embeddings.embedQuery(query);
|
||||
|
||||
const similarity = docEmbeddings.map((docEmbedding, i) => {
|
||||
const sim = computeSimilarity(queryEmbedding, docEmbedding);
|
||||
|
||||
return {
|
||||
index: i,
|
||||
similarity: sim,
|
||||
};
|
||||
});
|
||||
|
||||
const sortedDocs = similarity
|
||||
.sort((a, b) => b.similarity - a.similarity)
|
||||
.slice(0, 15)
|
||||
.map((sim) => docsWithContent[sim.index]);
|
||||
|
||||
return sortedDocs;
|
||||
};
|
||||
|
||||
type BasicChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const basicAcademicSearchRetrieverChain = RunnableSequence.from([
|
||||
PromptTemplate.fromTemplate(basicAcademicSearchRetrieverPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
if (input === 'not_needed') {
|
||||
return { query: '', docs: [] };
|
||||
}
|
||||
|
||||
const res = await searchSearxng(input, {
|
||||
language: 'en',
|
||||
engines: [
|
||||
'arxiv',
|
||||
'google_scholar',
|
||||
'internet_archive_scholar',
|
||||
'pubmed',
|
||||
],
|
||||
});
|
||||
|
||||
const documents = res.results.map(
|
||||
(result) =>
|
||||
new Document({
|
||||
pageContent: result.content,
|
||||
metadata: {
|
||||
title: result.title,
|
||||
url: result.url,
|
||||
...(result.img_src && { img_src: result.img_src }),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
return { query: input, docs: documents };
|
||||
}),
|
||||
]);
|
||||
|
||||
const basicAcademicSearchAnsweringChain = RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
query: (input: BasicChainInput) => input.query,
|
||||
chat_history: (input: BasicChainInput) => input.chat_history,
|
||||
context: RunnableSequence.from([
|
||||
(input) => ({
|
||||
query: input.query,
|
||||
chat_history: formatChatHistoryAsString(input.chat_history),
|
||||
}),
|
||||
basicAcademicSearchRetrieverChain
|
||||
.pipe(rerankDocs)
|
||||
.withConfig({
|
||||
runName: 'FinalSourceRetriever',
|
||||
})
|
||||
.pipe(processDocs),
|
||||
]),
|
||||
}),
|
||||
ChatPromptTemplate.fromMessages([
|
||||
['system', basicAcademicSearchResponsePrompt],
|
||||
new MessagesPlaceholder('chat_history'),
|
||||
['user', '{query}'],
|
||||
]),
|
||||
chatLLM,
|
||||
strParser,
|
||||
]).withConfig({
|
||||
runName: 'FinalResponseGenerator',
|
||||
});
|
||||
|
||||
const basicAcademicSearch = (query: string, history: BaseMessage[]) => {
|
||||
const emitter = new eventEmitter();
|
||||
|
||||
try {
|
||||
const stream = basicAcademicSearchAnsweringChain.streamEvents(
|
||||
{
|
||||
chat_history: history,
|
||||
query: query,
|
||||
},
|
||||
{
|
||||
version: 'v1',
|
||||
},
|
||||
);
|
||||
|
||||
handleStream(stream, emitter);
|
||||
} catch (err) {
|
||||
emitter.emit(
|
||||
'error',
|
||||
JSON.stringify({ data: 'An error has occurred please try again later' }),
|
||||
);
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return emitter;
|
||||
};
|
||||
|
||||
const handleAcademicSearch = (message: string, history: BaseMessage[]) => {
|
||||
const emitter = basicAcademicSearch(message, history);
|
||||
return emitter;
|
||||
};
|
||||
|
||||
export default handleAcademicSearch;
|
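The handler returns immediately with an EventEmitter and streams results over it: one stringified 'sources' payload when FinalSourceRetriever finishes, incremental 'response' chunks from FinalResponseGenerator, then 'end'. A minimal consumer sketch of that protocol — the console transport below is illustrative, not part of this file:

// Illustrative consumer of the emitter protocol; the transport is made up.
const emitter = handleAcademicSearch('What is CRISPR gene editing?', []);

emitter.on('data', (raw: string) => {
  const event = JSON.parse(raw);
  if (event.type === 'sources') {
    console.log('sources:', event.data); // reranked documents
  } else if (event.type === 'response') {
    process.stdout.write(event.data); // streamed answer chunk
  }
});

emitter.on('end', () => console.log('\n[done]'));
emitter.on('error', (raw: string) => console.error(JSON.parse(raw).data));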
@@ -1,81 +0,0 @@
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const imageSearchChainPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
|
||||
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
|
||||
|
||||
Example:
|
||||
1. Follow up question: What is a cat?
|
||||
Rephrased: A cat
|
||||
|
||||
2. Follow up question: What is a car? How does it work?
|
||||
Rephrased: Car working
|
||||
|
||||
3. Follow up question: How does an AC work?
|
||||
Rephrased: AC working
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
type ImageSearchChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const imageSearchChain = RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
chat_history: (input: ImageSearchChainInput) => {
|
||||
return formatChatHistoryAsString(input.chat_history);
|
||||
},
|
||||
query: (input: ImageSearchChainInput) => {
|
||||
return input.query;
|
||||
},
|
||||
}),
|
||||
PromptTemplate.fromTemplate(imageSearchChainPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
const res = await searchSearxng(input, {
|
||||
categories: ['images'],
|
||||
engines: ['bing_images', 'google_images'],
|
||||
});
|
||||
|
||||
const images = [];
|
||||
|
||||
res.results.forEach((result) => {
|
||||
if (result.img_src && result.url && result.title) {
|
||||
images.push({
|
||||
img_src: result.img_src,
|
||||
url: result.url,
|
||||
title: result.title,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
return images.slice(0, 10);
|
||||
}),
|
||||
]);
|
||||
|
||||
export default imageSearchChain;
|
@@ -1,256 +0,0 @@
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import {
|
||||
PromptTemplate,
|
||||
ChatPromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
} from '@langchain/core/prompts';
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicRedditSearchRetrieverPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
|
||||
If it is a writing task or a simple greeting such as hi or hello rather than a question, you need to return \`not_needed\` as the response.
|
||||
|
||||
Example:
|
||||
1. Follow up question: Which company is most likely to create an AGI
|
||||
Rephrased: Which company is most likely to create an AGI
|
||||
|
||||
2. Follow up question: Is Earth flat?
|
||||
Rephrased: Is Earth flat?
|
||||
|
||||
3. Follow up question: Is there life on Mars?
|
||||
Rephrased: Is there life on Mars?
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
const basicRedditSearchResponsePrompt = `
|
||||
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
|
||||
|
||||
Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
|
||||
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
|
||||
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
|
||||
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
|
||||
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
|
||||
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
|
||||
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
|
||||
|
||||
Anything inside the following \`context\` HTML block provided below is for your knowledge returned by Reddit and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
|
||||
talk about the context in your response.
|
||||
|
||||
<context>
|
||||
{context}
|
||||
</context>
|
||||
|
||||
If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
|
||||
Anything between the \`context\` is retrieved from Reddit and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
|
||||
`;
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const handleStream = async (
|
||||
stream: AsyncGenerator<StreamEvent, any, unknown>,
|
||||
emitter: eventEmitter,
|
||||
) => {
|
||||
for await (const event of stream) {
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalSourceRetriever'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'sources', data: event.data.output }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_stream' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'response', data: event.data.chunk }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit('end');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const processDocs = async (docs: Document[]) => {
|
||||
return docs
|
||||
.map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
|
||||
.join('\n');
|
||||
};
|
||||
|
||||
const rerankDocs = async ({
|
||||
query,
|
||||
docs,
|
||||
}: {
|
||||
query: string;
|
||||
docs: Document[];
|
||||
}) => {
|
||||
if (docs.length === 0) {
|
||||
return docs;
|
||||
}
|
||||
|
||||
const docsWithContent = docs.filter(
|
||||
(doc) => doc.pageContent && doc.pageContent.length > 0,
|
||||
);
|
||||
|
||||
const docEmbeddings = await embeddings.embedDocuments(
|
||||
docsWithContent.map((doc) => doc.pageContent),
|
||||
);
|
||||
|
||||
const queryEmbedding = await embeddings.embedQuery(query);
|
||||
|
||||
const similarity = docEmbeddings.map((docEmbedding, i) => {
|
||||
const sim = computeSimilarity(queryEmbedding, docEmbedding);
|
||||
|
||||
return {
|
||||
index: i,
|
||||
similarity: sim,
|
||||
};
|
||||
});
|
||||
|
||||
const sortedDocs = similarity
|
||||
.sort((a, b) => b.similarity - a.similarity)
|
||||
.slice(0, 15)
|
||||
.filter((sim) => sim.similarity > 0.3)
|
||||
.map((sim) => docsWithContent[sim.index]);
|
||||
|
||||
return sortedDocs;
|
||||
};
|
||||
|
||||
type BasicChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const basicRedditSearchRetrieverChain = RunnableSequence.from([
|
||||
PromptTemplate.fromTemplate(basicRedditSearchRetrieverPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
if (input === 'not_needed') {
|
||||
return { query: '', docs: [] };
|
||||
}
|
||||
|
||||
const res = await searchSearxng(input, {
|
||||
language: 'en',
|
||||
engines: ['reddit'],
|
||||
});
|
||||
|
||||
const documents = res.results.map(
|
||||
(result) =>
|
||||
new Document({
|
||||
pageContent: result.content ? result.content : result.title,
|
||||
metadata: {
|
||||
title: result.title,
|
||||
url: result.url,
|
||||
...(result.img_src && { img_src: result.img_src }),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
return { query: input, docs: documents };
|
||||
}),
|
||||
]);
|
||||
|
||||
const basicRedditSearchAnsweringChain = RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
query: (input: BasicChainInput) => input.query,
|
||||
chat_history: (input: BasicChainInput) => input.chat_history,
|
||||
context: RunnableSequence.from([
|
||||
(input) => ({
|
||||
query: input.query,
|
||||
chat_history: formatChatHistoryAsString(input.chat_history),
|
||||
}),
|
||||
basicRedditSearchRetrieverChain
|
||||
.pipe(rerankDocs)
|
||||
.withConfig({
|
||||
runName: 'FinalSourceRetriever',
|
||||
})
|
||||
.pipe(processDocs),
|
||||
]),
|
||||
}),
|
||||
ChatPromptTemplate.fromMessages([
|
||||
['system', basicRedditSearchResponsePrompt],
|
||||
new MessagesPlaceholder('chat_history'),
|
||||
['user', '{query}'],
|
||||
]),
|
||||
chatLLM,
|
||||
strParser,
|
||||
]).withConfig({
|
||||
runName: 'FinalResponseGenerator',
|
||||
});
|
||||
|
||||
const basicRedditSearch = (query: string, history: BaseMessage[]) => {
|
||||
const emitter = new eventEmitter();
|
||||
|
||||
try {
|
||||
const stream = basicRedditSearchAnsweringChain.streamEvents(
|
||||
{
|
||||
chat_history: history,
|
||||
query: query,
|
||||
},
|
||||
{
|
||||
version: 'v1',
|
||||
},
|
||||
);
|
||||
|
||||
handleStream(stream, emitter);
|
||||
} catch (err) {
|
||||
emitter.emit(
|
||||
'error',
|
||||
JSON.stringify({ data: 'An error has occurred please try again later' }),
|
||||
);
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return emitter;
|
||||
};
|
||||
|
||||
const handleRedditSearch = (message: string, history: BaseMessage[]) => {
|
||||
const emitter = basicRedditSearch(message, history);
|
||||
return emitter;
|
||||
};
|
||||
|
||||
export default handleRedditSearch;
|
@@ -1,255 +0,0 @@
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import {
|
||||
PromptTemplate,
|
||||
ChatPromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
} from '@langchain/core/prompts';
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicSearchRetrieverPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
|
||||
If it is a writing task or a simple greeting such as hi or hello rather than a question, you need to return \`not_needed\` as the response.
|
||||
|
||||
Example:
|
||||
1. Follow up question: What is the capital of France?
|
||||
Rephrased: Capital of france
|
||||
|
||||
2. Follow up question: What is the population of New York City?
|
||||
Rephrased: Population of New York City
|
||||
|
||||
3. Follow up question: What is Docker?
|
||||
Rephrased: What is Docker
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
const basicWebSearchResponsePrompt = `
|
||||
You are Perplexica, an AI model who is expert at searching the web and answering user's queries.
|
||||
|
||||
Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
|
||||
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
|
||||
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
|
||||
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
|
||||
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
|
||||
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
|
||||
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
|
||||
|
||||
Anything inside the following \`context\` HTML block provided below is for your knowledge returned by the search engine and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
|
||||
talk about the context in your response.
|
||||
|
||||
<context>
|
||||
{context}
|
||||
</context>
|
||||
|
||||
If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
|
||||
Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
|
||||
`;
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const handleStream = async (
|
||||
stream: AsyncGenerator<StreamEvent, any, unknown>,
|
||||
emitter: eventEmitter,
|
||||
) => {
|
||||
for await (const event of stream) {
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalSourceRetriever'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'sources', data: event.data.output }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_stream' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'response', data: event.data.chunk }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit('end');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const processDocs = async (docs: Document[]) => {
|
||||
return docs
|
||||
.map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
|
||||
.join('\n');
|
||||
};
|
||||
|
||||
const rerankDocs = async ({
|
||||
query,
|
||||
docs,
|
||||
}: {
|
||||
query: string;
|
||||
docs: Document[];
|
||||
}) => {
|
||||
if (docs.length === 0) {
|
||||
return docs;
|
||||
}
|
||||
|
||||
const docsWithContent = docs.filter(
|
||||
(doc) => doc.pageContent && doc.pageContent.length > 0,
|
||||
);
|
||||
|
||||
const docEmbeddings = await embeddings.embedDocuments(
|
||||
docsWithContent.map((doc) => doc.pageContent),
|
||||
);
|
||||
|
||||
const queryEmbedding = await embeddings.embedQuery(query);
|
||||
|
||||
const similarity = docEmbeddings.map((docEmbedding, i) => {
|
||||
const sim = computeSimilarity(queryEmbedding, docEmbedding);
|
||||
|
||||
return {
|
||||
index: i,
|
||||
similarity: sim,
|
||||
};
|
||||
});
|
||||
|
||||
const sortedDocs = similarity
|
||||
.sort((a, b) => b.similarity - a.similarity)
|
||||
.filter((sim) => sim.similarity > 0.5)
|
||||
.slice(0, 15)
|
||||
.map((sim) => docsWithContent[sim.index]);
|
||||
|
||||
return sortedDocs;
|
||||
};
|
||||
|
||||
type BasicChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const basicWebSearchRetrieverChain = RunnableSequence.from([
|
||||
PromptTemplate.fromTemplate(basicSearchRetrieverPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
if (input === 'not_needed') {
|
||||
return { query: '', docs: [] };
|
||||
}
|
||||
|
||||
const res = await searchSearxng(input, {
|
||||
language: 'en',
|
||||
});
|
||||
|
||||
const documents = res.results.map(
|
||||
(result) =>
|
||||
new Document({
|
||||
pageContent: result.content,
|
||||
metadata: {
|
||||
title: result.title,
|
||||
url: result.url,
|
||||
...(result.img_src && { img_src: result.img_src }),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
return { query: input, docs: documents };
|
||||
}),
|
||||
]);
|
||||
|
||||
const basicWebSearchAnsweringChain = RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
query: (input: BasicChainInput) => input.query,
|
||||
chat_history: (input: BasicChainInput) => input.chat_history,
|
||||
context: RunnableSequence.from([
|
||||
(input) => ({
|
||||
query: input.query,
|
||||
chat_history: formatChatHistoryAsString(input.chat_history),
|
||||
}),
|
||||
basicWebSearchRetrieverChain
|
||||
.pipe(rerankDocs)
|
||||
.withConfig({
|
||||
runName: 'FinalSourceRetriever',
|
||||
})
|
||||
.pipe(processDocs),
|
||||
]),
|
||||
}),
|
||||
ChatPromptTemplate.fromMessages([
|
||||
['system', basicWebSearchResponsePrompt],
|
||||
new MessagesPlaceholder('chat_history'),
|
||||
['user', '{query}'],
|
||||
]),
|
||||
chatLLM,
|
||||
strParser,
|
||||
]).withConfig({
|
||||
runName: 'FinalResponseGenerator',
|
||||
});
|
||||
|
||||
const basicWebSearch = (query: string, history: BaseMessage[]) => {
|
||||
const emitter = new eventEmitter();
|
||||
|
||||
try {
|
||||
const stream = basicWebSearchAnsweringChain.streamEvents(
|
||||
{
|
||||
chat_history: history,
|
||||
query: query,
|
||||
},
|
||||
{
|
||||
version: 'v1',
|
||||
},
|
||||
);
|
||||
|
||||
handleStream(stream, emitter);
|
||||
} catch (err) {
|
||||
emitter.emit(
|
||||
'error',
|
||||
JSON.stringify({ data: 'An error has occurred please try again later' }),
|
||||
);
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return emitter;
|
||||
};
|
||||
|
||||
const handleWebSearch = (message: string, history: BaseMessage[]) => {
|
||||
const emitter = basicWebSearch(message, history);
|
||||
return emitter;
|
||||
};
|
||||
|
||||
export default handleWebSearch;
|
@@ -1,212 +0,0 @@
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import {
|
||||
PromptTemplate,
|
||||
ChatPromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
} from '@langchain/core/prompts';
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import eventEmitter from 'events';
|
||||
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicWolframAlphaSearchRetrieverPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
|
||||
If it is a writing task or a simple greeting such as hi or hello rather than a question, you need to return \`not_needed\` as the response.
|
||||
|
||||
Example:
|
||||
1. Follow up question: What is the atomic radius of S?
|
||||
Rephrased: Atomic radius of S
|
||||
|
||||
2. Follow up question: What is linear algebra?
|
||||
Rephrased: Linear algebra
|
||||
|
||||
3. Follow up question: What is the third law of thermodynamics?
|
||||
Rephrased: Third law of thermodynamics
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
const basicWolframAlphaSearchResponsePrompt = `
|
||||
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
|
||||
|
||||
Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
|
||||
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
|
||||
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
|
||||
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
|
||||
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
|
||||
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
|
||||
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
|
||||
|
||||
Anything inside the following \`context\` HTML block provided below is for your knowledge returned by Wolfram Alpha and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
|
||||
talk about the context in your response.
|
||||
|
||||
<context>
|
||||
{context}
|
||||
</context>
|
||||
|
||||
If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
|
||||
Anything between the \`context\` is retrieved from Wolfram Alpha and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
|
||||
`;
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const handleStream = async (
|
||||
stream: AsyncGenerator<StreamEvent, any, unknown>,
|
||||
emitter: eventEmitter,
|
||||
) => {
|
||||
for await (const event of stream) {
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalSourceRetriever'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'sources', data: event.data.output }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_stream' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'response', data: event.data.chunk }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit('end');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const processDocs = async (docs: Document[]) => {
|
||||
return docs
|
||||
.map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
|
||||
.join('\n');
|
||||
};
|
||||
|
||||
type BasicChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const basicWolframAlphaSearchRetrieverChain = RunnableSequence.from([
|
||||
PromptTemplate.fromTemplate(basicWolframAlphaSearchRetrieverPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
if (input === 'not_needed') {
|
||||
return { query: '', docs: [] };
|
||||
}
|
||||
|
||||
const res = await searchSearxng(input, {
|
||||
language: 'en',
|
||||
engines: ['wolframalpha'],
|
||||
});
|
||||
|
||||
const documents = res.results.map(
|
||||
(result) =>
|
||||
new Document({
|
||||
pageContent: result.content,
|
||||
metadata: {
|
||||
title: result.title,
|
||||
url: result.url,
|
||||
...(result.img_src && { img_src: result.img_src }),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
return { query: input, docs: documents };
|
||||
}),
|
||||
]);
|
||||
|
||||
const basicWolframAlphaSearchAnsweringChain = RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
query: (input: BasicChainInput) => input.query,
|
||||
chat_history: (input: BasicChainInput) => input.chat_history,
|
||||
context: RunnableSequence.from([
|
||||
(input) => ({
|
||||
query: input.query,
|
||||
chat_history: formatChatHistoryAsString(input.chat_history),
|
||||
}),
|
||||
basicWolframAlphaSearchRetrieverChain
|
||||
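// Note: unlike the other focus modes, Wolfram Alpha results are not
// reranked with embeddings; they are passed through to processDocs as-is.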
.pipe(({ query, docs }) => {
|
||||
return docs;
|
||||
})
|
||||
.withConfig({
|
||||
runName: 'FinalSourceRetriever',
|
||||
})
|
||||
.pipe(processDocs),
|
||||
]),
|
||||
}),
|
||||
ChatPromptTemplate.fromMessages([
|
||||
['system', basicWolframAlphaSearchResponsePrompt],
|
||||
new MessagesPlaceholder('chat_history'),
|
||||
['user', '{query}'],
|
||||
]),
|
||||
chatLLM,
|
||||
strParser,
|
||||
]).withConfig({
|
||||
runName: 'FinalResponseGenerator',
|
||||
});
|
||||
|
||||
const basicWolframAlphaSearch = (query: string, history: BaseMessage[]) => {
|
||||
const emitter = new eventEmitter();
|
||||
|
||||
try {
|
||||
const stream = basicWolframAlphaSearchAnsweringChain.streamEvents(
|
||||
{
|
||||
chat_history: history,
|
||||
query: query,
|
||||
},
|
||||
{
|
||||
version: 'v1',
|
||||
},
|
||||
);
|
||||
|
||||
handleStream(stream, emitter);
|
||||
} catch (err) {
|
||||
emitter.emit(
|
||||
'error',
|
||||
JSON.stringify({ data: 'An error has occurred please try again later' }),
|
||||
);
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return emitter;
|
||||
};
|
||||
|
||||
const handleWolframAlphaSearch = (message: string, history: BaseMessage[]) => {
|
||||
const emitter = basicWolframAlphaSearch(message, history);
|
||||
return emitter;
|
||||
};
|
||||
|
||||
export default handleWolframAlphaSearch;
|
@@ -1,86 +0,0 @@
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import {
|
||||
ChatPromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
} from '@langchain/core/prompts';
|
||||
import { RunnableSequence } from '@langchain/core/runnables';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
||||
import eventEmitter from 'events';
|
||||
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const writingAssistantPrompt = `
|
||||
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Writing Assistant', this means you will be helping the user write a response to a given query.
|
||||
Since you are a writing assistant, you do not perform web searches. If you think you lack the information to answer the query, you can ask the user for more information or suggest that they switch to a different focus mode.
|
||||
`;
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const handleStream = async (
|
||||
stream: AsyncGenerator<StreamEvent, any, unknown>,
|
||||
emitter: eventEmitter,
|
||||
) => {
|
||||
for await (const event of stream) {
|
||||
if (
|
||||
event.event === 'on_chain_stream' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'response', data: event.data.chunk }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit('end');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const writingAssistantChain = RunnableSequence.from([
|
||||
ChatPromptTemplate.fromMessages([
|
||||
['system', writingAssistantPrompt],
|
||||
new MessagesPlaceholder('chat_history'),
|
||||
['user', '{query}'],
|
||||
]),
|
||||
chatLLM,
|
||||
strParser,
|
||||
]).withConfig({
|
||||
runName: 'FinalResponseGenerator',
|
||||
});
|
||||
|
||||
const handleWritingAssistant = (query: string, history: BaseMessage[]) => {
|
||||
const emitter = new eventEmitter();
|
||||
|
||||
try {
|
||||
const stream = writingAssistantChain.streamEvents(
|
||||
{
|
||||
chat_history: history,
|
||||
query: query,
|
||||
},
|
||||
{
|
||||
version: 'v1',
|
||||
},
|
||||
);
|
||||
|
||||
handleStream(stream, emitter);
|
||||
} catch (err) {
|
||||
emitter.emit(
|
||||
'error',
|
||||
JSON.stringify({ data: 'An error has occurred please try again later' }),
|
||||
);
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return emitter;
|
||||
};
|
||||
|
||||
export default handleWritingAssistant;
|
@@ -1,256 +0,0 @@
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import {
|
||||
PromptTemplate,
|
||||
ChatPromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
} from '@langchain/core/prompts';
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicYoutubeSearchRetrieverPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
|
||||
If it is a writing task or a simple greeting such as hi or hello rather than a question, you need to return \`not_needed\` as the response.
|
||||
|
||||
Example:
|
||||
1. Follow up question: How does an A.C work?
|
||||
Rephrased: A.C working
|
||||
|
||||
2. Follow up question: Linear algebra explanation video
|
||||
Rephrased: What is linear algebra?
|
||||
|
||||
3. Follow up question: What is theory of relativity?
|
||||
Rephrased: What is theory of relativity?
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
const basicYoutubeSearchResponsePrompt = `
|
||||
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Youtube', this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript.
|
||||
|
||||
Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
|
||||
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
|
||||
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
|
||||
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
|
||||
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
|
||||
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
|
||||
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
|
||||
|
||||
Anything inside the following \`context\` HTML block provided below is for your knowledge returned by Youtube and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
|
||||
talk about the context in your response.
|
||||
|
||||
<context>
|
||||
{context}
|
||||
</context>
|
||||
|
||||
If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
|
||||
Anything between the \`context\` is retrieved from Youtube and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
|
||||
`;
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const handleStream = async (
|
||||
stream: AsyncGenerator<StreamEvent, any, unknown>,
|
||||
emitter: eventEmitter,
|
||||
) => {
|
||||
for await (const event of stream) {
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalSourceRetriever'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'sources', data: event.data.output }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_stream' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit(
|
||||
'data',
|
||||
JSON.stringify({ type: 'response', data: event.data.chunk }),
|
||||
);
|
||||
}
|
||||
if (
|
||||
event.event === 'on_chain_end' &&
|
||||
event.name === 'FinalResponseGenerator'
|
||||
) {
|
||||
emitter.emit('end');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const processDocs = async (docs: Document[]) => {
|
||||
return docs
|
||||
.map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
|
||||
.join('\n');
|
||||
};
|
||||
|
||||
const rerankDocs = async ({
|
||||
query,
|
||||
docs,
|
||||
}: {
|
||||
query: string;
|
||||
docs: Document[];
|
||||
}) => {
|
||||
if (docs.length === 0) {
|
||||
return docs;
|
||||
}
|
||||
|
||||
const docsWithContent = docs.filter(
|
||||
(doc) => doc.pageContent && doc.pageContent.length > 0,
|
||||
);
|
||||
|
||||
const docEmbeddings = await embeddings.embedDocuments(
|
||||
docsWithContent.map((doc) => doc.pageContent),
|
||||
);
|
||||
|
||||
const queryEmbedding = await embeddings.embedQuery(query);
|
||||
|
||||
const similarity = docEmbeddings.map((docEmbedding, i) => {
|
||||
const sim = computeSimilarity(queryEmbedding, docEmbedding);
|
||||
|
||||
return {
|
||||
index: i,
|
||||
similarity: sim,
|
||||
};
|
||||
});
|
||||
|
||||
const sortedDocs = similarity
|
||||
.sort((a, b) => b.similarity - a.similarity)
|
||||
.slice(0, 15)
|
||||
.filter((sim) => sim.similarity > 0.3)
|
||||
.map((sim) => docsWithContent[sim.index]);
|
||||
|
||||
return sortedDocs;
|
||||
};
|
||||
|
||||
type BasicChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const basicYoutubeSearchRetrieverChain = RunnableSequence.from([
|
||||
PromptTemplate.fromTemplate(basicYoutubeSearchRetrieverPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
if (input === 'not_needed') {
|
||||
return { query: '', docs: [] };
|
||||
}
|
||||
|
||||
const res = await searchSearxng(input, {
|
||||
language: 'en',
|
||||
engines: ['youtube'],
|
||||
});
|
||||
|
||||
const documents = res.results.map(
|
||||
(result) =>
|
||||
new Document({
|
||||
pageContent: result.content ? result.content : result.title,
|
||||
metadata: {
|
||||
title: result.title,
|
||||
url: result.url,
|
||||
...(result.img_src && { img_src: result.img_src }),
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
return { query: input, docs: documents };
|
||||
}),
|
||||
]);
|
||||
|
||||
const basicYoutubeSearchAnsweringChain = RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
query: (input: BasicChainInput) => input.query,
|
||||
chat_history: (input: BasicChainInput) => input.chat_history,
|
||||
context: RunnableSequence.from([
|
||||
(input) => ({
|
||||
query: input.query,
|
||||
chat_history: formatChatHistoryAsString(input.chat_history),
|
||||
}),
|
||||
basicYoutubeSearchRetrieverChain
|
||||
.pipe(rerankDocs)
|
||||
.withConfig({
|
||||
runName: 'FinalSourceRetriever',
|
||||
})
|
||||
.pipe(processDocs),
|
||||
]),
|
||||
}),
|
||||
ChatPromptTemplate.fromMessages([
|
||||
['system', basicYoutubeSearchResponsePrompt],
|
||||
new MessagesPlaceholder('chat_history'),
|
||||
['user', '{query}'],
|
||||
]),
|
||||
chatLLM,
|
||||
strParser,
|
||||
]).withConfig({
|
||||
runName: 'FinalResponseGenerator',
|
||||
});
|
||||
|
||||
const basicYoutubeSearch = (query: string, history: BaseMessage[]) => {
|
||||
const emitter = new eventEmitter();
|
||||
|
||||
try {
|
||||
const stream = basicYoutubeSearchAnsweringChain.streamEvents(
|
||||
{
|
||||
chat_history: history,
|
||||
query: query,
|
||||
},
|
||||
{
|
||||
version: 'v1',
|
||||
},
|
||||
);
|
||||
|
||||
handleStream(stream, emitter);
|
||||
} catch (err) {
|
||||
emitter.emit(
|
||||
'error',
|
||||
JSON.stringify({ data: 'An error has occurred please try again later' }),
|
||||
);
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return emitter;
|
||||
};
|
||||
|
||||
const handleYoutubeSearch = (message: string, history: BaseMessage[]) => {
|
||||
const emitter = basicYoutubeSearch(message, history);
|
||||
return emitter;
|
||||
};
|
||||
|
||||
export default handleYoutubeSearch;
|
16
src/app.ts
@@ -3,6 +3,10 @@ import express from 'express';
|
||||
import cors from 'cors';
|
||||
import http from 'http';
|
||||
import routes from './routes';
|
||||
import { getPort } from './config';
|
||||
import logger from './utils/logger';
|
||||
|
||||
const port = getPort();
|
||||
|
||||
const app = express();
|
||||
const server = http.createServer(app);
|
||||
@@ -19,8 +23,16 @@ app.get('/api', (_, res) => {
|
||||
res.status(200).json({ status: 'ok' });
|
||||
});
|
||||
|
||||
server.listen(process.env.PORT!, () => {
|
||||
console.log(`API server started on port ${process.env.PORT}`);
|
||||
server.listen(port, () => {
|
||||
logger.info(`Server is running on port ${port}`);
|
||||
});
|
||||
|
||||
startWebSocketServer(server);
|
||||
|
||||
process.on('uncaughtException', (err, origin) => {
|
||||
logger.error(`Uncaught Exception at ${origin}: ${err}`);
|
||||
});
|
||||
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
|
||||
});
|
||||
|
84
src/chains/imageSearchAgent.ts
Normal file
@@ -0,0 +1,84 @@
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { searchSearxng } from '../lib/searxng';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
|
||||
const imageSearchChainPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
|
||||
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
|
||||
|
||||
Example:
|
||||
1. Follow up question: What is a cat?
|
||||
Rephrased: A cat
|
||||
|
||||
2. Follow up question: What is a car? How does it work?
|
||||
Rephrased: Car working
|
||||
|
||||
3. Follow up question: How does an AC work?
|
||||
Rephrased: AC working
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
type ImageSearchChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const createImageSearchChain = (llm: BaseChatModel) => {
|
||||
return RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
chat_history: (input: ImageSearchChainInput) => {
|
||||
return formatChatHistoryAsString(input.chat_history);
|
||||
},
|
||||
query: (input: ImageSearchChainInput) => {
|
||||
return input.query;
|
||||
},
|
||||
}),
|
||||
PromptTemplate.fromTemplate(imageSearchChainPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
const res = await searchSearxng(input, {
|
||||
engines: ['bing images', 'google images'],
|
||||
});
|
||||
|
||||
const images = [];
|
||||
|
||||
res.results.forEach((result) => {
|
||||
if (result.img_src && result.url && result.title) {
|
||||
images.push({
|
||||
img_src: result.img_src,
|
||||
url: result.url,
|
||||
title: result.title,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
return images.slice(0, 10);
|
||||
}),
|
||||
]);
|
||||
};
|
||||
|
||||
const handleImageSearch = (
|
||||
input: ImageSearchChainInput,
|
||||
llm: BaseChatModel,
|
||||
) => {
|
||||
const imageSearchChain = createImageSearchChain(llm);
|
||||
return imageSearchChain.invoke(input);
|
||||
};
|
||||
|
||||
export default handleImageSearch;
|
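The chain is now built per-request around whichever BaseChatModel the caller provides. A usage sketch — ChatOpenAI and the model name are illustrative choices, not mandated by this file:

// Illustrative caller; ChatOpenAI and the model name are assumptions.
import { ChatOpenAI } from '@langchain/openai';
import handleImageSearch from './chains/imageSearchAgent';

const llm = new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 });

const run = async () => {
  const images = await handleImageSearch(
    { chat_history: [], query: 'Northern lights over Iceland' },
    llm,
  );
  console.log(images); // up to 10 { img_src, url, title } objects
};

run();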
55
src/chains/suggestionGeneratorAgent.ts
Normal file
@@ -0,0 +1,55 @@
|
||||
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
|
||||
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import { ChatOpenAI } from '@langchain/openai';
|
||||
|
||||
const suggestionGeneratorPrompt = `
|
||||
You are an AI suggestion generator for an AI-powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestions should be relevant to the conversation and phrased so the user can use them to ask the chat model for more information.
|
||||
You need to make sure the suggestions are relevant to the conversation and helpful to the user. Keep in mind that the user might use these suggestions to ask a chat model for more information.
|
||||
Make sure the suggestions are medium in length and are informative and relevant to the conversation.
|
||||
|
||||
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:
|
||||
|
||||
<suggestions>
|
||||
Tell me more about SpaceX and their recent projects
|
||||
What is the latest news on SpaceX?
|
||||
Who is the CEO of SpaceX?
|
||||
</suggestions>
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
`;
|
||||
|
||||
type SuggestionGeneratorInput = {
|
||||
chat_history: BaseMessage[];
|
||||
};
|
||||
|
||||
const outputParser = new ListLineOutputParser({
|
||||
key: 'suggestions',
|
||||
});
|
||||
|
||||
const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
|
||||
return RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
chat_history: (input: SuggestionGeneratorInput) =>
|
||||
formatChatHistoryAsString(input.chat_history),
|
||||
}),
|
||||
PromptTemplate.fromTemplate(suggestionGeneratorPrompt),
|
||||
llm,
|
||||
outputParser,
|
||||
]);
|
||||
};
|
||||
|
||||
const generateSuggestions = (
|
||||
input: SuggestionGeneratorInput,
|
||||
llm: BaseChatModel,
|
||||
) => {
|
||||
(llm as unknown as ChatOpenAI).temperature = 0;
|
||||
const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
|
||||
return suggestionGeneratorChain.invoke(input);
|
||||
};
|
||||
|
||||
export default generateSuggestions;
|
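ListLineOutputParser lives in ../lib/outputParsers/listLineOutputParser, which this compare view does not include. Judging from the prompt above (newline-separated items between <suggestions> tags) and the { key: 'suggestions' } constructor argument, a sketch of what such a parser plausibly does — the class body is an assumption — is:

// Hypothetical sketch of ../lib/outputParsers/listLineOutputParser.
// Extracts newline-separated items between <key> and </key> XML tags.
import { BaseOutputParser } from '@langchain/core/output_parsers';

class ListLineOutputParser extends BaseOutputParser<string[]> {
  lc_namespace = ['langchain', 'output_parsers', 'list_line'];
  private key: string;

  constructor(args: { key: string }) {
    super();
    this.key = args.key;
  }

  async parse(text: string): Promise<string[]> {
    const start = text.indexOf(`<${this.key}>`);
    const end = text.indexOf(`</${this.key}>`);
    if (start === -1 || end === -1) return [];
    return text
      .slice(start + this.key.length + 2, end) // skip past "<key>"
      .split('\n')
      .map((line) => line.trim())
      .filter((line) => line.length > 0);
  }

  getFormatInstructions(): string {
    return `Wrap your list in <${this.key}> tags, one item per line.`;
  }
}

export default ListLineOutputParser;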
90
src/chains/videoSearchAgent.ts
Normal file
@@ -0,0 +1,90 @@
|
||||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { searchSearxng } from '../lib/searxng';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
|
||||
const VideoSearchChainPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
|
||||
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
|
||||
|
||||
Example:
|
||||
1. Follow up question: How does a car work?
|
||||
Rephrased: How does a car work?
|
||||
|
||||
2. Follow up question: What is the theory of relativity?
|
||||
Rephrased: What is theory of relativity
|
||||
|
||||
3. Follow up question: How does an AC work?
|
||||
Rephrased: How does an AC work
|
||||
|
||||
Conversation:
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
|
||||
type VideoSearchChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
query: string;
|
||||
};
|
||||
|
||||
const strParser = new StringOutputParser();
|
||||
|
||||
const createVideoSearchChain = (llm: BaseChatModel) => {
|
||||
return RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
chat_history: (input: VideoSearchChainInput) => {
|
||||
return formatChatHistoryAsString(input.chat_history);
|
||||
},
|
||||
query: (input: VideoSearchChainInput) => {
|
||||
return input.query;
|
||||
},
|
||||
}),
|
||||
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
const res = await searchSearxng(input, {
|
||||
engines: ['youtube'],
|
||||
});
|
||||
|
||||
const videos = [];
|
||||
|
||||
res.results.forEach((result) => {
|
||||
if (
|
||||
result.thumbnail &&
|
||||
result.url &&
|
||||
result.title &&
|
||||
result.iframe_src
|
||||
) {
|
||||
videos.push({
|
||||
img_src: result.thumbnail,
|
||||
url: result.url,
|
||||
title: result.title,
|
||||
iframe_src: result.iframe_src,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
return videos.slice(0, 10);
|
||||
}),
|
||||
]);
|
||||
};
|
||||
|
||||
const handleVideoSearch = (
|
||||
input: VideoSearchChainInput,
|
||||
llm: BaseChatModel,
|
||||
) => {
|
||||
const VideoSearchChain = createVideoSearchChain(llm);
|
||||
return VideoSearchChain.invoke(input);
|
||||
};
|
||||
|
||||
export default handleVideoSearch;
|
76
src/config.ts
Normal file
@@ -0,0 +1,76 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import toml from '@iarna/toml';
|
||||
|
||||
const configFileName = 'config.toml';
|
||||
|
||||
interface Config {
|
||||
GENERAL: {
|
||||
PORT: number;
|
||||
SIMILARITY_MEASURE: string;
|
||||
KEEP_ALIVE: string;
|
||||
};
|
||||
API_KEYS: {
|
||||
OPENAI: string;
|
||||
GROQ: string;
|
||||
ANTHROPIC: string;
|
||||
};
|
||||
API_ENDPOINTS: {
|
||||
SEARXNG: string;
|
||||
OLLAMA: string;
|
||||
};
|
||||
}
|
||||
|
||||
type RecursivePartial<T> = {
|
||||
[P in keyof T]?: RecursivePartial<T[P]>;
|
||||
};
|
||||
|
||||
const loadConfig = () =>
|
||||
toml.parse(
|
||||
fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
|
||||
) as any as Config;
|
||||
|
||||
export const getPort = () => loadConfig().GENERAL.PORT;
|
||||
|
||||
export const getSimilarityMeasure = () =>
|
||||
loadConfig().GENERAL.SIMILARITY_MEASURE;
|
||||
|
||||
export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
|
||||
|
||||
export const getOpenaiApiKey = () => loadConfig().API_KEYS.OPENAI;
|
||||
|
||||
export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
|
||||
|
||||
export const getAnthropicApiKey = () => loadConfig().API_KEYS.ANTHROPIC;
|
||||
|
||||
export const getSearxngApiEndpoint = () =>
|
||||
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
|
||||
|
||||
export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
|
||||
|
||||
export const updateConfig = (config: RecursivePartial<Config>) => {
|
||||
const currentConfig = loadConfig();
|
||||
|
||||
for (const key in currentConfig) {
|
||||
if (!config[key]) config[key] = {};
|
||||
|
||||
if (typeof currentConfig[key] === 'object' && currentConfig[key] !== null) {
|
||||
for (const nestedKey in currentConfig[key]) {
|
||||
if (
|
||||
!config[key][nestedKey] &&
|
||||
currentConfig[key][nestedKey] &&
|
||||
config[key][nestedKey] !== ''
|
||||
) {
|
||||
config[key][nestedKey] = currentConfig[key][nestedKey];
|
||||
}
|
||||
}
|
||||
} else if (currentConfig[key] && config[key] !== '') {
|
||||
config[key] = currentConfig[key];
|
||||
}
|
||||
}
|
||||
|
||||
fs.writeFileSync(
|
||||
path.join(__dirname, `../${configFileName}`),
|
||||
toml.stringify(config),
|
||||
);
|
||||
};
|
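updateConfig deep-merges the incoming partial config with what is already on disk — existing values are kept wherever the caller leaves a field unset — and then rewrites config.toml. A usage sketch with a placeholder key:

// Illustrative call; the API key value is a placeholder.
import { updateConfig } from './config';

updateConfig({
  API_KEYS: {
    OPENAI: 'sk-...', // only this field changes; the rest is kept from disk
  },
});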
@@ -1,69 +0,0 @@
|
||||
import { z } from 'zod';
|
||||
import { OpenAI } from '@langchain/openai';
|
||||
import { RunnableSequence } from '@langchain/core/runnables';
|
||||
import { StructuredOutputParser } from 'langchain/output_parsers';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
|
||||
const availableAgents = [
|
||||
{
|
||||
name: 'webSearch',
|
||||
description:
|
||||
'It is expert at searching the web for information and answering user queries',
|
||||
},
|
||||
/* {
|
||||
name: 'academicSearch',
|
||||
description:
|
||||
'It is expert at searching academic databases for information and answering user queries. It is particularly good at finding research papers and articles on topics like science, engineering, and technology. Use this instead of wolframAlphaSearch if the user query is not mathematical or scientific in nature',
|
||||
},
|
||||
{
|
||||
name: 'youtubeSearch',
|
||||
description:
|
||||
'This model is expert at finding videos on youtube based on user queries',
|
||||
},
|
||||
{
|
||||
name: 'wolframAlphaSearch',
|
||||
description:
|
||||
'This model is expert at finding answers to mathematical and scientific questions based on user queries.',
|
||||
},
|
||||
{
|
||||
name: 'redditSearch',
|
||||
description:
|
||||
'This model is expert at finding posts and discussions on reddit based on user queries',
|
||||
},
|
||||
{
|
||||
name: 'writingAssistant',
|
||||
description:
|
||||
'If there is no need for searching, this model is expert at generating text based on user queries',
|
||||
}, */
|
||||
];
|
||||
|
||||
const parser = StructuredOutputParser.fromZodSchema(
|
||||
z.object({
|
||||
agent: z.string().describe('The name of the selected agent'),
|
||||
}),
|
||||
);
|
||||
|
||||
const prompt = `
|
||||
You are an AI model who is expert at finding suitable agents for user queries. The available agents are:
|
||||
${availableAgents.map((agent) => `- ${agent.name}: ${agent.description}`).join('\n')}
|
||||
|
||||
Your task is to find the most suitable agent for the following query: {query}
|
||||
|
||||
{format_instructions}
|
||||
`;
|
||||
|
||||
const chain = RunnableSequence.from([
|
||||
PromptTemplate.fromTemplate(prompt),
|
||||
new OpenAI({ temperature: 0 }),
|
||||
parser,
|
||||
]);
|
||||
|
||||
const pickSuitableAgent = async (query: string) => {
|
||||
const res = await chain.invoke({
|
||||
query,
|
||||
format_instructions: parser.getFormatInstructions(),
|
||||
});
|
||||
return res.agent;
|
||||
};
|
||||
|
||||
export default pickSuitableAgent;
|
src/db/index.ts (new file, 10 lines)
@@ -0,0 +1,10 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';

const sqlite = new Database('data/db.sqlite');
const db = drizzle(sqlite, {
  schema: schema,
});

export default db;
src/db/schema.ts (new file, 28 lines)
@@ -0,0 +1,28 @@
import { sql } from 'drizzle-orm';
import { text, integer, sqliteTable } from 'drizzle-orm/sqlite-core';

export const messages = sqliteTable('messages', {
  id: integer('id').primaryKey(),
  content: text('content').notNull(),
  chatId: text('chatId').notNull(),
  messageId: text('messageId').notNull(),
  role: text('type', { enum: ['assistant', 'user'] }),
  metadata: text('metadata', {
    mode: 'json',
  }),
});

interface File {
  name: string;
  fileId: string;
}

export const chats = sqliteTable('chats', {
  id: text('id').primaryKey(),
  title: text('title').notNull(),
  createdAt: text('createdAt').notNull(),
  focusMode: text('focusMode').notNull(),
  files: text('files', { mode: 'json' })
    .$type<File[]>()
    .default(sql`'[]'`),
});
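A minimal sketch of how these tables are used together, assuming the db instance from src/db/index.ts above and an async context; the id, title, and focus mode values are hypothetical:

import { eq } from 'drizzle-orm';
import db from './index';
import { chats, messages } from './schema';

// Insert a chat row; `files` falls back to its '[]' default.
await db.insert(chats).values({
  id: 'chat-1',
  title: 'What is Docker?',
  createdAt: new Date().toISOString(),
  focusMode: 'webSearch',
});

// Read back the messages that belong to it.
const chatMessages = await db
  .select()
  .from(messages)
  .where(eq(messages.chatId, 'chat-1'));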
src/lib/huggingfaceTransformer.ts (new file, 82 lines)
@@ -0,0 +1,82 @@
import { Embeddings, type EmbeddingsParams } from '@langchain/core/embeddings';
import { chunkArray } from '@langchain/core/utils/chunk_array';

export interface HuggingFaceTransformersEmbeddingsParams
  extends EmbeddingsParams {
  modelName: string;

  model: string;

  timeout?: number;

  batchSize?: number;

  stripNewLines?: boolean;
}

export class HuggingFaceTransformersEmbeddings
  extends Embeddings
  implements HuggingFaceTransformersEmbeddingsParams
{
  modelName = 'Xenova/all-MiniLM-L6-v2';

  model = 'Xenova/all-MiniLM-L6-v2';

  batchSize = 512;

  stripNewLines = true;

  timeout?: number;

  private pipelinePromise: Promise<any>;

  constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
    super(fields ?? {});

    this.modelName = fields?.model ?? fields?.modelName ?? this.model;
    this.model = this.modelName;
    this.stripNewLines = fields?.stripNewLines ?? this.stripNewLines;
    this.timeout = fields?.timeout;
  }

  async embedDocuments(texts: string[]): Promise<number[][]> {
    const batches = chunkArray(
      this.stripNewLines ? texts.map((t) => t.replace(/\n/g, ' ')) : texts,
      this.batchSize,
    );

    const batchRequests = batches.map((batch) => this.runEmbedding(batch));
    const batchResponses = await Promise.all(batchRequests);
    const embeddings: number[][] = [];

    for (let i = 0; i < batchResponses.length; i += 1) {
      const batchResponse = batchResponses[i];
      for (let j = 0; j < batchResponse.length; j += 1) {
        embeddings.push(batchResponse[j]);
      }
    }

    return embeddings;
  }

  async embedQuery(text: string): Promise<number[]> {
    const data = await this.runEmbedding([
      this.stripNewLines ? text.replace(/\n/g, ' ') : text,
    ]);
    return data[0];
  }

  private async runEmbedding(texts: string[]) {
    const { pipeline } = await import('@xenova/transformers');

    const pipe = await (this.pipelinePromise ??= pipeline(
      'feature-extraction',
      this.model,
    ));

    return this.caller.call(async () => {
      const output = await pipe(texts, { pooling: 'mean', normalize: true });
      return output.tolist();
    });
  }
}
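A minimal usage sketch, assuming @xenova/transformers is installed and an async context: the first call lazily builds the feature-extraction pipeline, and later calls reuse it through pipelinePromise.

import { HuggingFaceTransformersEmbeddings } from './huggingfaceTransformer';

const embeddings = new HuggingFaceTransformersEmbeddings({
  modelName: 'Xenova/bge-small-en-v1.5',
});

// Downloads/loads the model on first use, then reuses the cached pipeline.
const vector = await embeddings.embedQuery('What is Docker?');
console.log(vector.length); // embedding dimensionality of the chosen model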
src/lib/outputParsers/lineOutputParser.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
import { BaseOutputParser } from '@langchain/core/output_parsers';

interface LineOutputParserArgs {
  key?: string;
}

class LineOutputParser extends BaseOutputParser<string> {
  private key = 'questions';

  constructor(args?: LineOutputParserArgs) {
    super();
    this.key = args?.key ?? this.key;
  }

  static lc_name() {
    return 'LineOutputParser';
  }

  lc_namespace = ['langchain', 'output_parsers', 'line_output_parser'];

  async parse(text: string): Promise<string> {
    const regex = /^(\s*(-|\*|\d+\.\s|\d+\)\s|\u2022)\s*)+/;
    const startKeyIndex = text.indexOf(`<${this.key}>`);
    const endKeyIndex = text.indexOf(`</${this.key}>`);

    if (startKeyIndex === -1 || endKeyIndex === -1) {
      return '';
    }

    const questionsStartIndex =
      startKeyIndex === -1 ? 0 : startKeyIndex + `<${this.key}>`.length;
    const questionsEndIndex = endKeyIndex === -1 ? text.length : endKeyIndex;
    const line = text
      .slice(questionsStartIndex, questionsEndIndex)
      .trim()
      .replace(regex, '');

    return line;
  }

  getFormatInstructions(): string {
    throw new Error('Not implemented.');
  }
}

export default LineOutputParser;
src/lib/outputParsers/listLineOutputParser.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
import { BaseOutputParser } from '@langchain/core/output_parsers';

interface LineListOutputParserArgs {
  key?: string;
}

class LineListOutputParser extends BaseOutputParser<string[]> {
  private key = 'questions';

  constructor(args?: LineListOutputParserArgs) {
    super();
    this.key = args?.key ?? this.key;
  }

  static lc_name() {
    return 'LineListOutputParser';
  }

  lc_namespace = ['langchain', 'output_parsers', 'line_list_output_parser'];

  async parse(text: string): Promise<string[]> {
    const regex = /^(\s*(-|\*|\d+\.\s|\d+\)\s|\u2022)\s*)+/;
    const startKeyIndex = text.indexOf(`<${this.key}>`);
    const endKeyIndex = text.indexOf(`</${this.key}>`);

    if (startKeyIndex === -1 && endKeyIndex === -1) {
      return [];
    }

    const questionsStartIndex =
      startKeyIndex === -1 ? 0 : startKeyIndex + `<${this.key}>`.length;
    const questionsEndIndex = endKeyIndex === -1 ? text.length : endKeyIndex;
    const lines = text
      .slice(questionsStartIndex, questionsEndIndex)
      .trim()
      .split('\n')
      .filter((line) => line.trim() !== '')
      .map((line) => line.replace(regex, ''));

    return lines;
  }

  getFormatInstructions(): string {
    throw new Error('Not implemented.');
  }
}

export default LineListOutputParser;
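A minimal sketch of the parser in action, run inside an async context: it pulls the lines between the <questions> tags and strips list bullets with the regex above.

import LineListOutputParser from './listLineOutputParser';

const parser = new LineListOutputParser({ key: 'questions' });

const lines = await parser.parse(`<questions>
- What is Docker used for?
- How do containers differ from VMs?
</questions>`);

console.log(lines); // ['What is Docker used for?', 'How do containers differ from VMs?']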
src/lib/providers/anthropic.ts (new file, 51 lines)
@@ -0,0 +1,51 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { getAnthropicApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadAnthropicChatModels = async () => {
  const anthropicApiKey = getAnthropicApiKey();

  if (!anthropicApiKey) return {};

  try {
    const chatModels = {
      'claude-3-5-sonnet-20240620': {
        displayName: 'Claude 3.5 Sonnet',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-5-sonnet-20240620',
        }),
      },
      'claude-3-opus-20240229': {
        displayName: 'Claude 3 Opus',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-opus-20240229',
        }),
      },
      'claude-3-sonnet-20240229': {
        displayName: 'Claude 3 Sonnet',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-sonnet-20240229',
        }),
      },
      'claude-3-haiku-20240307': {
        displayName: 'Claude 3 Haiku',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-haiku-20240307',
        }),
      },
    };

    return chatModels;
  } catch (err) {
    logger.error(`Error loading Anthropic models: ${err}`);
    return {};
  }
};
src/lib/providers/groq.ts (new file, 149 lines)
@@ -0,0 +1,149 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadGroqChatModels = async () => {
  const groqApiKey = getGroqApiKey();

  if (!groqApiKey) return {};

  try {
    const chatModels = {
      'llama-3.2-3b-preview': {
        displayName: 'Llama 3.2 3B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.2-3b-preview',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama-3.2-11b-vision-preview': {
        displayName: 'Llama 3.2 11B Vision',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.2-11b-vision-preview',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama-3.2-90b-vision-preview': {
        displayName: 'Llama 3.2 90B Vision',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.2-90b-vision-preview',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama-3.1-70b-versatile': {
        displayName: 'Llama 3.1 70B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.1-70b-versatile',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama-3.1-8b-instant': {
        displayName: 'Llama 3.1 8B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.1-8b-instant',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama3-8b-8192': {
        displayName: 'LLaMA3 8B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama3-8b-8192',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama3-70b-8192': {
        displayName: 'LLaMA3 70B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama3-70b-8192',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'mixtral-8x7b-32768': {
        displayName: 'Mixtral 8x7B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'mixtral-8x7b-32768',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'gemma-7b-it': {
        displayName: 'Gemma 7B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'gemma-7b-it',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'gemma2-9b-it': {
        displayName: 'Gemma2 9B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'gemma2-9b-it',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
    };

    return chatModels;
  } catch (err) {
    logger.error(`Error loading Groq models: ${err}`);
    return {};
  }
};
src/lib/providers/index.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';

const chatModelProviders = {
  openai: loadOpenAIChatModels,
  groq: loadGroqChatModels,
  ollama: loadOllamaChatModels,
  anthropic: loadAnthropicChatModels,
};

const embeddingModelProviders = {
  openai: loadOpenAIEmbeddingsModels,
  local: loadTransformersEmbeddingsModels,
  ollama: loadOllamaEmbeddingsModels,
};

export const getAvailableChatModelProviders = async () => {
  const models = {};

  for (const provider in chatModelProviders) {
    const providerModels = await chatModelProviders[provider]();
    if (Object.keys(providerModels).length > 0) {
      models[provider] = providerModels;
    }
  }

  models['custom_openai'] = {};

  return models;
};

export const getAvailableEmbeddingModelProviders = async () => {
  const models = {};

  for (const provider in embeddingModelProviders) {
    const providerModels = await embeddingModelProviders[provider]();
    if (Object.keys(providerModels).length > 0) {
      models[provider] = providerModels;
    }
  }

  return models;
};
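A minimal sketch of how this registry is consumed, mirroring the pattern the routes below use (the import path is an assumption, relative to src/): providers that return no models are dropped, and custom_openai is always present as an empty entry.

import { getAvailableChatModelProviders } from './lib/providers';

const providers = await getAvailableChatModelProviders();

for (const provider in providers) {
  console.log(provider, Object.keys(providers[provider]));
}

// Default selection: first provider, first model within it.
const defaultProvider = Object.keys(providers)[0];
const defaultModel = Object.keys(providers[defaultProvider])[0];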
src/lib/providers/ollama.ts (new file, 73 lines)
@@ -0,0 +1,73 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';

export const loadOllamaChatModels = async () => {
  const ollamaEndpoint = getOllamaApiEndpoint();
  const keepAlive = getKeepAlive();

  if (!ollamaEndpoint) return {};

  try {
    const response = await fetch(`${ollamaEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const { models: ollamaModels } = (await response.json()) as any;

    const chatModels = ollamaModels.reduce((acc, model) => {
      acc[model.model] = {
        displayName: model.name,
        model: new ChatOllama({
          baseUrl: ollamaEndpoint,
          model: model.model,
          temperature: 0.7,
          keepAlive: keepAlive,
        }),
      };

      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading Ollama models: ${err}`);
    return {};
  }
};

export const loadOllamaEmbeddingsModels = async () => {
  const ollamaEndpoint = getOllamaApiEndpoint();

  if (!ollamaEndpoint) return {};

  try {
    const response = await fetch(`${ollamaEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const { models: ollamaModels } = (await response.json()) as any;

    const embeddingsModels = ollamaModels.reduce((acc, model) => {
      acc[model.model] = {
        displayName: model.name,
        model: new OllamaEmbeddings({
          baseUrl: ollamaEndpoint,
          model: model.model,
        }),
      };

      return acc;
    }, {});

    return embeddingsModels;
  } catch (err) {
    logger.error(`Error loading Ollama embeddings model: ${err}`);
    return {};
  }
};
src/lib/providers/openai.ts (new file, 89 lines)
@@ -0,0 +1,89 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadOpenAIChatModels = async () => {
  const openAIApiKey = getOpenaiApiKey();

  if (!openAIApiKey) return {};

  try {
    const chatModels = {
      'gpt-3.5-turbo': {
        displayName: 'GPT-3.5 Turbo',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-3.5-turbo',
          temperature: 0.7,
        }),
      },
      'gpt-4': {
        displayName: 'GPT-4',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4',
          temperature: 0.7,
        }),
      },
      'gpt-4-turbo': {
        displayName: 'GPT-4 turbo',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4-turbo',
          temperature: 0.7,
        }),
      },
      'gpt-4o': {
        displayName: 'GPT-4 omni',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4o',
          temperature: 0.7,
        }),
      },
      'gpt-4o-mini': {
        displayName: 'GPT-4 omni mini',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4o-mini',
          temperature: 0.7,
        }),
      },
    };

    return chatModels;
  } catch (err) {
    logger.error(`Error loading OpenAI models: ${err}`);
    return {};
  }
};

export const loadOpenAIEmbeddingsModels = async () => {
  const openAIApiKey = getOpenaiApiKey();

  if (!openAIApiKey) return {};

  try {
    const embeddingModels = {
      'text-embedding-3-small': {
        displayName: 'Text Embedding 3 Small',
        model: new OpenAIEmbeddings({
          openAIApiKey,
          modelName: 'text-embedding-3-small',
        }),
      },
      'text-embedding-3-large': {
        displayName: 'Text Embedding 3 Large',
        model: new OpenAIEmbeddings({
          openAIApiKey,
          modelName: 'text-embedding-3-large',
        }),
      },
    };

    return embeddingModels;
  } catch (err) {
    logger.error(`Error loading OpenAI embeddings model: ${err}`);
    return {};
  }
};
src/lib/providers/transformers.ts (new file, 32 lines)
@@ -0,0 +1,32 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';

export const loadTransformersEmbeddingsModels = async () => {
  try {
    const embeddingModels = {
      'xenova-bge-small-en-v1.5': {
        displayName: 'BGE Small',
        model: new HuggingFaceTransformersEmbeddings({
          modelName: 'Xenova/bge-small-en-v1.5',
        }),
      },
      'xenova-gte-small': {
        displayName: 'GTE Small',
        model: new HuggingFaceTransformersEmbeddings({
          modelName: 'Xenova/gte-small',
        }),
      },
      'xenova-bert-base-multilingual-uncased': {
        displayName: 'Bert Multilingual',
        model: new HuggingFaceTransformersEmbeddings({
          modelName: 'Xenova/bert-base-multilingual-uncased',
        }),
      },
    };

    return embeddingModels;
  } catch (err) {
    logger.error(`Error loading Transformers embeddings model: ${err}`);
    return {};
  }
};
src/lib/searxng.ts (modified)
@@ -1,4 +1,5 @@
 import axios from 'axios';
+import { getSearxngApiEndpoint } from '../config';

 interface SearxngSearchOptions {
   categories?: string[];
@@ -12,15 +13,19 @@ interface SearxngSearchResult {
   url: string;
   img_src?: string;
   thumbnail_src?: string;
+  thumbnail?: string;
   content?: string;
   author?: string;
+  iframe_src?: string;
 }

 export const searchSearxng = async (
   query: string,
   opts?: SearxngSearchOptions,
 ) => {
-  const url = new URL(`${process.env.SEARXNG_API_URL}/search?format=json`);
+  const searxngURL = getSearxngApiEndpoint();
+
+  const url = new URL(`${searxngURL}/search?format=json`);
   url.searchParams.append('q', query);

   if (opts) {
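A minimal sketch of a call, run inside an async context; the query and engine mirror what the discover route further down passes in:

import { searchSearxng } from '../lib/searxng';

const { results } = await searchSearxng('site:businessinsider.com AI', {
  engines: ['bing news'],
  pageno: 1,
});

results.forEach((r) => console.log(r.url, r.content));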
src/prompts/academicSearch.ts (new file, 42 lines)
@@ -0,0 +1,42 @@
export const academicSearchRetrieverPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.

Example:
1. Follow up question: How does stable diffusion work?
Rephrased: Stable diffusion working

2. Follow up question: What is linear algebra?
Rephrased: Linear algebra

3. Follow up question: What is the third law of thermodynamics?
Rephrased: Third law of thermodynamics

Conversation:
{chat_history}

Follow up question: {query}
Rephrased question:
`;

export const academicSearchResponsePrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.

Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.

Anything inside the following \`context\` HTML block provided below is for your knowledge returned by the search engine and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
talk about the context in your response.

<context>
{context}
</context>

If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
`;
src/prompts/index.ts (new file, 32 lines)
@@ -0,0 +1,32 @@
import {
  academicSearchResponsePrompt,
  academicSearchRetrieverPrompt,
} from './academicSearch';
import {
  redditSearchResponsePrompt,
  redditSearchRetrieverPrompt,
} from './redditSearch';
import { webSearchResponsePrompt, webSearchRetrieverPrompt } from './webSearch';
import {
  wolframAlphaSearchResponsePrompt,
  wolframAlphaSearchRetrieverPrompt,
} from './wolframAlpha';
import { writingAssistantPrompt } from './writingAssistant';
import {
  youtubeSearchResponsePrompt,
  youtubeSearchRetrieverPrompt,
} from './youtubeSearch';

export default {
  webSearchResponsePrompt,
  webSearchRetrieverPrompt,
  academicSearchResponsePrompt,
  academicSearchRetrieverPrompt,
  redditSearchResponsePrompt,
  redditSearchRetrieverPrompt,
  wolframAlphaSearchResponsePrompt,
  wolframAlphaSearchRetrieverPrompt,
  writingAssistantPrompt,
  youtubeSearchResponsePrompt,
  youtubeSearchRetrieverPrompt,
};
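A minimal sketch of how one of these templates gets its placeholders filled (PromptTemplate is from @langchain/core; the import path and the chat history string are hypothetical, and the call assumes an async context):

import { PromptTemplate } from '@langchain/core/prompts';
import prompts from './prompts';

const formatted = await PromptTemplate.fromTemplate(
  prompts.webSearchRetrieverPrompt,
).format({
  chat_history: 'Human: What is Docker?\nAI: Docker is a container platform.',
  query: 'How do I install it?',
});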
src/prompts/redditSearch.ts (new file, 42 lines)
@@ -0,0 +1,42 @@
export const redditSearchRetrieverPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.

Example:
1. Follow up question: Which company is most likely to create an AGI
Rephrased: Which company is most likely to create an AGI

2. Follow up question: Is Earth flat?
Rephrased: Is Earth flat?

3. Follow up question: Is there life on Mars?
Rephrased: Is there life on Mars?

Conversation:
{chat_history}

Follow up question: {query}
Rephrased question:
`;

export const redditSearchResponsePrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.

Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.

Anything inside the following \`context\` HTML block provided below is for your knowledge returned by Reddit and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
talk about the context in your response.

<context>
{context}
</context>

If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
Anything between the \`context\` is retrieved from Reddit and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
`;
src/prompts/webSearch.ts (new file, 86 lines)
@@ -0,0 +1,86 @@
export const webSearchRetrieverPrompt = `
You are an AI question rephraser. You will be given a conversation and a follow-up question; you will have to rephrase the follow-up question so it is a standalone question and can be used by another LLM to search the web for information to answer it.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc., then you need to return \`not_needed\` as the response (this is because the LLM won't need to search the web to find information on this topic).
If the user asks some question about some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block; if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.

There are several examples attached for your reference inside the below \`examples\` XML block.

<examples>
1. Follow up question: What is the capital of France
Rephrased question: \`
<question>
Capital of france
</question>
\`

2. Hi, how are you?
Rephrased question: \`
<question>
not_needed
</question>
\`

3. Follow up question: What is Docker?
Rephrased question: \`
<question>
What is Docker
</question>
\`

4. Follow up question: Can you tell me what is X from https://example.com
Rephrased question: \`
<question>
Can you tell me what is X?
</question>

<links>
https://example.com
</links>
\`

5. Follow up question: Summarize the content from https://example.com
Rephrased question: \`
<question>
summarize
</question>

<links>
https://example.com
</links>
\`
</examples>

Anything below is part of the actual conversation; you need to use the conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.

<conversation>
{chat_history}
</conversation>

Follow up question: {query}
Rephrased question:
`;

export const webSearchResponsePrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are also an expert at summarizing web pages or documents and searching for content in them.

Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
If the query contains some links and the user asks to answer from those links you will be provided the entire content of the page inside the \`context\` XML block. You can then use this content to answer the user's query.
If the user asks to summarize content from some links, you will be provided the entire content of the page inside the \`context\` XML block. You can then use this content to summarize the text. The content provided inside the \`context\` block will be already summarized by another model so you just need to use that content to answer the user's query.
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.

Anything inside the following \`context\` HTML block provided below is for your knowledge returned by the search engine and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
talk about the context in your response.

<context>
{context}
</context>

If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'. You do not need to do this for summarization tasks.
Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
`;
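The retriever prompt above asks the model for <question> and optional <links> XML blocks; a minimal sketch of pulling them back out of a raw completion with the output parsers defined earlier (run inside an async context):

import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';

const completion = `
<question>
summarize
</question>

<links>
https://example.com
</links>
`;

const question = await new LineOutputParser({ key: 'question' }).parse(completion);
const links = await new LineListOutputParser({ key: 'links' }).parse(completion);

console.log(question); // 'summarize'
console.log(links); // ['https://example.com']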
src/prompts/wolframAlpha.ts (new file, 42 lines)
@@ -0,0 +1,42 @@
export const wolframAlphaSearchRetrieverPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.

Example:
1. Follow up question: What is the atomic radius of S?
Rephrased: Atomic radius of S

2. Follow up question: What is linear algebra?
Rephrased: Linear algebra

3. Follow up question: What is the third law of thermodynamics?
Rephrased: Third law of thermodynamics

Conversation:
{chat_history}

Follow up question: {query}
Rephrased question:
`;

export const wolframAlphaSearchResponsePrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.

Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.

Anything inside the following \`context\` HTML block provided below is for your knowledge returned by Wolfram Alpha and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
talk about the context in your response.

<context>
{context}
</context>

If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
Anything between the \`context\` is retrieved from Wolfram Alpha and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
`;
src/prompts/writingAssistant.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
export const writingAssistantPrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Writing Assistant', this means you will be helping the user write a response to a given query.
Since you are a writing assistant, you would not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest they switch to a different focus mode.
You will be shared a context that can contain information from files the user has uploaded to get answers from. You will have to generate answers based on that.

You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.

<context>
{context}
</context>
`;
src/prompts/youtubeSearch.ts (new file, 42 lines)
@@ -0,0 +1,42 @@
export const youtubeSearchRetrieverPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.

Example:
1. Follow up question: How does an A.C work?
Rephrased: A.C working

2. Follow up question: Linear algebra explanation video
Rephrased: What is linear algebra?

3. Follow up question: What is theory of relativity?
Rephrased: What is theory of relativity?

Conversation:
{chat_history}

Follow up question: {query}
Rephrased question:
`;

export const youtubeSearchResponsePrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are set on focus mode 'Youtube', this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript.

Generate a response that is informative and relevant to the user's query based on the provided context (the context consists of search results containing a brief description of the content of that page).
You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
You must not tell the user to open any link or visit any website to get the answer. You must provide the answer in the response itself. If the user asks for links you can provide them.
Your responses should be medium to long in length, informative, and relevant to the user's query. You can use markdown to format your response. You should use bullet points to list the information. Make sure the answer is not short and is informative.
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.

Anything inside the following \`context\` HTML block provided below is for your knowledge returned by Youtube and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to
talk about the context in your response.

<context>
{context}
</context>

If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
Anything between the \`context\` is retrieved from Youtube and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
`;
src/routes/chats.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
import express from 'express';
import logger from '../utils/logger';
import db from '../db/index';
import { eq } from 'drizzle-orm';
import { chats, messages } from '../db/schema';

const router = express.Router();

router.get('/', async (_, res) => {
  try {
    let chats = await db.query.chats.findMany();

    chats = chats.reverse();

    return res.status(200).json({ chats: chats });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in getting chats: ${err.message}`);
  }
});

router.get('/:id', async (req, res) => {
  try {
    const chatExists = await db.query.chats.findFirst({
      where: eq(chats.id, req.params.id),
    });

    if (!chatExists) {
      return res.status(404).json({ message: 'Chat not found' });
    }

    const chatMessages = await db.query.messages.findMany({
      where: eq(messages.chatId, req.params.id),
    });

    return res.status(200).json({ chat: chatExists, messages: chatMessages });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in getting chat: ${err.message}`);
  }
});

router.delete(`/:id`, async (req, res) => {
  try {
    const chatExists = await db.query.chats.findFirst({
      where: eq(chats.id, req.params.id),
    });

    if (!chatExists) {
      return res.status(404).json({ message: 'Chat not found' });
    }

    await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
    await db
      .delete(messages)
      .where(eq(messages.chatId, req.params.id))
      .execute();

    return res.status(200).json({ message: 'Chat deleted successfully' });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in deleting chat: ${err.message}`);
  }
});

export default router;
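A minimal sketch of exercising these endpoints over HTTP (the host and port are assumptions; adjust to wherever the backend listens, and run inside an async context):

const base = 'http://localhost:3001/api'; // assumed backend address

const res = await fetch(`${base}/chats`);
const { chats } = await res.json();

if (chats.length > 0) {
  // Fetch one chat with its messages, then delete it.
  await fetch(`${base}/chats/${chats[0].id}`);
  await fetch(`${base}/chats/${chats[0].id}`, { method: 'DELETE' });
}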
src/routes/config.ts (new file, 82 lines)
@@ -0,0 +1,82 @@
import express from 'express';
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import {
  getGroqApiKey,
  getOllamaApiEndpoint,
  getAnthropicApiKey,
  getOpenaiApiKey,
  updateConfig,
} from '../config';
import logger from '../utils/logger';

const router = express.Router();

router.get('/', async (_, res) => {
  try {
    const config = {};

    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
      getAvailableChatModelProviders(),
      getAvailableEmbeddingModelProviders(),
    ]);

    config['chatModelProviders'] = {};
    config['embeddingModelProviders'] = {};

    for (const provider in chatModelProviders) {
      config['chatModelProviders'][provider] = Object.keys(
        chatModelProviders[provider],
      ).map((model) => {
        return {
          name: model,
          displayName: chatModelProviders[provider][model].displayName,
        };
      });
    }

    for (const provider in embeddingModelProviders) {
      config['embeddingModelProviders'][provider] = Object.keys(
        embeddingModelProviders[provider],
      ).map((model) => {
        return {
          name: model,
          displayName: embeddingModelProviders[provider][model].displayName,
        };
      });
    }

    config['openaiApiKey'] = getOpenaiApiKey();
    config['ollamaApiUrl'] = getOllamaApiEndpoint();
    config['anthropicApiKey'] = getAnthropicApiKey();
    config['groqApiKey'] = getGroqApiKey();

    res.status(200).json(config);
  } catch (err: any) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error getting config: ${err.message}`);
  }
});

router.post('/', async (req, res) => {
  const config = req.body;

  const updatedConfig = {
    API_KEYS: {
      OPENAI: config.openaiApiKey,
      GROQ: config.groqApiKey,
      ANTHROPIC: config.anthropicApiKey,
    },
    API_ENDPOINTS: {
      OLLAMA: config.ollamaApiUrl,
    },
  };

  updateConfig(updatedConfig);

  res.status(200).json({ message: 'Config updated' });
});

export default router;
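A minimal sketch of the matching POST (same assumed address as above; the key values are placeholders): the body uses the camelCase names the GET handler returns, which the route maps back onto the TOML keys via updateConfig.

await fetch('http://localhost:3001/api/config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    openaiApiKey: 'sk-new-key', // placeholder value
    groqApiKey: '',
    anthropicApiKey: '',
    ollamaApiUrl: 'http://localhost:11434',
  }),
});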
src/routes/discover.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
import express from 'express';
import { searchSearxng } from '../lib/searxng';
import logger from '../utils/logger';

const router = express.Router();

router.get('/', async (req, res) => {
  try {
    const data = (
      await Promise.all([
        searchSearxng('site:businessinsider.com AI', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:www.exchangewire.com AI', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:yahoo.com AI', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:businessinsider.com tech', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:www.exchangewire.com tech', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:yahoo.com tech', {
          engines: ['bing news'],
          pageno: 1,
        }),
      ])
    )
      .map((result) => result.results)
      .flat()
      .sort(() => Math.random() - 0.5);

    return res.json({ blogs: data });
  } catch (err: any) {
    logger.error(`Error in discover route: ${err.message}`);
    return res.status(500).json({ message: 'An error has occurred' });
  }
});

export default router;
src/routes/images.ts (modified)
@@ -1,21 +1,87 @@
 import express from 'express';
-import imageSearchChain from '../agents/imageSearchAgent';
+import handleImageSearch from '../chains/imageSearchAgent';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { getAvailableChatModelProviders } from '../lib/providers';
+import { HumanMessage, AIMessage } from '@langchain/core/messages';
 import logger from '../utils/logger';
+import { ChatOpenAI } from '@langchain/openai';

 const router = express.Router();

+interface ChatModel {
+  provider: string;
+  model: string;
+  customOpenAIBaseURL?: string;
+  customOpenAIKey?: string;
+}
+
+interface ImageSearchBody {
+  query: string;
+  chatHistory: any[];
+  chatModel?: ChatModel;
+}
+
 router.post('/', async (req, res) => {
   try {
-    const { query, chat_history } = req.body;
+    let body: ImageSearchBody = req.body;

-    const images = await imageSearchChain.invoke({
-      query,
-      chat_history,
+    const chatHistory = body.chatHistory.map((msg: any) => {
+      if (msg.role === 'user') {
+        return new HumanMessage(msg.content);
+      } else if (msg.role === 'assistant') {
+        return new AIMessage(msg.content);
+      }
     });

+    const chatModelProviders = await getAvailableChatModelProviders();
+
+    const chatModelProvider =
+      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
+    const chatModel =
+      body.chatModel?.model ||
+      Object.keys(chatModelProviders[chatModelProvider])[0];
+
+    let llm: BaseChatModel | undefined;
+
+    if (body.chatModel?.provider === 'custom_openai') {
+      if (
+        !body.chatModel?.customOpenAIBaseURL ||
+        !body.chatModel?.customOpenAIKey
+      ) {
+        return res
+          .status(400)
+          .json({ message: 'Missing custom OpenAI base URL or key' });
+      }
+
+      llm = new ChatOpenAI({
+        modelName: body.chatModel.model,
+        openAIApiKey: body.chatModel.customOpenAIKey,
+        temperature: 0.7,
+        configuration: {
+          baseURL: body.chatModel.customOpenAIBaseURL,
+        },
+      }) as unknown as BaseChatModel;
+    } else if (
+      chatModelProviders[chatModelProvider] &&
+      chatModelProviders[chatModelProvider][chatModel]
+    ) {
+      llm = chatModelProviders[chatModelProvider][chatModel]
+        .model as unknown as BaseChatModel | undefined;
+    }
+
+    if (!llm) {
+      return res.status(400).json({ message: 'Invalid model selected' });
+    }
+
+    const images = await handleImageSearch(
+      { query: body.query, chat_history: chatHistory },
+      llm,
+    );
+
     res.status(200).json({ images });
   } catch (err) {
     res.status(500).json({ message: 'An error has occurred.' });
     console.log(err.message);
     logger.error(`Error in image search: ${err.message}`);
   }
 });
src/routes/index.ts (modified)
@@ -1,8 +1,24 @@
 import express from 'express';
 import imagesRouter from './images';
+import videosRouter from './videos';
+import configRouter from './config';
+import modelsRouter from './models';
+import suggestionsRouter from './suggestions';
+import chatsRouter from './chats';
+import searchRouter from './search';
+import discoverRouter from './discover';
+import uploadsRouter from './uploads';

 const router = express.Router();

 router.use('/images', imagesRouter);
+router.use('/videos', videosRouter);
+router.use('/config', configRouter);
+router.use('/models', modelsRouter);
+router.use('/suggestions', suggestionsRouter);
+router.use('/chats', chatsRouter);
+router.use('/search', searchRouter);
+router.use('/discover', discoverRouter);
+router.use('/uploads', uploadsRouter);

 export default router;
src/routes/models.ts (new file, 36 lines)
@@ -0,0 +1,36 @@
import express from 'express';
import logger from '../utils/logger';
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '../lib/providers';

const router = express.Router();

router.get('/', async (req, res) => {
  try {
    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
      getAvailableChatModelProviders(),
      getAvailableEmbeddingModelProviders(),
    ]);

    Object.keys(chatModelProviders).forEach((provider) => {
      Object.keys(chatModelProviders[provider]).forEach((model) => {
        delete chatModelProviders[provider][model].model;
      });
    });

    Object.keys(embeddingModelProviders).forEach((provider) => {
      Object.keys(embeddingModelProviders[provider]).forEach((model) => {
        delete embeddingModelProviders[provider][model].model;
      });
    });

    res.status(200).json({ chatModelProviders, embeddingModelProviders });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(err.message);
  }
});

export default router;
src/routes/search.ts (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
import express from 'express';
|
||||
import logger from '../utils/logger';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { Embeddings } from '@langchain/core/embeddings';
|
||||
import { ChatOpenAI } from '@langchain/openai';
|
||||
import {
|
||||
getAvailableChatModelProviders,
|
||||
getAvailableEmbeddingModelProviders,
|
||||
} from '../lib/providers';
|
||||
import { searchHandlers } from '../websocket/messageHandler';
|
||||
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
|
||||
import { MetaSearchAgentType } from '../search/metaSearchAgent';
|
||||
|
||||
const router = express.Router();
|
||||
|
||||
interface chatModel {
|
||||
provider: string;
|
||||
model: string;
|
||||
customOpenAIBaseURL?: string;
|
||||
customOpenAIKey?: string;
|
||||
}
|
||||
|
||||
interface embeddingModel {
|
||||
provider: string;
|
||||
model: string;
|
||||
}
|
||||
|
||||
interface ChatRequestBody {
|
||||
optimizationMode: 'speed' | 'balanced';
|
||||
focusMode: string;
|
||||
chatModel?: chatModel;
|
||||
embeddingModel?: embeddingModel;
|
||||
query: string;
|
||||
history: Array<[string, string]>;
|
||||
}
|
||||
|
||||
router.post('/', async (req, res) => {
|
||||
try {
|
||||
const body: ChatRequestBody = req.body;
|
||||
|
||||
if (!body.focusMode || !body.query) {
|
||||
return res.status(400).json({ message: 'Missing focus mode or query' });
|
||||
}
|
||||
|
||||
body.history = body.history || [];
|
||||
body.optimizationMode = body.optimizationMode || 'balanced';
|
||||
|
||||
const history: BaseMessage[] = body.history.map((msg) => {
|
||||
if (msg[0] === 'human') {
|
||||
return new HumanMessage({
|
||||
content: msg[1],
|
||||
});
|
||||
} else {
|
||||
return new AIMessage({
|
||||
content: msg[1],
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
|
||||
getAvailableChatModelProviders(),
|
||||
getAvailableEmbeddingModelProviders(),
|
||||
]);
|
||||
|
||||
const chatModelProvider =
|
||||
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
|
||||
const chatModel =
|
||||
body.chatModel?.model ||
|
||||
Object.keys(chatModelProviders[chatModelProvider])[0];
|
||||
|
||||
const embeddingModelProvider =
|
||||
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
|
||||
const embeddingModel =
|
||||
body.embeddingModel?.model ||
|
||||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
|
||||
|
||||
let llm: BaseChatModel | undefined;
|
||||
let embeddings: Embeddings | undefined;
|
||||
|
||||
if (body.chatModel?.provider === 'custom_openai') {
|
||||
if (
|
||||
!body.chatModel?.customOpenAIBaseURL ||
|
||||
!body.chatModel?.customOpenAIKey
|
||||
) {
|
||||
return res
|
||||
.status(400)
|
||||
.json({ message: 'Missing custom OpenAI base URL or key' });
|
||||
}
|
||||
|
||||
llm = new ChatOpenAI({
|
||||
modelName: body.chatModel.model,
|
||||
openAIApiKey: body.chatModel.customOpenAIKey,
|
||||
temperature: 0.7,
|
||||
configuration: {
|
||||
baseURL: body.chatModel.customOpenAIBaseURL,
|
||||
},
|
||||
}) as unknown as BaseChatModel;
|
||||
} else if (
|
||||
chatModelProviders[chatModelProvider] &&
|
||||
chatModelProviders[chatModelProvider][chatModel]
|
||||
) {
|
||||
llm = chatModelProviders[chatModelProvider][chatModel]
|
||||
.model as unknown as BaseChatModel | undefined;
|
||||
}
|
||||
|
||||
if (
|
||||
embeddingModelProviders[embeddingModelProvider] &&
|
||||
embeddingModelProviders[embeddingModelProvider][embeddingModel]
|
||||
) {
|
||||
embeddings = embeddingModelProviders[embeddingModelProvider][
|
||||
embeddingModel
|
||||
].model as Embeddings | undefined;
|
||||
}
|
||||
|
||||
if (!llm || !embeddings) {
|
||||
return res.status(400).json({ message: 'Invalid model selected' });
|
||||
}
|
||||
|
||||
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
|
||||
|
||||
if (!searchHandler) {
|
||||
return res.status(400).json({ message: 'Invalid focus mode' });
|
||||
}
|
||||
|
||||
const emitter = await searchHandler.searchAndAnswer(
|
||||
body.query,
|
||||
history,
|
||||
llm,
|
||||
embeddings,
|
||||
body.optimizationMode,
|
||||
[],
|
||||
);
|
||||
|
||||
let message = '';
|
||||
let sources = [];
|
||||
|
||||
emitter.on('data', (data) => {
|
||||
const parsedData = JSON.parse(data);
|
||||
if (parsedData.type === 'response') {
|
||||
message += parsedData.data;
|
||||
} else if (parsedData.type === 'sources') {
|
||||
sources = parsedData.data;
|
||||
}
|
||||
});
|
||||
|
||||
emitter.on('end', () => {
|
||||
res.status(200).json({ message, sources });
|
||||
});
|
||||
|
||||
emitter.on('error', (data) => {
|
||||
const parsedData = JSON.parse(data);
|
||||
res.status(500).json({ message: parsedData.data });
|
||||
});
|
||||
} catch (err: any) {
|
||||
logger.error(`Error in getting search results: ${err.message}`);
|
||||
res.status(500).json({ message: 'An error has occurred.' });
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
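A minimal sketch of calling this route; the host, port, and any path prefix are assumptions about how the app mounts the router, not something this diff pins down:

// Hypothetical client call against the new single-search endpoint.
const res = await fetch('http://localhost:3001/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    focusMode: 'webSearch',
    optimizationMode: 'balanced',
    query: 'What is Perplexica?',
    history: [
      ['human', 'Hi, how are you?'],
      ['assistant', 'Doing well! How can I help you today?'],
    ],
  }),
});
const { message, sources } = await res.json(); // aggregated once the emitter fires 'end'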
src/routes/suggestions.ts (new file, 87 lines)
@@ -0,0 +1,87 @@
import express from 'express';
import generateSuggestions from '../chains/suggestionGeneratorAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';

const router = express.Router();

interface ChatModel {
  provider: string;
  model: string;
  customOpenAIBaseURL?: string;
  customOpenAIKey?: string;
}

interface SuggestionsBody {
  chatHistory: any[];
  chatModel?: ChatModel;
}

router.post('/', async (req, res) => {
  try {
    let body: SuggestionsBody = req.body;

    const chatHistory = body.chatHistory.map((msg: any) => {
      if (msg.role === 'user') {
        return new HumanMessage(msg.content);
      } else if (msg.role === 'assistant') {
        return new AIMessage(msg.content);
      }
    });

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
    const chatModel =
      body.chatModel?.model ||
      Object.keys(chatModelProviders[chatModelProvider])[0];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      if (
        !body.chatModel?.customOpenAIBaseURL ||
        !body.chatModel?.customOpenAIKey
      ) {
        return res
          .status(400)
          .json({ message: 'Missing custom OpenAI base URL or key' });
      }

      llm = new ChatOpenAI({
        modelName: body.chatModel.model,
        openAIApiKey: body.chatModel.customOpenAIKey,
        temperature: 0.7,
        configuration: {
          baseURL: body.chatModel.customOpenAIBaseURL,
        },
      }) as unknown as BaseChatModel;
    } else if (
      chatModelProviders[chatModelProvider] &&
      chatModelProviders[chatModelProvider][chatModel]
    ) {
      llm = chatModelProviders[chatModelProvider][chatModel]
        .model as unknown as BaseChatModel | undefined;
    }

    if (!llm) {
      return res.status(400).json({ message: 'Invalid model selected' });
    }

    const suggestions = await generateSuggestions(
      { chat_history: chatHistory },
      llm,
    );

    res.status(200).json({ suggestions: suggestions });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in generating suggestions: ${err.message}`);
  }
});

export default router;
src/routes/uploads.ts (new file, 151 lines)
@@ -0,0 +1,151 @@
import express from 'express';
import logger from '../utils/logger';
import multer from 'multer';
import path from 'path';
import crypto from 'crypto';
import fs from 'fs';
import { Embeddings } from '@langchain/core/embeddings';
import { getAvailableEmbeddingModelProviders } from '../lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';

const router = express.Router();

const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 500,
  chunkOverlap: 100,
});

const storage = multer.diskStorage({
  destination: (req, file, cb) => {
    cb(null, path.join(process.cwd(), './uploads'));
  },
  filename: (req, file, cb) => {
    const splitedFileName = file.originalname.split('.');
    const fileExtension = splitedFileName[splitedFileName.length - 1];
    if (!['pdf', 'docx', 'txt'].includes(fileExtension)) {
      return cb(new Error('File type is not supported'), '');
    }
    cb(null, `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`);
  },
});

const upload = multer({ storage });

router.post(
  '/',
  upload.fields([
    { name: 'files' },
    { name: 'embedding_model', maxCount: 1 },
    { name: 'embedding_model_provider', maxCount: 1 },
  ]),
  async (req, res) => {
    try {
      const { embedding_model, embedding_model_provider } = req.body;

      if (!embedding_model || !embedding_model_provider) {
        res
          .status(400)
          .json({ message: 'Missing embedding model or provider' });
        return;
      }

      const embeddingModels = await getAvailableEmbeddingModelProviders();
      const provider =
        embedding_model_provider ?? Object.keys(embeddingModels)[0];
      const embeddingModel: Embeddings =
        embedding_model ?? Object.keys(embeddingModels[provider])[0];

      let embeddingsModel: Embeddings | undefined;

      if (
        embeddingModels[provider] &&
        embeddingModels[provider][embeddingModel]
      ) {
        embeddingsModel = embeddingModels[provider][embeddingModel].model as
          | Embeddings
          | undefined;
      }

      if (!embeddingsModel) {
        res.status(400).json({ message: 'Invalid LLM model selected' });
        return;
      }

      const files = req.files['files'] as Express.Multer.File[];
      if (!files || files.length === 0) {
        res.status(400).json({ message: 'No files uploaded' });
        return;
      }

      await Promise.all(
        files.map(async (file) => {
          let docs: Document[] = [];

          if (file.mimetype === 'application/pdf') {
            const loader = new PDFLoader(file.path);
            docs = await loader.load();
          } else if (
            file.mimetype ===
            'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
          ) {
            const loader = new DocxLoader(file.path);
            docs = await loader.load();
          } else if (file.mimetype === 'text/plain') {
            const text = fs.readFileSync(file.path, 'utf-8');
            docs = [
              new Document({
                pageContent: text,
                metadata: {
                  title: file.originalname,
                },
              }),
            ];
          }

          const splitted = await splitter.splitDocuments(docs);

          const json = JSON.stringify({
            title: file.originalname,
            contents: splitted.map((doc) => doc.pageContent),
          });

          const pathToSave = file.path.replace(/\.\w+$/, '-extracted.json');
          fs.writeFileSync(pathToSave, json);

          const embeddings = await embeddingsModel.embedDocuments(
            splitted.map((doc) => doc.pageContent),
          );

          const embeddingsJSON = JSON.stringify({
            title: file.originalname,
            embeddings: embeddings,
          });

          const pathToSaveEmbeddings = file.path.replace(
            /\.\w+$/,
            '-embeddings.json',
          );
          fs.writeFileSync(pathToSaveEmbeddings, embeddingsJSON);
        }),
      );

      res.status(200).json({
        files: files.map((file) => {
          return {
            fileName: file.originalname,
            fileExtension: file.filename.split('.').pop(),
            fileId: file.filename.replace(/\.\w+$/, ''),
          };
        }),
      });
    } catch (err: any) {
      logger.error(`Error in uploading file results: ${err.message}`);
      res.status(500).json({ message: 'An error has occurred.' });
    }
  },
);

export default router;
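A matching client upload, sketched under the assumption that this router is mounted at /api/uploads; the field names mirror the multer config above, while `fileBlob` is a hypothetical File picked by the user:

const form = new FormData();
form.append('files', fileBlob, 'notes.pdf'); // fileBlob: a File/Blob from an <input type="file">
form.append('embedding_model', 'text-embedding-3-small'); // example value
form.append('embedding_model_provider', 'openai'); // example value

const res = await fetch('http://localhost:3001/api/uploads', {
  method: 'POST',
  body: form, // multipart/form-data; the browser sets the boundary
});
const { files } = await res.json(); // [{ fileName, fileExtension, fileId }]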
src/routes/videos.ts (new file, 88 lines)
@@ -0,0 +1,88 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../chains/videoSearchAgent';
import { ChatOpenAI } from '@langchain/openai';

const router = express.Router();

interface ChatModel {
  provider: string;
  model: string;
  customOpenAIBaseURL?: string;
  customOpenAIKey?: string;
}

interface VideoSearchBody {
  query: string;
  chatHistory: any[];
  chatModel?: ChatModel;
}

router.post('/', async (req, res) => {
  try {
    let body: VideoSearchBody = req.body;

    const chatHistory = body.chatHistory.map((msg: any) => {
      if (msg.role === 'user') {
        return new HumanMessage(msg.content);
      } else if (msg.role === 'assistant') {
        return new AIMessage(msg.content);
      }
    });

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
    const chatModel =
      body.chatModel?.model ||
      Object.keys(chatModelProviders[chatModelProvider])[0];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      if (
        !body.chatModel?.customOpenAIBaseURL ||
        !body.chatModel?.customOpenAIKey
      ) {
        return res
          .status(400)
          .json({ message: 'Missing custom OpenAI base URL or key' });
      }

      llm = new ChatOpenAI({
        modelName: body.chatModel.model,
        openAIApiKey: body.chatModel.customOpenAIKey,
        temperature: 0.7,
        configuration: {
          baseURL: body.chatModel.customOpenAIBaseURL,
        },
      }) as unknown as BaseChatModel;
    } else if (
      chatModelProviders[chatModelProvider] &&
      chatModelProviders[chatModelProvider][chatModel]
    ) {
      llm = chatModelProviders[chatModelProvider][chatModel]
        .model as unknown as BaseChatModel | undefined;
    }

    if (!llm) {
      return res.status(400).json({ message: 'Invalid model selected' });
    }

    const videos = await handleVideoSearch(
      { chat_history: chatHistory, query: body.query },
      llm,
    );

    res.status(200).json({ videos });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in video search: ${err.message}`);
  }
});

export default router;
src/search/metaSearchAgent.ts (new file, 486 lines)
@@ -0,0 +1,486 @@
import { ChatOpenAI } from '@langchain/openai';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
  PromptTemplate,
} from '@langchain/core/prompts';
import {
  RunnableLambda,
  RunnableMap,
  RunnableSequence,
} from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document';
import { searchSearxng } from '../lib/searxng';
import path from 'path';
import fs from 'fs';
import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { IterableReadableStream } from '@langchain/core/utils/stream';

export interface MetaSearchAgentType {
  searchAndAnswer: (
    message: string,
    history: BaseMessage[],
    llm: BaseChatModel,
    embeddings: Embeddings,
    optimizationMode: 'speed' | 'balanced' | 'quality',
    fileIds: string[],
  ) => Promise<eventEmitter>;
}

interface Config {
  searchWeb: boolean;
  rerank: boolean;
  summarizer: boolean;
  rerankThreshold: number;
  queryGeneratorPrompt: string;
  responsePrompt: string;
  activeEngines: string[];
}

type BasicChainInput = {
  chat_history: BaseMessage[];
  query: string;
};

class MetaSearchAgent implements MetaSearchAgentType {
  private config: Config;
  private strParser = new StringOutputParser();

  constructor(config: Config) {
    this.config = config;
  }

  private async createSearchRetrieverChain(llm: BaseChatModel) {
    (llm as unknown as ChatOpenAI).temperature = 0;

    return RunnableSequence.from([
      PromptTemplate.fromTemplate(this.config.queryGeneratorPrompt),
      llm,
      this.strParser,
      RunnableLambda.from(async (input: string) => {
        const linksOutputParser = new LineListOutputParser({
          key: 'links',
        });

        const questionOutputParser = new LineOutputParser({
          key: 'question',
        });

        const links = await linksOutputParser.parse(input);
        let question = this.config.summarizer
          ? await questionOutputParser.parse(input)
          : input;

        if (question === 'not_needed') {
          return { query: '', docs: [] };
        }

        if (links.length > 0) {
          if (question.length === 0) {
            question = 'summarize';
          }

          let docs = [];

          const linkDocs = await getDocumentsFromLinks({ links });

          const docGroups: Document[] = [];

          linkDocs.map((doc) => {
            const URLDocExists = docGroups.find(
              (d) =>
                d.metadata.url === doc.metadata.url &&
                d.metadata.totalDocs < 10,
            );

            if (!URLDocExists) {
              docGroups.push({
                ...doc,
                metadata: {
                  ...doc.metadata,
                  totalDocs: 1,
                },
              });
            }

            const docIndex = docGroups.findIndex(
              (d) =>
                d.metadata.url === doc.metadata.url &&
                d.metadata.totalDocs < 10,
            );

            if (docIndex !== -1) {
              docGroups[docIndex].pageContent =
                docGroups[docIndex].pageContent + `\n\n` + doc.pageContent;
              docGroups[docIndex].metadata.totalDocs += 1;
            }
          });

          await Promise.all(
            docGroups.map(async (doc) => {
              const res = await llm.invoke(`
            You are a web search summarizer, tasked with summarizing a piece of text retrieved from a web search. Your job is to summarize the
            text into a detailed, 2-4 paragraph explanation that captures the main ideas and provides a comprehensive answer to the query.
            If the query is \"summarize\", you should provide a detailed summary of the text. If the query is a specific question, you should answer it in the summary.

            - **Journalistic tone**: The summary should sound professional and journalistic, not too casual or vague.
            - **Thorough and detailed**: Ensure that every key point from the text is captured and that the summary directly answers the query.
            - **Not too lengthy, but detailed**: The summary should be informative but not excessively long. Focus on providing detailed information in a concise format.

            The text will be shared inside the \`text\` XML tag, and the query inside the \`query\` XML tag.

            <example>
            1. \`<text>
            Docker is a set of platform-as-a-service products that use OS-level virtualization to deliver software in packages called containers.
            It was first released in 2013 and is developed by Docker, Inc. Docker is designed to make it easier to create, deploy, and run applications
            by using containers.
            </text>

            <query>
            What is Docker and how does it work?
            </query>

            Response:
            Docker is a revolutionary platform-as-a-service product developed by Docker, Inc., that uses container technology to make application
            deployment more efficient. It allows developers to package their software with all necessary dependencies, making it easier to run in
            any environment. Released in 2013, Docker has transformed the way applications are built, deployed, and managed.
            \`
            2. \`<text>
            The theory of relativity, or simply relativity, encompasses two interrelated theories of Albert Einstein: special relativity and general
            relativity. However, the word "relativity" is sometimes used in reference to Galilean invariance. The term "theory of relativity" was based
            on the expression "relative theory" used by Max Planck in 1906. The theory of relativity usually encompasses two interrelated theories by
            Albert Einstein: special relativity and general relativity. Special relativity applies to all physical phenomena in the absence of gravity.
            General relativity explains the law of gravitation and its relation to other forces of nature. It applies to the cosmological and astrophysical
            realm, including astronomy.
            </text>

            <query>
            summarize
            </query>

            Response:
            The theory of relativity, developed by Albert Einstein, encompasses two main theories: special relativity and general relativity. Special
            relativity applies to all physical phenomena in the absence of gravity, while general relativity explains the law of gravitation and its
            relation to other forces of nature. The theory of relativity is based on the concept of "relative theory," as introduced by Max Planck in
            1906. It is a fundamental theory in physics that has revolutionized our understanding of the universe.
            \`
            </example>

            Everything below is the actual data you will be working with. Good luck!

            <query>
            ${question}
            </query>

            <text>
            ${doc.pageContent}
            </text>

            Make sure to answer the query in the summary.
          `);

              const document = new Document({
                pageContent: res.content as string,
                metadata: {
                  title: doc.metadata.title,
                  url: doc.metadata.url,
                },
              });

              docs.push(document);
            }),
          );

          return { query: question, docs: docs };
        } else {
          const res = await searchSearxng(question, {
            language: 'en',
            engines: this.config.activeEngines,
          });

          const documents = res.results.map(
            (result) =>
              new Document({
                pageContent: result.content,
                metadata: {
                  title: result.title,
                  url: result.url,
                  ...(result.img_src && { img_src: result.img_src }),
                },
              }),
          );

          return { query: question, docs: documents };
        }
      }),
    ]);
  }

  private async createAnsweringChain(
    llm: BaseChatModel,
    fileIds: string[],
    embeddings: Embeddings,
    optimizationMode: 'speed' | 'balanced' | 'quality',
  ) {
    return RunnableSequence.from([
      RunnableMap.from({
        query: (input: BasicChainInput) => input.query,
        chat_history: (input: BasicChainInput) => input.chat_history,
        context: RunnableLambda.from(async (input: BasicChainInput) => {
          const processedHistory = formatChatHistoryAsString(
            input.chat_history,
          );

          let docs: Document[] | null = null;
          let query = input.query;

          if (this.config.searchWeb) {
            const searchRetrieverChain =
              await this.createSearchRetrieverChain(llm);

            const searchRetrieverResult = await searchRetrieverChain.invoke({
              chat_history: processedHistory,
              query,
            });

            query = searchRetrieverResult.query;
            docs = searchRetrieverResult.docs;
          }

          const sortedDocs = await this.rerankDocs(
            query,
            docs ?? [],
            fileIds,
            embeddings,
            optimizationMode,
          );

          return sortedDocs;
        })
          .withConfig({
            runName: 'FinalSourceRetriever',
          })
          .pipe(this.processDocs),
      }),
      ChatPromptTemplate.fromMessages([
        ['system', this.config.responsePrompt],
        new MessagesPlaceholder('chat_history'),
        ['user', '{query}'],
      ]),
      llm,
      this.strParser,
    ]).withConfig({
      runName: 'FinalResponseGenerator',
    });
  }

  private async rerankDocs(
    query: string,
    docs: Document[],
    fileIds: string[],
    embeddings: Embeddings,
    optimizationMode: 'speed' | 'balanced' | 'quality',
  ) {
    if (docs.length === 0 && fileIds.length === 0) {
      return docs;
    }

    const filesData = fileIds
      .map((file) => {
        const filePath = path.join(process.cwd(), 'uploads', file);

        const contentPath = filePath + '-extracted.json';
        const embeddingsPath = filePath + '-embeddings.json';

        const content = JSON.parse(fs.readFileSync(contentPath, 'utf8'));
        const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));

        const fileSimilaritySearchObject = content.contents.map(
          (c: string, i) => {
            return {
              fileName: content.title,
              content: c,
              embeddings: embeddings.embeddings[i],
            };
          },
        );

        return fileSimilaritySearchObject;
      })
      .flat();

    if (query.toLocaleLowerCase() === 'summarize') {
      return docs.slice(0, 15);
    }

    const docsWithContent = docs.filter(
      (doc) => doc.pageContent && doc.pageContent.length > 0,
    );

    if (optimizationMode === 'speed' || this.config.rerank === false) {
      if (filesData.length > 0) {
        const [queryEmbedding] = await Promise.all([
          embeddings.embedQuery(query),
        ]);

        const fileDocs = filesData.map((fileData) => {
          return new Document({
            pageContent: fileData.content,
            metadata: {
              title: fileData.fileName,
              url: `File`,
            },
          });
        });

        const similarity = filesData.map((fileData, i) => {
          const sim = computeSimilarity(queryEmbedding, fileData.embeddings);

          return {
            index: i,
            similarity: sim,
          };
        });

        let sortedDocs = similarity
          .filter(
            (sim) => sim.similarity > (this.config.rerankThreshold ?? 0.3),
          )
          .sort((a, b) => b.similarity - a.similarity)
          .slice(0, 15)
          .map((sim) => fileDocs[sim.index]);

        sortedDocs =
          docsWithContent.length > 0 ? sortedDocs.slice(0, 8) : sortedDocs;

        return [
          ...sortedDocs,
          ...docsWithContent.slice(0, 15 - sortedDocs.length),
        ];
      } else {
        return docsWithContent.slice(0, 15);
      }
    } else if (optimizationMode === 'balanced') {
      const [docEmbeddings, queryEmbedding] = await Promise.all([
        embeddings.embedDocuments(
          docsWithContent.map((doc) => doc.pageContent),
        ),
        embeddings.embedQuery(query),
      ]);

      docsWithContent.push(
        ...filesData.map((fileData) => {
          return new Document({
            pageContent: fileData.content,
            metadata: {
              title: fileData.fileName,
              url: `File`,
            },
          });
        }),
      );

      docEmbeddings.push(...filesData.map((fileData) => fileData.embeddings));

      const similarity = docEmbeddings.map((docEmbedding, i) => {
        const sim = computeSimilarity(queryEmbedding, docEmbedding);

        return {
          index: i,
          similarity: sim,
        };
      });

      const sortedDocs = similarity
        .filter((sim) => sim.similarity > (this.config.rerankThreshold ?? 0.3))
        .sort((a, b) => b.similarity - a.similarity)
        .slice(0, 15)
        .map((sim) => docsWithContent[sim.index]);

      return sortedDocs;
    }
  }

  private processDocs(docs: Document[]) {
    return docs
      .map((_, index) => `${index + 1}. ${docs[index].pageContent}`)
      .join('\n');
  }

  private async handleStream(
    stream: IterableReadableStream<StreamEvent>,
    emitter: eventEmitter,
  ) {
    for await (const event of stream) {
      if (
        event.event === 'on_chain_end' &&
        event.name === 'FinalSourceRetriever'
      ) {
        emitter.emit(
          'data',
          JSON.stringify({ type: 'sources', data: event.data.output }),
        );
      }
      if (
        event.event === 'on_chain_stream' &&
        event.name === 'FinalResponseGenerator'
      ) {
        emitter.emit(
          'data',
          JSON.stringify({ type: 'response', data: event.data.chunk }),
        );
      }
      if (
        event.event === 'on_chain_end' &&
        event.name === 'FinalResponseGenerator'
      ) {
        emitter.emit('end');
      }
    }
  }

  async searchAndAnswer(
    message: string,
    history: BaseMessage[],
    llm: BaseChatModel,
    embeddings: Embeddings,
    optimizationMode: 'speed' | 'balanced' | 'quality',
    fileIds: string[],
  ) {
    const emitter = new eventEmitter();

    const answeringChain = await this.createAnsweringChain(
      llm,
      fileIds,
      embeddings,
      optimizationMode,
    );

    const stream = answeringChain.streamEvents(
      {
        chat_history: history,
        query: message,
      },
      {
        version: 'v1',
      },
    );

    this.handleStream(stream, emitter);

    return emitter;
  }
}

export default MetaSearchAgent;
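To see how the pieces fit, a minimal consumption sketch; `llm` and `embeddings` are assumed to be valid instances obtained from the provider registry, and the config mirrors the `webSearch` handler defined later in this changeset:

// Sketch only: llm and embeddings are assumed to come from
// getAvailableChatModelProviders / getAvailableEmbeddingModelProviders.
const agent = new MetaSearchAgent({
  activeEngines: [],
  queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
  responsePrompt: prompts.webSearchResponsePrompt,
  rerank: true,
  rerankThreshold: 0.3,
  searchWeb: true,
  summarizer: true,
});

const emitter = await agent.searchAndAnswer(
  'What is SearXNG?',
  [], // no prior chat history
  llm,
  embeddings,
  'balanced',
  [], // no uploaded files
);

emitter.on('data', (raw) => {
  const { type, data } = JSON.parse(raw); // 'sources' once, then streamed 'response' chunks
});
emitter.on('end', () => {
  // streaming finished
});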
@@ -1,10 +1,13 @@
 import dot from 'compute-dot';
 import cosineSimilarity from 'compute-cosine-similarity';
+import { getSimilarityMeasure } from '../config';

 const computeSimilarity = (x: number[], y: number[]): number => {
-  if (process.env.SIMILARITY_MEASURE === 'cosine') {
+  const similarityMeasure = getSimilarityMeasure();
+
+  if (similarityMeasure === 'cosine') {
     return cosineSimilarity(x, y);
-  } else if (process.env.SIMILARITY_MEASURE === 'dot') {
+  } else if (similarityMeasure === 'dot') {
     return dot(x, y);
   }
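As a quick check on what the two measures compute (toy numbers, illustrative only):

// x = [3, 4], y = [3, 4]
// dot(x, y)              = 3*3 + 4*4 = 25   (grows with vector magnitude)
// cosineSimilarity(x, y) = 25 / (5 * 5) = 1 (normalized to [-1, 1])
// For embeddings of comparable scale the two orderings usually agree;
// cosine is the safer default when magnitudes vary.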
src/utils/documents.ts (new file, 99 lines)
@@ -0,0 +1,99 @@
import axios from 'axios';
import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
import logger from './logger';

export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
  const splitter = new RecursiveCharacterTextSplitter();

  let docs: Document[] = [];

  await Promise.all(
    links.map(async (link) => {
      link =
        link.startsWith('http://') || link.startsWith('https://')
          ? link
          : `https://${link}`;

      try {
        const res = await axios.get(link, {
          responseType: 'arraybuffer',
        });

        const isPdf = res.headers['content-type'] === 'application/pdf';

        if (isPdf) {
          const pdfText = await pdfParse(res.data);
          const parsedText = pdfText.text
            .replace(/(\r\n|\n|\r)/gm, ' ')
            .replace(/\s+/g, ' ')
            .trim();

          const splittedText = await splitter.splitText(parsedText);
          const title = 'PDF Document';

          const linkDocs = splittedText.map((text) => {
            return new Document({
              pageContent: text,
              metadata: {
                title: title,
                url: link,
              },
            });
          });

          docs.push(...linkDocs);
          return;
        }

        const parsedText = htmlToText(res.data.toString('utf8'), {
          selectors: [
            {
              selector: 'a',
              options: {
                ignoreHref: true,
              },
            },
          ],
        })
          .replace(/(\r\n|\n|\r)/gm, ' ')
          .replace(/\s+/g, ' ')
          .trim();

        const splittedText = await splitter.splitText(parsedText);
        const title = res.data
          .toString('utf8')
          .match(/<title>(.*?)<\/title>/)?.[1];

        const linkDocs = splittedText.map((text) => {
          return new Document({
            pageContent: text,
            metadata: {
              title: title || link,
              url: link,
            },
          });
        });

        docs.push(...linkDocs);
      } catch (err) {
        logger.error(
          `Error at generating documents from links: ${err.message}`,
        );
        docs.push(
          new Document({
            pageContent: `Failed to retrieve content from the link: ${err.message}`,
            metadata: {
              title: 'Failed to retrieve content',
              url: link,
            },
          }),
        );
      }
    }),
  );

  return docs;
};
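A sketch of the loader in use; the link is illustrative:

// 'example.com/article' gets an https:// prefix automatically; PDF and HTML
// responses are split into chunked Documents with title/url metadata.
const docs = await getDocumentsFromLinks({ links: ['example.com/article'] });
console.log(docs.length, docs[0]?.metadata.title);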
src/utils/files.ts (new file, 16 lines)
@@ -0,0 +1,16 @@
import path from 'path';
import fs from 'fs';

export const getFileDetails = (fileId: string) => {
  const fileLoc = path.join(
    process.cwd(),
    './uploads',
    fileId + '-extracted.json',
  );

  const parsedFile = JSON.parse(fs.readFileSync(fileLoc, 'utf8'));

  return {
    name: parsedFile.title,
    fileId: fileId,
  };
};
src/utils/logger.ts (new file, 22 lines)
@@ -0,0 +1,22 @@
import winston from 'winston';

const logger = winston.createLogger({
  level: 'info',
  transports: [
    new winston.transports.Console({
      format: winston.format.combine(
        winston.format.colorize(),
        winston.format.simple(),
      ),
    }),
    new winston.transports.File({
      filename: 'app.log',
      format: winston.format.combine(
        winston.format.timestamp(),
        winston.format.json(),
      ),
    }),
  ],
});

export default logger;
@@ -1,11 +1,111 @@
 import { WebSocket } from 'ws';
 import { handleMessage } from './messageHandler';
+import {
+  getAvailableEmbeddingModelProviders,
+  getAvailableChatModelProviders,
+} from '../lib/providers';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import type { Embeddings } from '@langchain/core/embeddings';
+import type { IncomingMessage } from 'http';
+import logger from '../utils/logger';
+import { ChatOpenAI } from '@langchain/openai';

-export const handleConnection = (ws: WebSocket) => {
+export const handleConnection = async (
+  ws: WebSocket,
+  request: IncomingMessage,
+) => {
+  try {
+    const searchParams = new URL(request.url, `http://${request.headers.host}`)
+      .searchParams;
+
+    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
+      getAvailableChatModelProviders(),
+      getAvailableEmbeddingModelProviders(),
+    ]);
+
+    const chatModelProvider =
+      searchParams.get('chatModelProvider') ||
+      Object.keys(chatModelProviders)[0];
+    const chatModel =
+      searchParams.get('chatModel') ||
+      Object.keys(chatModelProviders[chatModelProvider])[0];
+
+    const embeddingModelProvider =
+      searchParams.get('embeddingModelProvider') ||
+      Object.keys(embeddingModelProviders)[0];
+    const embeddingModel =
+      searchParams.get('embeddingModel') ||
+      Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
+
+    let llm: BaseChatModel | undefined;
+    let embeddings: Embeddings | undefined;
+
+    if (
+      chatModelProviders[chatModelProvider] &&
+      chatModelProviders[chatModelProvider][chatModel] &&
+      chatModelProvider != 'custom_openai'
+    ) {
+      llm = chatModelProviders[chatModelProvider][chatModel]
+        .model as unknown as BaseChatModel | undefined;
+    } else if (chatModelProvider == 'custom_openai') {
+      llm = new ChatOpenAI({
+        modelName: chatModel,
+        openAIApiKey: searchParams.get('openAIApiKey'),
+        temperature: 0.7,
+        configuration: {
+          baseURL: searchParams.get('openAIBaseURL'),
+        },
+      }) as unknown as BaseChatModel;
+    }
+
+    if (
+      embeddingModelProviders[embeddingModelProvider] &&
+      embeddingModelProviders[embeddingModelProvider][embeddingModel]
+    ) {
+      embeddings = embeddingModelProviders[embeddingModelProvider][
+        embeddingModel
+      ].model as Embeddings | undefined;
+    }
+
+    if (!llm || !embeddings) {
+      ws.send(
+        JSON.stringify({
+          type: 'error',
+          data: 'Invalid LLM or embeddings model selected, please refresh the page and try again.',
+          key: 'INVALID_MODEL_SELECTED',
+        }),
+      );
+      ws.close();
+    }
+
+    const interval = setInterval(() => {
+      if (ws.readyState === ws.OPEN) {
+        ws.send(
+          JSON.stringify({
+            type: 'signal',
+            data: 'open',
+          }),
+        );
+        clearInterval(interval);
+      }
+    }, 5);
+
     ws.on(
       'message',
-      async (message) => await handleMessage(message.toString(), ws),
+      async (message) =>
+        await handleMessage(message.toString(), ws, llm, embeddings),
     );

-  ws.on('close', () => console.log('Connection closed'));
+    ws.on('close', () => logger.debug('Connection closed'));
+  } catch (err) {
+    ws.send(
+      JSON.stringify({
+        type: 'error',
+        data: 'Internal server error.',
+        key: 'INTERNAL_SERVER_ERROR',
+      }),
+    );
+    ws.close();
+    logger.error(err);
+  }
 };
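The query parameters read above imply a connection URL of roughly this shape; the host, port, and model names are assumptions for illustration, not values fixed by this diff:

// Hypothetical client-side connection:
const ws = new WebSocket(
  'ws://localhost:3001?chatModelProvider=openai&chatModel=gpt-4o-mini' +
    '&embeddingModelProvider=openai&embeddingModel=text-embedding-3-small',
);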
@@ -1,34 +1,99 @@
 import { EventEmitter, WebSocket } from 'ws';
 import { BaseMessage, AIMessage, HumanMessage } from '@langchain/core/messages';
-import handleWebSearch from '../agents/webSearchAgent';
-import handleAcademicSearch from '../agents/academicSearchAgent';
-import handleWritingAssistant from '../agents/writingAssistant';
-import handleWolframAlphaSearch from '../agents/wolframAlphaSearchAgent';
-import handleYoutubeSearch from '../agents/youtubeSearchAgent';
-import handleRedditSearch from '../agents/redditSearchAgent';
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import type { Embeddings } from '@langchain/core/embeddings';
 import logger from '../utils/logger';
+import db from '../db';
+import { chats, messages as messagesSchema } from '../db/schema';
+import { eq, asc, gt } from 'drizzle-orm';
+import crypto from 'crypto';
+import { getFileDetails } from '../utils/files';
+import MetaSearchAgent, {
+  MetaSearchAgentType,
+} from '../search/metaSearchAgent';
+import prompts from '../prompts';

 type Message = {
-  type: string;
+  messageId: string;
+  chatId: string;
   content: string;
-  copilot: boolean;
-  focusMode: string;
-  history: Array<[string, string]>;
 };

-const searchHandlers = {
-  webSearch: handleWebSearch,
-  academicSearch: handleAcademicSearch,
-  writingAssistant: handleWritingAssistant,
-  wolframAlphaSearch: handleWolframAlphaSearch,
-  youtubeSearch: handleYoutubeSearch,
-  redditSearch: handleRedditSearch,
+type WSMessage = {
+  message: Message;
+  optimizationMode: 'speed' | 'balanced' | 'quality';
+  type: string;
+  focusMode: string;
+  history: Array<[string, string]>;
+  files: Array<string>;
+};
+
+export const searchHandlers = {
+  webSearch: new MetaSearchAgent({
+    activeEngines: [],
+    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
+    responsePrompt: prompts.webSearchResponsePrompt,
+    rerank: true,
+    rerankThreshold: 0.3,
+    searchWeb: true,
+    summarizer: true,
+  }),
+  academicSearch: new MetaSearchAgent({
+    activeEngines: ['arxiv', 'google scholar', 'pubmed'],
+    queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
+    responsePrompt: prompts.academicSearchResponsePrompt,
+    rerank: true,
+    rerankThreshold: 0,
+    searchWeb: true,
+    summarizer: false,
+  }),
+  writingAssistant: new MetaSearchAgent({
+    activeEngines: [],
+    queryGeneratorPrompt: '',
+    responsePrompt: prompts.writingAssistantPrompt,
+    rerank: true,
+    rerankThreshold: 0,
+    searchWeb: false,
+    summarizer: false,
+  }),
+  wolframAlphaSearch: new MetaSearchAgent({
+    activeEngines: ['wolframalpha'],
+    queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
+    responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
+    rerank: false,
+    rerankThreshold: 0,
+    searchWeb: true,
+    summarizer: false,
+  }),
+  youtubeSearch: new MetaSearchAgent({
+    activeEngines: ['youtube'],
+    queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
+    responsePrompt: prompts.youtubeSearchResponsePrompt,
+    rerank: true,
+    rerankThreshold: 0.3,
+    searchWeb: true,
+    summarizer: false,
+  }),
+  redditSearch: new MetaSearchAgent({
+    activeEngines: ['reddit'],
+    queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
+    responsePrompt: prompts.redditSearchResponsePrompt,
+    rerank: true,
+    rerankThreshold: 0.3,
+    searchWeb: true,
+    summarizer: false,
+  }),
 };

 const handleEmitterEvents = (
   emitter: EventEmitter,
   ws: WebSocket,
-  id: string,
+  messageId: string,
+  chatId: string,
 ) => {
   let recievedMessage = '';
   let sources = [];

   emitter.on('data', (data) => {
     const parsedData = JSON.parse(data);
     if (parsedData.type === 'response') {
@@ -36,39 +101,73 @@ const handleEmitterEvents = (
       JSON.stringify({
         type: 'message',
         data: parsedData.data,
-        messageId: id,
+        messageId: messageId,
       }),
     );
     recievedMessage += parsedData.data;
    } else if (parsedData.type === 'sources') {
      ws.send(
        JSON.stringify({
          type: 'sources',
          data: parsedData.data,
-          messageId: id,
+          messageId: messageId,
        }),
      );
      sources = parsedData.data;
    }
  });
  emitter.on('end', () => {
-    ws.send(JSON.stringify({ type: 'messageEnd', messageId: id }));
+    ws.send(JSON.stringify({ type: 'messageEnd', messageId: messageId }));
+
+    db.insert(messagesSchema)
+      .values({
+        content: recievedMessage,
+        chatId: chatId,
+        messageId: messageId,
+        role: 'assistant',
+        metadata: JSON.stringify({
+          createdAt: new Date(),
+          ...(sources && sources.length > 0 && { sources }),
+        }),
+      })
+      .execute();
  });
  emitter.on('error', (data) => {
    const parsedData = JSON.parse(data);
-    ws.send(JSON.stringify({ type: 'error', data: parsedData.data }));
+    ws.send(
+      JSON.stringify({
+        type: 'error',
+        data: parsedData.data,
+        key: 'CHAIN_ERROR',
+      }),
+    );
  });
};

-export const handleMessage = async (message: string, ws: WebSocket) => {
+export const handleMessage = async (
+  message: string,
+  ws: WebSocket,
+  llm: BaseChatModel,
+  embeddings: Embeddings,
+) => {
   try {
-    const parsedMessage = JSON.parse(message) as Message;
-    const id = Math.random().toString(36).substring(7);
+    const parsedWSMessage = JSON.parse(message) as WSMessage;
+    const parsedMessage = parsedWSMessage.message;
+
+    const humanMessageId =
+      parsedMessage.messageId ?? crypto.randomBytes(7).toString('hex');
+    const aiMessageId = crypto.randomBytes(7).toString('hex');

     if (!parsedMessage.content)
       return ws.send(
-        JSON.stringify({ type: 'error', data: 'Invalid message format' }),
+        JSON.stringify({
+          type: 'error',
+          data: 'Invalid message format',
+          key: 'INVALID_FORMAT',
+        }),
       );

-    const history: BaseMessage[] = parsedMessage.history.map((msg) => {
+    const history: BaseMessage[] = parsedWSMessage.history.map((msg) => {
       if (msg[0] === 'human') {
         return new HumanMessage({
           content: msg[1],
@@ -80,17 +179,84 @@ export const handleMessage = async (message: string, ws: WebSocket) => {
       }
     });

-    if (parsedMessage.type === 'message') {
-      const handler = searchHandlers[parsedMessage.focusMode];
+    if (parsedWSMessage.type === 'message') {
+      const handler: MetaSearchAgentType =
+        searchHandlers[parsedWSMessage.focusMode];
+
       if (handler) {
-        const emitter = handler(parsedMessage.content, history);
-        handleEmitterEvents(emitter, ws, id);
+        try {
+          const emitter = await handler.searchAndAnswer(
+            parsedMessage.content,
+            history,
+            llm,
+            embeddings,
+            parsedWSMessage.optimizationMode,
+            parsedWSMessage.files,
+          );
+
+          handleEmitterEvents(emitter, ws, aiMessageId, parsedMessage.chatId);
+
+          const chat = await db.query.chats.findFirst({
+            where: eq(chats.id, parsedMessage.chatId),
+          });
+
+          if (!chat) {
+            await db
+              .insert(chats)
+              .values({
+                id: parsedMessage.chatId,
+                title: parsedMessage.content,
+                createdAt: new Date().toString(),
+                focusMode: parsedWSMessage.focusMode,
+                files: parsedWSMessage.files.map(getFileDetails),
+              })
+              .execute();
+          }
+
+          const messageExists = await db.query.messages.findFirst({
+            where: eq(messagesSchema.messageId, humanMessageId),
+          });
+
+          if (!messageExists) {
+            await db
+              .insert(messagesSchema)
+              .values({
+                content: parsedMessage.content,
+                chatId: parsedMessage.chatId,
+                messageId: humanMessageId,
+                role: 'user',
+                metadata: JSON.stringify({
+                  createdAt: new Date(),
+                }),
+              })
+              .execute();
+          } else {
+            await db
+              .delete(messagesSchema)
+              .where(gt(messagesSchema.id, messageExists.id))
+              .execute();
+          }
+        } catch (err) {
+          console.log(err);
+        }
       } else {
-        ws.send(JSON.stringify({ type: 'error', data: 'Invalid focus mode' }));
+        ws.send(
+          JSON.stringify({
+            type: 'error',
+            data: 'Invalid focus mode',
+            key: 'INVALID_FOCUS_MODE',
+          }),
+        );
       }
     }
-  } catch (error) {
-    console.error('Failed to handle message', error);
-    ws.send(JSON.stringify({ type: 'error', data: 'Invalid message format' }));
+  } catch (err) {
+    ws.send(
+      JSON.stringify({
+        type: 'error',
+        data: 'Invalid message format',
+        key: 'INVALID_FORMAT',
+      }),
+    );
+    logger.error(`Failed to handle message: ${err}`);
   }
 };
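For reference, a frame the new handler accepts, matching the WSMessage type above; all values are illustrative:

ws.send(
  JSON.stringify({
    type: 'message',
    focusMode: 'webSearch',
    optimizationMode: 'balanced',
    history: [['human', 'What is Perplexica?']],
    files: [],
    message: {
      chatId: 'example-chat-id', // illustrative
      messageId: 'a1b2c3d4e5f6a7', // 14-char hex, as randomBytes(7) would produce
      content: 'What is Perplexica?',
    },
  }),
);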
@@ -1,15 +1,16 @@
 import { WebSocketServer } from 'ws';
 import { handleConnection } from './connectionManager';
 import http from 'http';
 import { getPort } from '../config';
+import logger from '../utils/logger';

 export const initServer = (
   server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
 ) => {
   const port = getPort();
   const wss = new WebSocketServer({ server });

-  wss.on('connection', (ws) => {
-    handleConnection(ws);
-  });
+  wss.on('connection', handleConnection);

-  console.log(`WebSocket server started on port ${process.env.PORT}`);
+  logger.info(`WebSocket server started on port ${port}`);
 };
@@ -1,7 +1,8 @@
 {
   "compilerOptions": {
     "lib": ["ESNext"],
-    "module": "commonjs",
+    "module": "Node16",
+    "moduleResolution": "Node16",
     "target": "ESNext",
     "outDir": "dist",
     "sourceMap": false,
ui/app/c/[chatId]/page.tsx (new file, 7 lines)
@@ -0,0 +1,7 @@
import ChatWindow from '@/components/ChatWindow';

const Page = ({ params }: { params: { chatId: string } }) => {
  return <ChatWindow id={params.chatId} />;
};

export default Page;
@@ -1,5 +1,113 @@
 'use client';

+import { Search } from 'lucide-react';
+import { useEffect, useState } from 'react';
+import Link from 'next/link';
+import { toast } from 'sonner';
+
+interface Discover {
+  title: string;
+  content: string;
+  url: string;
+  thumbnail: string;
+}
+
 const Page = () => {
-  return <div>page</div>;
+  const [discover, setDiscover] = useState<Discover[] | null>(null);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    const fetchData = async () => {
+      try {
+        const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
+          method: 'GET',
+          headers: {
+            'Content-Type': 'application/json',
+          },
+        });
+
+        const data = await res.json();
+
+        if (!res.ok) {
+          throw new Error(data.message);
+        }
+
+        data.blogs = data.blogs.filter((blog: Discover) => blog.thumbnail);
+
+        setDiscover(data.blogs);
+      } catch (err: any) {
+        console.error('Error fetching data:', err.message);
+        toast.error('Error fetching data');
+      } finally {
+        setLoading(false);
+      }
+    };
+
+    fetchData();
+  }, []);
+
+  return loading ? (
+    <div className="flex flex-row items-center justify-center min-h-screen">
+      <svg
+        aria-hidden="true"
+        className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
+        viewBox="0 0 100 101"
+        fill="none"
+        xmlns="http://www.w3.org/2000/svg"
+      >
+        <path
+          d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
+          fill="currentColor"
+        />
+        <path
+          d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
+          fill="currentFill"
+        />
+      </svg>
+    </div>
+  ) : (
+    <>
+      <div>
+        <div className="flex flex-col pt-4">
+          <div className="flex items-center">
+            <Search />
+            <h1 className="text-3xl font-medium p-2">Discover</h1>
+          </div>
+          <hr className="border-t border-[#2B2C2C] my-4 w-full" />
+        </div>
+
+        <div className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4 pb-28 lg:pb-8 w-full justify-items-center lg:justify-items-start">
+          {discover &&
+            discover?.map((item, i) => (
+              <Link
+                href={`/?q=Summary: ${item.url}`}
+                key={i}
+                className="max-w-sm rounded-lg overflow-hidden bg-light-secondary dark:bg-dark-secondary hover:-translate-y-[1px] transition duration-200"
+                target="_blank"
+              >
+                <img
+                  className="object-cover w-full aspect-video"
+                  src={
+                    new URL(item.thumbnail).origin +
+                    new URL(item.thumbnail).pathname +
+                    `?id=${new URL(item.thumbnail).searchParams.get('id')}`
+                  }
+                  alt={item.title}
+                />
+                <div className="px-6 py-4">
+                  <div className="font-bold text-lg mb-2">
+                    {item.title.slice(0, 100)}...
+                  </div>
+                  <p className="text-black-70 dark:text-white/70 text-sm">
+                    {item.content.slice(0, 100)}...
+                  </p>
+                </div>
+              </Link>
+            ))}
+        </div>
+      </div>
+    </>
+  );
 };

 export default Page;
@@ -3,6 +3,8 @@ import { Montserrat } from 'next/font/google';
 import './globals.css';
 import { cn } from '@/lib/utils';
 import Sidebar from '@/components/Sidebar';
+import { Toaster } from 'sonner';
+import ThemeProvider from '@/components/theme/Provider';

 const montserrat = Montserrat({
   weight: ['300', '400', '500', '700'],
@@ -23,9 +25,20 @@ export default function RootLayout({
   children: React.ReactNode;
 }>) {
   return (
-    <html className="h-full" lang="en">
+    <html className="h-full" lang="en" suppressHydrationWarning>
       <body className={cn('h-full', montserrat.className)}>
-        <Sidebar>{children}</Sidebar>
+        <ThemeProvider>
+          <Sidebar>{children}</Sidebar>
+          <Toaster
+            toastOptions={{
+              unstyled: true,
+              classNames: {
+                toast:
+                  'bg-light-primary dark:bg-dark-secondary dark:text-white/70 text-black-70 rounded-lg p-4 flex flex-row items-center space-x-2',
+              },
+            }}
+          />
+        </ThemeProvider>
       </body>
     </html>
   );
ui/app/library/layout.tsx (new file, 12 lines)
@@ -0,0 +1,12 @@
import { Metadata } from 'next';
import React from 'react';

export const metadata: Metadata = {
  title: 'Library - Perplexica',
};

const Layout = ({ children }: { children: React.ReactNode }) => {
  return <div>{children}</div>;
};

export default Layout;
114 ui/app/library/page.tsx Normal file
@@ -0,0 +1,114 @@
'use client';

import DeleteChat from '@/components/DeleteChat';
import { cn, formatTimeDifference } from '@/lib/utils';
import { BookOpenText, ClockIcon, Delete, ScanEye } from 'lucide-react';
import Link from 'next/link';
import { useEffect, useState } from 'react';

export interface Chat {
id: string;
title: string;
createdAt: string;
focusMode: string;
}

const Page = () => {
const [chats, setChats] = useState<Chat[]>([]);
const [loading, setLoading] = useState(true);

useEffect(() => {
const fetchChats = async () => {
setLoading(true);

const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});

const data = await res.json();

setChats(data.chats);
setLoading(false);
};

fetchChats();
}, []);

return loading ? (
<div className="flex flex-row items-center justify-center min-h-screen">
<svg
aria-hidden="true"
className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
) : (
<div>
<div className="flex flex-col pt-4">
<div className="flex items-center">
<BookOpenText />
<h1 className="text-3xl font-medium p-2">Library</h1>
</div>
<hr className="border-t border-[#2B2C2C] my-4 w-full" />
</div>
{chats.length === 0 && (
<div className="flex flex-row items-center justify-center min-h-screen">
<p className="text-black/70 dark:text-white/70 text-sm">
No chats found.
</p>
</div>
)}
{chats.length > 0 && (
<div className="flex flex-col pb-20 lg:pb-2">
{chats.map((chat, i) => (
<div
className={cn(
'flex flex-col space-y-4 py-6',
i !== chats.length - 1
? 'border-b border-white-200 dark:border-dark-200'
: '',
)}
key={i}
>
<Link
href={`/c/${chat.id}`}
className="text-black dark:text-white lg:text-xl font-medium truncate transition duration-200 hover:text-[#24A0ED] dark:hover:text-[#24A0ED] cursor-pointer"
>
{chat.title}
</Link>
<div className="flex flex-row items-center justify-between w-full">
<div className="flex flex-row items-center space-x-1 lg:space-x-1.5 text-black/70 dark:text-white/70">
<ClockIcon size={15} />
<p className="text-xs">
{formatTimeDifference(new Date(), chat.createdAt)} Ago
</p>
</div>
<DeleteChat
chatId={chat.id}
chats={chats}
setChats={setChats}
/>
</div>
</div>
))}
</div>
)}
</div>
);
};

export default Page;

@@ -1,5 +1,6 @@
import ChatWindow from '@/components/ChatWindow';
import { Metadata } from 'next';
import { Suspense } from 'react';

export const metadata: Metadata = {
title: 'Chat - Perplexica',
@@ -9,7 +10,9 @@ export const metadata: Metadata = {
const Home = () => {
return (
<div>
<Suspense>
<ChatWindow />
</Suspense>
</div>
);
};

@@ -1,8 +1,8 @@
'use client';

import { useEffect, useRef, useState } from 'react';
import { Fragment, useEffect, useRef, useState } from 'react';
import MessageInput from './MessageInput';
import { Message } from './ChatWindow';
import { File, Message } from './ChatWindow';
import MessageBox from './MessageBox';
import MessageBoxLoading from './MessageBoxLoading';

@@ -12,12 +12,20 @@ const Chat = ({
sendMessage,
messageAppeared,
rewrite,
fileIds,
setFileIds,
files,
setFiles,
}: {
messages: Message[];
sendMessage: (message: string) => void;
loading: boolean;
messageAppeared: boolean;
rewrite: (messageId: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [dividerWidth, setDividerWidth] = useState(0);
const dividerRef = useRef<HTMLDivElement | null>(null);
@@ -53,7 +61,7 @@ const Chat = ({
const isLast = i === messages.length - 1;

return (
<>
<Fragment key={msg.messageId}>
<MessageBox
key={i}
message={msg}
@@ -63,11 +71,12 @@ const Chat = ({
dividerRef={isLast ? dividerRef : undefined}
isLast={isLast}
rewrite={rewrite}
sendMessage={sendMessage}
/>
{!isLast && msg.role === 'assistant' && (
<div className="h-px w-full bg-[#1C1C1C]" />
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
)}
</>
</Fragment>
);
})}
{loading && !messageAppeared && <MessageBoxLoading />}
@@ -77,7 +86,14 @@ const Chat = ({
className="bottom-24 lg:bottom-10 fixed z-40"
style={{ width: dividerWidth }}
>
<MessageInput sendMessage={sendMessage} />
<MessageInput
loading={loading}
sendMessage={sendMessage}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
</div>
)}
</div>

@@ -1,50 +1,366 @@
'use client';

import { useEffect, useState } from 'react';
import { useEffect, useRef, useState } from 'react';
import { Document } from '@langchain/core/documents';
import Navbar from './Navbar';
import Chat from './Chat';
import EmptyChat from './EmptyChat';
import crypto from 'crypto';
import { toast } from 'sonner';
import { useSearchParams } from 'next/navigation';
import { getSuggestions } from '@/lib/actions';
import Error from 'next/error';

export type Message = {
id: string;
messageId: string;
chatId: string;
createdAt: Date;
content: string;
role: 'user' | 'assistant';
suggestions?: string[];
sources?: Document[];
};

const useSocket = (url: string) => {
export interface File {
fileName: string;
fileExtension: string;
fileId: string;
}

const useSocket = (
url: string,
setIsWSReady: (ready: boolean) => void,
setError: (error: boolean) => void,
) => {
const [ws, setWs] = useState<WebSocket | null>(null);

useEffect(() => {
if (!ws) {
const ws = new WebSocket(url);
ws.onopen = () => {
console.log('[DEBUG] open');
setWs(ws);
};
const connectWs = async () => {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem(
'embeddingModelProvider',
);

const providers = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/models`,
{
headers: {
'Content-Type': 'application/json',
},
},
).then(async (res) => await res.json());

if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;

chatModelProvider = Object.keys(chatModelProviders)[0];

if (chatModelProvider === 'custom_openai') {
toast.error(
'Seems like you are using the custom OpenAI provider, please open the settings and configure the API key and base URL',
);
setError(true);
return;
} else {
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return toast.error('No chat models available');
}
}

return () => {
ws?.close();
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;

if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');

embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}

localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;

if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
chatModelProvider = Object.keys(chatModelProviders)[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}

if (
chatModelProvider &&
chatModelProvider != 'custom_openai' &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
localStorage.setItem('chatModel', chatModel);
}

if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
}

if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}

const wsURL = new URL(url);
const searchParams = new URLSearchParams({});

searchParams.append('chatModel', chatModel!);
searchParams.append('chatModelProvider', chatModelProvider);

if (chatModelProvider === 'custom_openai') {
searchParams.append(
'openAIApiKey',
localStorage.getItem('openAIApiKey')!,
);
searchParams.append(
'openAIBaseURL',
localStorage.getItem('openAIBaseURL')!,
);
}

searchParams.append('embeddingModel', embeddingModel!);
searchParams.append('embeddingModelProvider', embeddingModelProvider);

wsURL.search = searchParams.toString();

const ws = new WebSocket(wsURL.toString());

const timeoutId = setTimeout(() => {
if (ws.readyState !== 1) {
toast.error(
'Failed to connect to the server. Please try again later.',
);
}
}, 10000);

ws.addEventListener('message', (e) => {
const data = JSON.parse(e.data);
if (data.type === 'signal' && data.data === 'open') {
const interval = setInterval(() => {
if (ws.readyState === 1) {
setIsWSReady(true);
clearInterval(interval);
}
}, 5);
clearTimeout(timeoutId);
console.log('[DEBUG] opened');
}
if (data.type === 'error') {
toast.error(data.data);
}
});

ws.onerror = () => {
clearTimeout(timeoutId);
setError(true);
toast.error('WebSocket connection error.');
};

ws.onclose = () => {
clearTimeout(timeoutId);
setError(true);
console.log('[DEBUG] closed');
};
}, [ws, url]);

setWs(ws);
};

connectWs();
}
}, [ws, url, setIsWSReady, setError]);

return ws;
};

const ChatWindow = () => {
const ws = useSocket(process.env.NEXT_PUBLIC_WS_URL!);
const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
const [messages, setMessages] = useState<Message[]>([]);
const loadMessages = async (
chatId: string,
setMessages: (messages: Message[]) => void,
setIsMessagesLoaded: (loaded: boolean) => void,
setChatHistory: (history: [string, string][]) => void,
setFocusMode: (mode: string) => void,
setNotFound: (notFound: boolean) => void,
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
},
);

if (res.status === 404) {
setNotFound(true);
setIsMessagesLoaded(true);
return;
}

const data = await res.json();

const messages = data.messages.map((msg: any) => {
return {
...msg,
...JSON.parse(msg.metadata),
};
}) as Message[];

setMessages(messages);

const history = messages.map((msg) => {
return [msg.role, msg.content];
}) as [string, string][];

console.log('[DEBUG] messages loaded');

document.title = messages[0].content;

const files = data.chat.files.map((file: any) => {
return {
fileName: file.name,
fileExtension: file.name.split('.').pop(),
fileId: file.fileId,
};
});

setFiles(files);
setFileIds(files.map((file: File) => file.fileId));

setChatHistory(history);
setFocusMode(data.chat.focusMode);
setIsMessagesLoaded(true);
};

const ChatWindow = ({ id }: { id?: string }) => {
const searchParams = useSearchParams();
const initialMessage = searchParams.get('q');

const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);

const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);

const [isWSReady, setIsWSReady] = useState(false);
const ws = useSocket(
process.env.NEXT_PUBLIC_WS_URL!,
setIsWSReady,
setHasError,
);

const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
const [focusMode, setFocusMode] = useState('webSearch');

const sendMessage = async (message: string) => {
const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
const [messages, setMessages] = useState<Message[]>([]);

const [files, setFiles] = useState<File[]>([]);
const [fileIds, setFileIds] = useState<string[]>([]);

const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');

const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);

const [notFound, setNotFound] = useState(false);

useEffect(() => {
if (
chatId &&
!newChatCreated &&
!isMessagesLoaded &&
messages.length === 0
) {
loadMessages(
chatId,
setMessages,
setIsMessagesLoaded,
setChatHistory,
setFocusMode,
setNotFound,
setFiles,
setFileIds,
);
} else if (!chatId) {
setNewChatCreated(true);
setIsMessagesLoaded(true);
setChatId(crypto.randomBytes(20).toString('hex'));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);

useEffect(() => {
return () => {
if (ws?.readyState === 1) {
ws.close();
console.log('[DEBUG] closed');
}
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);

const messagesRef = useRef<Message[]>([]);

useEffect(() => {
messagesRef.current = messages;
}, [messages]);

useEffect(() => {
if (isMessagesLoaded && isWSReady) {
setIsReady(true);
console.log('[DEBUG] ready');
}
}, [isMessagesLoaded, isWSReady]);

const sendMessage = async (message: string, messageId?: string) => {
if (loading) return;

setLoading(true);
setMessageAppeared(false);

@@ -52,11 +368,19 @@ const ChatWindow = () => {
let recievedMessage = '';
let added = false;

messageId = messageId ?? crypto.randomBytes(7).toString('hex');

ws?.send(
JSON.stringify({
type: 'message',
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: [...chatHistory, ['human', message]],
}),
);
@@ -65,15 +389,22 @@ const ChatWindow = () => {
...prevMessages,
{
content: message,
id: Math.random().toString(36).substring(7),
messageId: messageId,
chatId: chatId!,
role: 'user',
createdAt: new Date(),
},
]);

const messageHandler = (e: MessageEvent) => {
const messageHandler = async (e: MessageEvent) => {
const data = JSON.parse(e.data);

if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
return;
}

if (data.type === 'sources') {
sources = data.data;
if (!added) {
@@ -81,7 +412,8 @@ const ChatWindow = () => {
...prevMessages,
{
content: '',
id: data.messageId,
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
@@ -98,7 +430,8 @@ const ChatWindow = () => {
...prevMessages,
{
content: data.data,
id: data.messageId,
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
@@ -109,7 +442,7 @@ const ChatWindow = () => {

setMessages((prev) =>
prev.map((message) => {
if (message.id === data.messageId) {
if (message.messageId === data.messageId) {
return { ...message, content: message.content + data.data };
}

@@ -127,8 +460,28 @@ const ChatWindow = () => {
['human', message],
['assistant', recievedMessage],
]);

ws?.removeEventListener('message', messageHandler);
setLoading(false);

const lastMsg = messagesRef.current[messagesRef.current.length - 1];

if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
lastMsg.sources.length > 0 &&
!lastMsg.suggestions
) {
const suggestions = await getSuggestions(messagesRef.current);
setMessages((prev) =>
prev.map((msg) => {
if (msg.messageId === lastMsg.messageId) {
return { ...msg, suggestions: suggestions };
}
return msg;
}),
);
}
}
};

@@ -136,7 +489,7 @@ const ChatWindow = () => {
};

const rewrite = (messageId: string) => {
const index = messages.findIndex((msg) => msg.id === messageId);
const index = messages.findIndex((msg) => msg.messageId === messageId);

if (index === -1) return;

@@ -149,20 +502,44 @@ const ChatWindow = () => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});

sendMessage(message.content);
sendMessage(message.content, message.messageId);
};

useEffect(() => {
if (isReady && initialMessage && ws?.readyState === 1) {
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [ws?.readyState, isReady, initialMessage, isWSReady]);

if (hasError) {
return (
<div className="flex flex-col items-center justify-center min-h-screen">
<p className="dark:text-white/70 text-black/70 text-sm">
Failed to connect to the server. Please try again later.
</p>
</div>
);
}

return isReady ? (
notFound ? (
<Error statusCode={404} />
) : (
<div>
{messages.length > 0 ? (
<>
<Navbar messages={messages} />
<Navbar chatId={chatId!} messages={messages} />
<Chat
loading={loading}
messages={messages}
sendMessage={sendMessage}
messageAppeared={messageAppeared}
rewrite={rewrite}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
</>
) : (
@@ -170,9 +547,35 @@ const ChatWindow = () => {
sendMessage={sendMessage}
focusMode={focusMode}
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
)}
</div>
)
) : (
<div className="flex flex-row items-center justify-center min-h-screen">
<svg
aria-hidden="true"
className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
);
};

128 ui/components/DeleteChat.tsx Normal file
@@ -0,0 +1,128 @@
import { Trash } from 'lucide-react';
import {
Description,
Dialog,
DialogBackdrop,
DialogPanel,
DialogTitle,
Transition,
TransitionChild,
} from '@headlessui/react';
import { Fragment, useState } from 'react';
import { toast } from 'sonner';
import { Chat } from '@/app/library/page';

const DeleteChat = ({
chatId,
chats,
setChats,
redirect = false,
}: {
chatId: string;
chats: Chat[];
setChats: (chats: Chat[]) => void;
redirect?: boolean;
}) => {
const [confirmationDialogOpen, setConfirmationDialogOpen] = useState(false);
const [loading, setLoading] = useState(false);

const handleDelete = async () => {
setLoading(true);
try {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
},
},
);

if (res.status != 200) {
throw new Error('Failed to delete chat');
}

const newChats = chats.filter((chat) => chat.id !== chatId);

setChats(newChats);

if (redirect) {
window.location.href = '/';
}
} catch (err: any) {
toast.error(err.message);
} finally {
setConfirmationDialogOpen(false);
setLoading(false);
}
};

return (
<>
<button
onClick={() => {
setConfirmationDialogOpen(true);
}}
className="bg-transparent text-red-400 hover:scale-105 transition duration-200"
>
<Trash size={17} />
</button>
<Transition appear show={confirmationDialogOpen} as={Fragment}>
<Dialog
as="div"
className="relative z-50"
onClose={() => {
if (!loading) {
setConfirmationDialogOpen(false);
}
}}
>
<DialogBackdrop className="fixed inset-0 bg-black/30" />
<div className="fixed inset-0 overflow-y-auto">
<div className="flex min-h-full items-center justify-center p-4 text-center">
<TransitionChild
as={Fragment}
enter="ease-out duration-200"
enterFrom="opacity-0 scale-95"
enterTo="opacity-100 scale-100"
leave="ease-in duration-100"
leaveFrom="opacity-100 scale-100"
leaveTo="opacity-0 scale-95"
>
<DialogPanel className="w-full max-w-md transform rounded-2xl bg-light-secondary dark:bg-dark-secondary border border-light-200 dark:border-dark-200 p-6 text-left align-middle shadow-xl transition-all">
<DialogTitle className="text-lg font-medium leading-6 dark:text-white">
Delete Confirmation
</DialogTitle>
<Description className="text-sm dark:text-white/70 text-black/70">
Are you sure you want to delete this chat?
</Description>
<div className="flex flex-row items-end justify-end space-x-4 mt-6">
<button
onClick={() => {
if (!loading) {
setConfirmationDialogOpen(false);
}
}}
className="text-black/50 dark:text-white/50 text-sm hover:text-black/70 hover:dark:text-white/70 transition duration-200"
>
Cancel
</button>
<button
onClick={handleDelete}
className="text-red-400 text-sm hover:text-red-500 transition duration200"
|
||||
>
Delete
</button>
</div>
</DialogPanel>
</TransitionChild>
</div>
</div>
</Dialog>
</Transition>
</>
);
};

export default DeleteChat;

@@ -1,25 +1,58 @@
import { Settings } from 'lucide-react';
import EmptyChatMessageInput from './EmptyChatMessageInput';
import SettingsDialog from './SettingsDialog';
import { useState } from 'react';
import { File } from './ChatWindow';

const EmptyChat = ({
sendMessage,
focusMode,
setFocusMode,
optimizationMode,
setOptimizationMode,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [isSettingsOpen, setIsSettingsOpen] = useState(false);

return (
<div className="relative">
<SettingsDialog isOpen={isSettingsOpen} setIsOpen={setIsSettingsOpen} />
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
<Settings
className="cursor-pointer lg:hidden"
onClick={() => setIsSettingsOpen(true)}
/>
</div>
<div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-8">
<h2 className="text-white/70 text-3xl font-medium -mt-8">
<h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
Research begins here.
</h2>
<EmptyChatMessageInput
sendMessage={sendMessage}
focusMode={focusMode}
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
</div>
</div>
);
};

@@ -1,20 +1,62 @@
import { ArrowRight } from 'lucide-react';
import { useState } from 'react';
import { useEffect, useRef, useState } from 'react';
import TextareaAutosize from 'react-textarea-autosize';
import { Attach, CopilotToggle, Focus } from './MessageInputActions';
import CopilotToggle from './MessageInputActions/Copilot';
import Focus from './MessageInputActions/Focus';
import Optimization from './MessageInputActions/Optimization';
import Attach from './MessageInputActions/Attach';
import { File } from './ChatWindow';

const EmptyChatMessageInput = ({
sendMessage,
focusMode,
setFocusMode,
optimizationMode,
setOptimizationMode,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const [message, setMessage] = useState('');

const inputRef = useRef<HTMLTextAreaElement | null>(null);

useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
const activeElement = document.activeElement;

const isInputFocused =
activeElement?.tagName === 'INPUT' ||
activeElement?.tagName === 'TEXTAREA' ||
activeElement?.hasAttribute('contenteditable');

if (e.key === '/' && !isInputFocused) {
e.preventDefault();
inputRef.current?.focus();
}
};

document.addEventListener('keydown', handleKeyDown);

inputRef.current?.focus();

return () => {
document.removeEventListener('keydown', handleKeyDown);
};
}, []);

return (
<form
onSubmit={(e) => {
@@ -31,27 +73,34 @@ const EmptyChatMessageInput = ({
}}
className="w-full"
>
<div className="flex flex-col bg-[#111111] px-5 pt-5 pb-2 rounded-lg w-full border border-[#1C1C1C]">
<div className="flex flex-col bg-light-secondary dark:bg-dark-secondary px-5 pt-5 pb-2 rounded-lg w-full border border-light-200 dark:border-dark-200">
<TextareaAutosize
ref={inputRef}
value={message}
onChange={(e) => setMessage(e.target.value)}
minRows={2}
className="bg-transparent placeholder:text-white/50 text-sm text-white resize-none focus:outline-none w-full max-h-24 lg:max-h-36 xl:max-h-48"
className="bg-transparent placeholder:text-black/50 dark:placeholder:text-white/50 text-sm text-black dark:text-white resize-none focus:outline-none w-full max-h-24 lg:max-h-36 xl:max-h-48"
placeholder="Ask anything..."
/>
<div className="flex flex-row items-center justify-between mt-4">
<div className="flex flex-row items-center space-x-1 -mx-2">
<div className="flex flex-row items-center space-x-2 lg:space-x-4">
<Focus focusMode={focusMode} setFocusMode={setFocusMode} />
{/* <Attach /> */}
<Attach
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
showText
/>
</div>
<div className="flex flex-row items-center space-x-4 -mx-2">
<CopilotToggle
copilotEnabled={copilotEnabled}
setCopilotEnabled={setCopilotEnabled}
<div className="flex flex-row items-center space-x-1 sm:space-x-4">
<Optimization
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
/>
<button
disabled={message.trim().length === 0}
className="bg-[#24A0ED] text-white disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#ececec21] rounded-full p-2"
className="bg-[#24A0ED] text-white disabled:text-black/50 dark:disabled:text-white/50 disabled:bg-[#e0e0dc] dark:disabled:bg-[#ececec21] hover:bg-opacity-85 transition duration-100 rounded-full p-2"
>
<ArrowRight className="bg-background" size={17} />
</button>

@@ -1,6 +1,6 @@
const Layout = ({ children }: { children: React.ReactNode }) => {
return (
<main className="lg:pl-20 bg-[#0A0A0A] min-h-screen">
<main className="lg:pl-20 bg-light-primary dark:bg-dark-primary min-h-screen">
<div className="max-w-screen-lg lg:mx-auto mx-4">{children}</div>
</main>
);

@@ -19,7 +19,7 @@ const Copy = ({
setCopied(true);
setTimeout(() => setCopied(false), 1000);
}}
className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white"
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{copied ? <Check size={18} /> : <ClipboardList size={18} />}
</button>

@@ -10,9 +10,10 @@ const Rewrite = ({
return (
<button
onClick={() => rewrite(messageId)}
className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white"
className="py-2 px-3 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white flex flex-row items-center space-x-1"
>
<ArrowLeftRight size={18} />
<p className="text-xs font-medium">Rewrite</p>
</button>
);
};

@@ -1,3 +1,5 @@
'use client';

/* eslint-disable @next/next/no-img-element */
import React, { MutableRefObject, useEffect, useState } from 'react';
import { Message } from './ChatWindow';
@@ -5,17 +7,18 @@ import { cn } from '@/lib/utils';
import {
BookCopy,
Disc3,
FilePen,
PlusIcon,
Share,
ThumbsDown,
VideoIcon,
Volume2,
StopCircle,
Layers3,
Plus,
} from 'lucide-react';
import Markdown from 'markdown-to-jsx';
import Copy from './MessageActions/Copy';
import Rewrite from './MessageActions/Rewrite';
import MessageSources from './MessageSources';
import SearchImages from './SearchImages';
import SearchVideos from './SearchVideos';
import { useSpeech } from 'react-text-to-speech';

const MessageBox = ({
message,
@@ -25,6 +28,7 @@ const MessageBox = ({
dividerRef,
isLast,
rewrite,
sendMessage,
}: {
message: Message;
messageIndex: number;
@@ -33,33 +37,39 @@ const MessageBox = ({
dividerRef?: MutableRefObject<HTMLDivElement | null>;
isLast: boolean;
rewrite: (messageId: string) => void;
sendMessage: (message: string) => void;
}) => {
const [parsedMessage, setParsedMessage] = useState(message.content);
const [speechMessage, setSpeechMessage] = useState(message.content);

useEffect(() => {
const regex = /\[(\d+)\]/g;

if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length > 0
) {
const regex = /\[(\d+)\]/g;

return setParsedMessage(
message.content.replace(
regex,
(_, number) =>
`<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-[#1C1C1C] px-1 rounded ml-1 no-underline text-xs text-white/70 relative">${number}</a>`,
`<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
),
);
}

setSpeechMessage(message.content.replace(regex, ''));
setParsedMessage(message.content);
}, [message.content, message.sources, message.role]);

const { speechStatus, start, stop } = useSpeech({ text: speechMessage });

return (
<div>
{message.role === 'user' && (
<div className={cn('w-full', messageIndex === 0 ? 'pt-16' : 'pt-8')}>
<h2 className="text-white font-medium text-3xl lg:w-9/12">
<h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
{message.content}
</h2>
</div>
@@ -74,8 +84,10 @@ const MessageBox = ({
{message.sources && message.sources.length > 0 && (
<div className="flex flex-col space-y-2">
<div className="flex flex-row items-center space-x-2">
<BookCopy className="text-white" size={20} />
<h3 className="text-white font-medium text-xl">Sources</h3>
<BookCopy className="text-black dark:text-white" size={20} />
<h3 className="text-black dark:text-white font-medium text-xl">
Sources
</h3>
</div>
<MessageSources sources={message.sources} />
</div>
@@ -84,46 +96,102 @@ const MessageBox = ({
<div className="flex flex-row items-center space-x-2">
<Disc3
className={cn(
'text-white',
'text-black dark:text-white',
isLast && loading ? 'animate-spin' : 'animate-none',
)}
size={20}
/>
<h3 className="text-white font-medium text-xl">Answer</h3>
<h3 className="text-black dark:text-white font-medium text-xl">
Answer
</h3>
</div>
<Markdown className="prose max-w-none break-words prose-invert prose-p:leading-relaxed prose-pre:p-0 text-white text-sm md:text-base font-medium">
<Markdown
className={cn(
'prose dark:prose-invert prose-p:leading-relaxed prose-pre:p-0',
'max-w-none break-words text-black dark:text-white text-sm md:text-base font-medium',
)}
>
{parsedMessage}
</Markdown>
{!loading && (
<div className="flex flex-row items-center justify-between w-full text-white py-4 -mx-2">
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
<div className="flex flex-row items-center space-x-1">
<button className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white">
{/* <button className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black text-black dark:hover:text-white">
<Share size={18} />
</button>
<Rewrite rewrite={rewrite} messageId={message.id} />
</button> */}
<Rewrite rewrite={rewrite} messageId={message.messageId} />
</div>
<div className="flex flex-row items-center space-x-1">
<Copy initialMessage={message.content} message={message} />
<button className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white">
<FilePen size={18} />
</button>
<button className="p-2 text-white/70 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white">
<ThumbsDown size={18} />
<button
onClick={() => {
if (speechStatus === 'started') {
stop();
} else {
start();
}
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{speechStatus === 'started' ? (
<StopCircle size={18} />
) : (
<Volume2 size={18} />
)}
</button>
</div>
</div>
)}
{isLast &&
message.suggestions &&
message.suggestions.length > 0 &&
message.role === 'assistant' &&
!loading && (
<>
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
<div className="flex flex-col space-y-3 text-black dark:text-white">
<div className="flex flex-row items-center space-x-2 mt-4">
<Layers3 />
<h3 className="text-xl font-medium">Related</h3>
</div>
<div className="flex flex-col space-y-3">
{message.suggestions.map((suggestion, i) => (
<div
className="flex flex-col space-y-3 text-sm"
key={i}
>
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
<div
onClick={() => {
sendMessage(suggestion);
}}
className="cursor-pointer flex flex-row justify-between font-medium space-x-2 items-center"
>
<p className="transition duration-200 hover:text-[#24A0ED]">
{suggestion}
</p>
<Plus
size={20}
className="text-[#24A0ED] flex-shrink-0"
/>
</div>
</div>
))}
</div>
</div>
</>
)}
</div>
</div>
<div className="lg:sticky lg:top-20 flex flex-col items-center space-y-3 w-full lg:w-3/12 z-30 h-full pb-4">
<SearchImages query={history[messageIndex - 1].content} />
<div className="border border-dashed border-[#1C1C1C] px-4 py-2 flex flex-row items-center justify-between rounded-lg text-white text-sm w-full">
<div className="flex flex-row items-center space-x-2">
<VideoIcon size={17} />
<p>Search videos</p>
</div>
<PlusIcon className="text-[#24A0ED]" size={17} />
</div>
<SearchImages
query={history[messageIndex - 1].content}
chatHistory={history.slice(0, messageIndex - 1)}
/>
<SearchVideos
chatHistory={history.slice(0, messageIndex - 1)}
query={history[messageIndex - 1].content}
/>
</div>
</div>
)}

@@ -1,9 +1,9 @@
const MessageBoxLoading = () => {
return (
<div className="flex flex-col space-y-2 w-full lg:w-9/12 bg-[#111111] animate-pulse rounded-lg p-3">
<div className="h-2 rounded-full w-full bg-[#1c1c1c]" />
<div className="h-2 rounded-full w-9/12 bg-[#1c1c1c]" />
<div className="h-2 rounded-full w-10/12 bg-[#1c1c1c]" />
<div className="flex flex-col space-y-2 w-full lg:w-9/12 bg-light-primary dark:bg-dark-primary animate-pulse rounded-lg py-3">
<div className="h-2 rounded-full w-full bg-light-secondary dark:bg-dark-secondary" />
<div className="h-2 rounded-full w-9/12 bg-light-secondary dark:bg-dark-secondary" />
<div className="h-2 rounded-full w-10/12 bg-light-secondary dark:bg-dark-secondary" />
</div>
);
};

@@ -1,13 +1,26 @@
import { cn } from '@/lib/utils';
import { ArrowUp } from 'lucide-react';
import { useEffect, useState } from 'react';
import { useEffect, useRef, useState } from 'react';
import TextareaAutosize from 'react-textarea-autosize';
import { Attach, CopilotToggle } from './MessageInputActions';
import Attach from './MessageInputActions/Attach';
import CopilotToggle from './MessageInputActions/Copilot';
import { File } from './ChatWindow';
import AttachSmall from './MessageInputActions/AttachSmall';

const MessageInput = ({
sendMessage,
loading,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
loading: boolean;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const [message, setMessage] = useState('');
@@ -22,33 +35,66 @@ const MessageInput = ({
}
}, [textareaRows, mode, message]);

const inputRef = useRef<HTMLTextAreaElement | null>(null);

useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
const activeElement = document.activeElement;

const isInputFocused =
activeElement?.tagName === 'INPUT' ||
activeElement?.tagName === 'TEXTAREA' ||
activeElement?.hasAttribute('contenteditable');

if (e.key === '/' && !isInputFocused) {
e.preventDefault();
inputRef.current?.focus();
}
};

document.addEventListener('keydown', handleKeyDown);

return () => {
document.removeEventListener('keydown', handleKeyDown);
};
}, []);

return (
<form
onSubmit={(e) => {
if (loading) return;
e.preventDefault();
sendMessage(message);
setMessage('');
}}
onKeyDown={(e) => {
if (e.key === 'Enter' && !e.shiftKey) {
if (e.key === 'Enter' && !e.shiftKey && !loading) {
e.preventDefault();
sendMessage(message);
setMessage('');
}
}}
className={cn(
'bg-[#111111] p-4 flex items-center overflow-hidden border border-[#1C1C1C]',
'bg-light-secondary dark:bg-dark-secondary p-4 flex items-center overflow-hidden border border-light-200 dark:border-dark-200',
mode === 'multi' ? 'flex-col rounded-lg' : 'flex-row rounded-full',
)}
>
{mode === 'single' && <Attach />}
{mode === 'single' && (
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
)}
<TextareaAutosize
ref={inputRef}
value={message}
onChange={(e) => setMessage(e.target.value)}
onHeightChange={(height, props) => {
setTextareaRows(Math.ceil(height / props.rowHeight));
}}
className="transition bg-transparent placeholder:text-white/50 placeholder:text-sm text-sm text-white resize-none focus:outline-none w-full px-2 max-h-24 lg:max-h-36 xl:max-h-48 flex-grow flex-shrink"
className="transition bg-transparent dark:placeholder:text-white/50 placeholder:text-sm text-sm dark:text-white resize-none focus:outline-none w-full px-2 max-h-24 lg:max-h-36 xl:max-h-48 flex-grow flex-shrink"
placeholder="Ask a follow-up"
/>
{mode === 'single' && (
@@ -58,8 +104,8 @@ const MessageInput = ({
setCopilotEnabled={setCopilotEnabled}
/>
<button
disabled={message.trim().length === 0}
className="bg-[#24A0ED] text-white disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#ececec21] rounded-full p-2"
disabled={message.trim().length === 0 || loading}
className="bg-[#24A0ED] text-white disabled:text-black/50 dark:disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#e0e0dc79] dark:disabled:bg-[#ececec21] rounded-full p-2"
>
<ArrowUp className="bg-background" size={17} />
</button>
@@ -67,15 +113,20 @@ const MessageInput = ({
)}
{mode === 'multi' && (
<div className="flex flex-row items-center justify-between w-full pt-2">
<Attach />
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<div className="flex flex-row items-center space-x-4">
<CopilotToggle
copilotEnabled={copilotEnabled}
setCopilotEnabled={setCopilotEnabled}
/>
<button
disabled={message.trim().length === 0}
disabled={message.trim().length === 0 || loading}
className="bg-[#24A0ED] text-white text-black/50 dark:disabled:text-white/50 hover:bg-opacity-85 transition duration-100 disabled:bg-[#e0e0dc79] dark:disabled:bg-[#ececec21] rounded-full p-2"
|
||||
>
<ArrowUp className="bg-background" size={17} />
</button>

185 ui/components/MessageInputActions/Attach.tsx Normal file
@@ -0,0 +1,185 @@
import { cn } from '@/lib/utils';
import {
Popover,
PopoverButton,
PopoverPanel,
Transition,
} from '@headlessui/react';
import { CopyPlus, File, LoaderCircle, Plus, Trash } from 'lucide-react';
import { Fragment, useRef, useState } from 'react';
import { File as FileType } from '../ChatWindow';

const Attach = ({
fileIds,
setFileIds,
showText,
files,
setFiles,
}: {
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
showText?: boolean;
files: FileType[];
setFiles: (files: FileType[]) => void;
}) => {
const [loading, setLoading] = useState(false);
const fileInputRef = useRef<any>();

const handleChange = async (e: React.ChangeEvent<HTMLInputElement>) => {
setLoading(true);
const data = new FormData();

for (let i = 0; i < e.target.files!.length; i++) {
data.append('files', e.target.files![i]);
}

const embeddingModelProvider = localStorage.getItem(
'embeddingModelProvider',
);
const embeddingModel = localStorage.getItem('embeddingModel');

data.append('embedding_model_provider', embeddingModelProvider!);
data.append('embedding_model', embeddingModel!);

const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
method: 'POST',
body: data,
});

const resData = await res.json();

setFiles([...files, ...resData.files]);
setFileIds([...fileIds, ...resData.files.map((file: any) => file.fileId)]);
setLoading(false);
};

return loading ? (
<div className="flex flex-row items-center justify-between space-x-1">
<LoaderCircle size={18} className="text-sky-400 animate-spin" />
<p className="text-sky-400 inline whitespace-nowrap text-xs font-medium">
Uploading..
</p>
</div>
) : files.length > 0 ? (
<Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg">
<PopoverButton
type="button"
className={cn(
'flex flex-row items-center justify-between space-x-1 p-2 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary active:scale-95 transition duration-200 hover:text-black dark:hover:text-white',
files.length > 0 ? '-ml-2 lg:-ml-3' : '',
)}
>
{files.length > 1 && (
<>
<File size={19} className="text-sky-400" />
<p className="text-sky-400 inline whitespace-nowrap text-xs font-medium">
{files.length} files
</p>
</>
)}

{files.length === 1 && (
<>
<File size={18} className="text-sky-400" />
<p className="text-sky-400 text-xs font-medium">
{files[0].fileName.length > 10
? files[0].fileName.replace(/\.\w+$/, '').substring(0, 3) +
'...' +
files[0].fileExtension
: files[0].fileName}
</p>
</>
)}
</PopoverButton>
<Transition
as={Fragment}
enter="transition ease-out duration-150"
enterFrom="opacity-0 translate-y-1"
enterTo="opacity-100 translate-y-0"
leave="transition ease-in duration-150"
leaveFrom="opacity-100 translate-y-0"
leaveTo="opacity-0 translate-y-1"
>
<PopoverPanel className="absolute z-10 w-64 md:w-[350px] right-0">
<div className="bg-light-primary dark:bg-dark-primary border rounded-md border-light-200 dark:border-dark-200 w-full max-h-[200px] md:max-h-none overflow-y-auto flex flex-col">
<div className="flex flex-row items-center justify-between px-3 py-2">
<h4 className="text-black dark:text-white font-medium text-sm">
Attached files
</h4>
<div className="flex flex-row items-center space-x-4">
<button
type="button"
onClick={() => fileInputRef.current.click()}
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
>
<input
type="file"
onChange={handleChange}
ref={fileInputRef}
accept=".pdf,.docx,.txt"
multiple
hidden
/>
<Plus size={18} />
<p className="text-xs">Add</p>
</button>
<button
onClick={() => {
setFiles([]);
setFileIds([]);
}}
className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
>
<Trash size={14} />
<p className="text-xs">Clear</p>
</button>
</div>
</div>
<div className="h-[0.5px] mx-2 bg-white/10" />
<div className="flex flex-col items-center">
{files.map((file, i) => (
<div
key={i}
className="flex flex-row items-center justify-start w-full space-x-3 p-3"
>
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
</div>
<p className="text-white/70 text-sm">
{file.fileName.length > 25
? file.fileName.replace(/\.\w+$/, '').substring(0, 25) +
'...' +
file.fileExtension
: file.fileName}
</p>
</div>
))}
</div>
</div>
</PopoverPanel>
</Transition>
</Popover>
) : (
<button
type="button"
onClick={() => fileInputRef.current.click()}
className={cn(
'flex flex-row items-center space-x-1 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white',
showText ? '' : 'p-2',
)}
>
<input
type="file"
onChange={handleChange}
ref={fileInputRef}
accept=".pdf,.docx,.txt"
multiple
hidden
/>
<CopyPlus size={showText ? 18 : undefined} />
{showText && <p className="text-xs font-medium pl-[1px]">Attach</p>}
</button>
);
};

export default Attach;

ui/components/MessageInputActions/AttachSmall.tsx (new file, 153 lines)
@@ -0,0 +1,153 @@
import { cn } from '@/lib/utils';
import {
  Popover,
  PopoverButton,
  PopoverPanel,
  Transition,
} from '@headlessui/react';
import { CopyPlus, File, LoaderCircle, Plus, Trash } from 'lucide-react';
import { Fragment, useRef, useState } from 'react';
import { File as FileType } from '../ChatWindow';

const AttachSmall = ({
  fileIds,
  setFileIds,
  files,
  setFiles,
}: {
  fileIds: string[];
  setFileIds: (fileIds: string[]) => void;
  files: FileType[];
  setFiles: (files: FileType[]) => void;
}) => {
  const [loading, setLoading] = useState(false);
  const fileInputRef = useRef<any>();

  const handleChange = async (e: React.ChangeEvent<HTMLInputElement>) => {
    setLoading(true);
    const data = new FormData();

    for (let i = 0; i < e.target.files!.length; i++) {
      data.append('files', e.target.files![i]);
    }

    const embeddingModelProvider = localStorage.getItem(
      'embeddingModelProvider',
    );
    const embeddingModel = localStorage.getItem('embeddingModel');

    data.append('embedding_model_provider', embeddingModelProvider!);
    data.append('embedding_model', embeddingModel!);

    const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
      method: 'POST',
      body: data,
    });

    const resData = await res.json();

    setFiles([...files, ...resData.files]);
    setFileIds([...fileIds, ...resData.files.map((file: any) => file.fileId)]);
    setLoading(false);
  };

  return loading ? (
    <div className="flex flex-row items-center justify-between space-x-1 p-1">
      <LoaderCircle size={20} className="text-sky-400 animate-spin" />
    </div>
  ) : files.length > 0 ? (
    <Popover className="max-w-[15rem] md:max-w-md lg:max-w-lg">
      <PopoverButton
        type="button"
        className="flex flex-row items-center justify-between space-x-1 p-1 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary active:scale-95 transition duration-200 hover:text-black dark:hover:text-white"
      >
        <File size={20} className="text-sky-400" />
      </PopoverButton>
      <Transition
        as={Fragment}
        enter="transition ease-out duration-150"
        enterFrom="opacity-0 translate-y-1"
        enterTo="opacity-100 translate-y-0"
        leave="transition ease-in duration-150"
        leaveFrom="opacity-100 translate-y-0"
        leaveTo="opacity-0 translate-y-1"
      >
        <PopoverPanel className="absolute z-10 w-64 md:w-[350px] bottom-14 -ml-3">
          <div className="bg-light-primary dark:bg-dark-primary border rounded-md border-light-200 dark:border-dark-200 w-full max-h-[200px] md:max-h-none overflow-y-auto flex flex-col">
            <div className="flex flex-row items-center justify-between px-3 py-2">
              <h4 className="text-black dark:text-white font-medium text-sm">
                Attached files
              </h4>
              <div className="flex flex-row items-center space-x-4">
                <button
                  type="button"
                  onClick={() => fileInputRef.current.click()}
                  className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
                >
                  <input
                    type="file"
                    onChange={handleChange}
                    ref={fileInputRef}
                    accept=".pdf,.docx,.txt"
                    multiple
                    hidden
                  />
                  <Plus size={18} />
                  <p className="text-xs">Add</p>
                </button>
                <button
                  onClick={() => {
                    setFiles([]);
                    setFileIds([]);
                  }}
                  className="flex flex-row items-center space-x-1 text-white/70 hover:text-white transition duration-200"
                >
                  <Trash size={14} />
                  <p className="text-xs">Clear</p>
                </button>
              </div>
            </div>
            <div className="h-[0.5px] mx-2 bg-white/10" />
            <div className="flex flex-col items-center">
              {files.map((file, i) => (
                <div
                  key={i}
                  className="flex flex-row items-center justify-start w-full space-x-3 p-3"
                >
                  <div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
                    <File size={16} className="text-white/70" />
                  </div>
                  <p className="text-white/70 text-sm">
                    {file.fileName.length > 25
                      ? file.fileName.replace(/\.\w+$/, '').substring(0, 25) +
                        '...' +
                        file.fileExtension
                      : file.fileName}
                  </p>
                </div>
              ))}
            </div>
          </div>
        </PopoverPanel>
      </Transition>
    </Popover>
  ) : (
    <button
      type="button"
      onClick={() => fileInputRef.current.click()}
      className="flex flex-row items-center space-x-1 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white p-1"
    >
      <input
        type="file"
        onChange={handleChange}
        ref={fileInputRef}
        accept=".pdf,.docx,.txt"
        multiple
        hidden
      />
      <CopyPlus size={20} />
    </button>
  );
};

export default AttachSmall;
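A minimal usage sketch for wiring AttachSmall into a parent (the MessageBar component here is hypothetical; the prop types come from the file above, and uploads are POSTed as multipart form data to `${NEXT_PUBLIC_API_URL}/uploads`):

// Hypothetical parent component; only the props are taken from AttachSmall above.
import { useState } from 'react';
import AttachSmall from './AttachSmall';
import { File as FileType } from '../ChatWindow';

const MessageBar = () => {
  // The parent owns both the file metadata (for display) and the
  // server-assigned fileIds (sent along with the chat request).
  const [files, setFiles] = useState<FileType[]>([]);
  const [fileIds, setFileIds] = useState<string[]>([]);

  return (
    <AttachSmall
      fileIds={fileIds}
      setFileIds={setFileIds}
      files={files}
      setFiles={setFiles}
    />
  );
};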
ui/components/MessageInputActions/Copilot.tsx (new file, 43 lines)
@@ -0,0 +1,43 @@
import { cn } from '@/lib/utils';
import { Switch } from '@headlessui/react';

const CopilotToggle = ({
  copilotEnabled,
  setCopilotEnabled,
}: {
  copilotEnabled: boolean;
  setCopilotEnabled: (enabled: boolean) => void;
}) => {
  return (
    <div className="group flex flex-row items-center space-x-1 active:scale-95 duration-200 transition cursor-pointer">
      <Switch
        checked={copilotEnabled}
        onChange={setCopilotEnabled}
        className="bg-light-secondary dark:bg-dark-secondary border border-light-200/70 dark:border-dark-200 relative inline-flex h-5 w-10 sm:h-6 sm:w-11 items-center rounded-full"
      >
        <span className="sr-only">Copilot</span>
        <span
          className={cn(
            copilotEnabled
              ? 'translate-x-6 bg-[#24A0ED]'
              : 'translate-x-1 bg-black/50 dark:bg-white/50',
            'inline-block h-3 w-3 sm:h-4 sm:w-4 transform rounded-full transition-all duration-200',
          )}
        />
      </Switch>
      <p
        onClick={() => setCopilotEnabled(!copilotEnabled)}
        className={cn(
          'text-xs font-medium transition-colors duration-150 ease-in-out',
          copilotEnabled
            ? 'text-[#24A0ED]'
            : 'text-black/50 dark:text-white/50 group-hover:text-black dark:group-hover:text-white',
        )}
      >
        Copilot
      </p>
    </div>
  );
};

export default CopilotToggle;
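CopilotToggle is a controlled component: it renders the Headless UI Switch but the caller owns the boolean. A minimal usage sketch (the Example host component is hypothetical):

// Hypothetical host component; props come from CopilotToggle above.
import { useState } from 'react';
import CopilotToggle from './Copilot';

const Example = () => {
  const [copilotEnabled, setCopilotEnabled] = useState(false);

  return (
    <CopilotToggle
      copilotEnabled={copilotEnabled}
      setCopilotEnabled={setCopilotEnabled}
    />
  );
};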
ui/components/MessageInputActions/Focus.tsx
@@ -1,28 +1,21 @@
 import {
   BadgePercent,
   ChevronDown,
-  CopyPlus,
   Globe,
   Pencil,
   ScanEye,
   SwatchBook,
 } from 'lucide-react';
 import { cn } from '@/lib/utils';
-import { Popover, Switch, Transition } from '@headlessui/react';
+import {
+  Popover,
+  PopoverButton,
+  PopoverPanel,
+  Transition,
+} from '@headlessui/react';
 import { SiReddit, SiYoutube } from '@icons-pack/react-simple-icons';
 import { Fragment } from 'react';
 
-export const Attach = () => {
-  return (
-    <button
-      type="button"
-      className="p-2 text-white/50 rounded-xl hover:bg-[#1c1c1c] transition duration-200 hover:text-white"
-    >
-      <CopyPlus />
-    </button>
-  );
-};
-
 const focusModes = [
   {
     key: 'webSearch',
@@ -74,7 +67,7 @@ const focusModes = [
   },
 ];
 
-export const Focus = ({
+const Focus = ({
   focusMode,
   setFocusMode,
 }: {
@@ -82,10 +75,10 @@ export const Focus = ({
   setFocusMode: (mode: string) => void;
 }) => {
   return (
-    <Popover className="fixed w-full max-w-[15rem] md:max-w-md lg:max-w-lg">
-      <Popover.Button
+    <Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg mt-[6.5px]">
+      <PopoverButton
         type="button"
-        className="p-2 text-white/50 rounded-xl hover:bg-[#1c1c1c] active:scale-95 transition duration-200 hover:text-white"
+        className=" text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary active:scale-95 transition duration-200 hover:text-black dark:hover:text-white"
       >
         {focusMode !== 'webSearch' ? (
           <div className="flex flex-row items-center space-x-1">
@@ -93,12 +86,15 @@ export const Focus = ({
             <p className="text-xs font-medium">
               {focusModes.find((mode) => mode.key === focusMode)?.title}
             </p>
-            <ChevronDown size={20} />
+            <ChevronDown size={20} className="-translate-x-1" />
           </div>
         ) : (
-          <ScanEye />
+          <div className="flex flex-row items-center space-x-1">
+            <ScanEye size={20} />
+            <p className="text-xs font-medium">Focus</p>
+          </div>
         )}
-      </Popover.Button>
+      </PopoverButton>
       <Transition
         as={Fragment}
         enter="transition ease-out duration-150"
@@ -108,73 +104,40 @@ export const Focus = ({
         leaveFrom="opacity-100 translate-y-0"
         leaveTo="opacity-0 translate-y-1"
       >
-        <Popover.Panel className="absolute z-10 w-full">
-          <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-1 bg-[#0A0A0A] border rounded-lg border-[#1c1c1c] w-full p-2">
+        <PopoverPanel className="absolute z-10 w-64 md:w-[500px] left-0">
+          <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-2 bg-light-primary dark:bg-dark-primary border rounded-lg border-light-200 dark:border-dark-200 w-full p-4 max-h-[200px] md:max-h-none overflow-y-auto">
             {focusModes.map((mode, i) => (
-              <Popover.Button
+              <PopoverButton
                 onClick={() => setFocusMode(mode.key)}
                 key={i}
                 className={cn(
                   'p-2 rounded-lg flex flex-col items-start justify-start text-start space-y-2 duration-200 cursor-pointer transition',
                   focusMode === mode.key
-                    ? 'bg-[#111111]'
-                    : 'hover:bg-[#111111]',
+                    ? 'bg-light-secondary dark:bg-dark-secondary'
+                    : 'hover:bg-light-secondary dark:hover:bg-dark-secondary',
                 )}
               >
                 <div
                   className={cn(
                     'flex flex-row items-center space-x-1',
-                    focusMode === mode.key ? 'text-[#24A0ED]' : 'text-white',
+                    focusMode === mode.key
+                      ? 'text-[#24A0ED]'
+                      : 'text-black dark:text-white',
                   )}
                 >
                   {mode.icon}
                   <p className="text-sm font-medium">{mode.title}</p>
                 </div>
-                <p className="text-white/70 text-xs">{mode.description}</p>
-              </Popover.Button>
+                <p className="text-black/70 dark:text-white/70 text-xs">
+                  {mode.description}
+                </p>
+              </PopoverButton>
             ))}
           </div>
-        </Popover.Panel>
+        </PopoverPanel>
       </Transition>
     </Popover>
   );
 };
 
-export const CopilotToggle = ({
-  copilotEnabled,
-  setCopilotEnabled,
-}: {
-  copilotEnabled: boolean;
-  setCopilotEnabled: (enabled: boolean) => void;
-}) => {
-  return (
-    <div className="group flex flex-row items-center space-x-1 active:scale-95 duration-200 transition cursor-pointer">
-      <Switch
-        checked={copilotEnabled}
-        onChange={setCopilotEnabled}
-        className="bg-[#111111] border border-[#1C1C1C] relative inline-flex h-5 w-10 sm:h-6 sm:w-11 items-center rounded-full"
-      >
-        <span className="sr-only">Copilot</span>
-        <span
-          className={cn(
-            copilotEnabled
-              ? 'translate-x-6 bg-[#24A0ED]'
-              : 'translate-x-1 bg-white/50',
-            'inline-block h-3 w-3 sm:h-4 sm:w-4 transform rounded-full transition-all duration-200',
-          )}
-        />
-      </Switch>
-      <p
-        onClick={() => setCopilotEnabled(!copilotEnabled)}
-        className={cn(
-          'text-xs font-medium transition-colors duration-150 ease-in-out',
-          copilotEnabled
-            ? 'text-[#24A0ED]'
-            : 'text-white/50 group-hover:text-white',
-        )}
-      >
-        Copilot
-      </p>
-    </div>
-  );
-};
+export default Focus;
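Across this diff, dot-notation compound components (Popover.Button, Popover.Panel) are replaced with the named PopoverButton and PopoverPanel exports. A minimal sketch of the renamed API, assuming a Headless UI version that ships these named exports:

// Before (old style in this diff): dot-notation compound components.
// import { Popover } from '@headlessui/react';
// <Popover.Button>Open</Popover.Button>
// <Popover.Panel>...</Popover.Panel>

// After (new style in this diff): named exports.
import { Popover, PopoverButton, PopoverPanel } from '@headlessui/react';

const Menu = () => (
  <Popover className="relative">
    <PopoverButton>Open</PopoverButton>
    <PopoverPanel className="absolute z-10">Panel content</PopoverPanel>
  </Popover>
);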
ui/components/MessageInputActions/Optimization.tsx (new file, 104 lines)
@@ -0,0 +1,104 @@
import { ChevronDown, Sliders, Star, Zap } from 'lucide-react';
import { cn } from '@/lib/utils';
import {
  Popover,
  PopoverButton,
  PopoverPanel,
  Transition,
} from '@headlessui/react';
import { Fragment } from 'react';

const OptimizationModes = [
  {
    key: 'speed',
    title: 'Speed',
    description: 'Prioritize speed and get the quickest possible answer.',
    icon: <Zap size={20} className="text-[#FF9800]" />,
  },
  {
    key: 'balanced',
    title: 'Balanced',
    description: 'Find the right balance between speed and accuracy',
    icon: <Sliders size={20} className="text-[#4CAF50]" />,
  },
  {
    key: 'quality',
    title: 'Quality (Soon)',
    description: 'Get the most thorough and accurate answer',
    icon: (
      <Star
        size={16}
        className="text-[#2196F3] dark:text-[#BBDEFB] fill-[#BBDEFB] dark:fill-[#2196F3]"
      />
    ),
  },
];

const Optimization = ({
  optimizationMode,
  setOptimizationMode,
}: {
  optimizationMode: string;
  setOptimizationMode: (mode: string) => void;
}) => {
  return (
    <Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg">
      <PopoverButton
        type="button"
        className="p-2 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary active:scale-95 transition duration-200 hover:text-black dark:hover:text-white"
      >
        <div className="flex flex-row items-center space-x-1">
          {
            OptimizationModes.find((mode) => mode.key === optimizationMode)
              ?.icon
          }
          <p className="text-xs font-medium">
            {
              OptimizationModes.find((mode) => mode.key === optimizationMode)
                ?.title
            }
          </p>
          <ChevronDown size={20} />
        </div>
      </PopoverButton>
      <Transition
        as={Fragment}
        enter="transition ease-out duration-150"
        enterFrom="opacity-0 translate-y-1"
        enterTo="opacity-100 translate-y-0"
        leave="transition ease-in duration-150"
        leaveFrom="opacity-100 translate-y-0"
        leaveTo="opacity-0 translate-y-1"
      >
        <PopoverPanel className="absolute z-10 w-64 md:w-[250px] right-0">
          <div className="flex flex-col gap-2 bg-light-primary dark:bg-dark-primary border rounded-lg border-light-200 dark:border-dark-200 w-full p-4 max-h-[200px] md:max-h-none overflow-y-auto">
            {OptimizationModes.map((mode, i) => (
              <PopoverButton
                onClick={() => setOptimizationMode(mode.key)}
                key={i}
                disabled={mode.key === 'quality'}
                className={cn(
                  'p-2 rounded-lg flex flex-col items-start justify-start text-start space-y-1 duration-200 cursor-pointer transition',
                  optimizationMode === mode.key
                    ? 'bg-light-secondary dark:bg-dark-secondary'
                    : 'hover:bg-light-secondary dark:hover:bg-dark-secondary',
                  mode.key === 'quality' && 'opacity-50 cursor-not-allowed',
                )}
              >
                <div className="flex flex-row items-center space-x-1 text-black dark:text-white">
                  {mode.icon}
                  <p className="text-sm font-medium">{mode.title}</p>
                </div>
                <p className="text-black/70 dark:text-white/70 text-xs">
                  {mode.description}
                </p>
              </PopoverButton>
            ))}
          </div>
        </PopoverPanel>
      </Transition>
    </Popover>
  );
};

export default Optimization;
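A minimal usage sketch for Optimization (the Example caller is hypothetical; the mode keys 'speed', 'balanced', and 'quality' come from OptimizationModes above, with 'quality' rendered but disabled in the panel):

// Hypothetical caller; props and mode keys come from Optimization above.
import { useState } from 'react';
import Optimization from './Optimization';

const Example = () => {
  // 'speed' is a valid default since it is the first entry in OptimizationModes.
  const [optimizationMode, setOptimizationMode] = useState('speed');

  return (
    <Optimization
      optimizationMode={optimizationMode}
      setOptimizationMode={setOptimizationMode}
    />
  );
};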