Compare commits


31 Commits

SHA1 Message Date
59ab10110a Merge branch 'master' of https://github.com/HadiCherkaoui/Perplexica into pr/658 2025-03-02 15:35:14 +05:30
10f9cd2f79 feat(app): lint & beautify 2025-03-02 15:35:08 +05:30
82e1dd73b0 add config to ui 2025-03-02 11:00:56 +01:00
728b499281 Merge branch 'ItzCrazyKns:master' into master 2025-03-01 18:16:08 +01:00
5a4dafc753 Update yarn.lock 2025-03-01 18:12:40 +01:00
4ac99786f0 Update yarn.lock 2025-03-01 18:11:34 +01:00
1224281278 Update yarn.lock 2025-03-01 18:09:57 +01:00
3daae29a5d Update package.json 2025-03-01 18:05:28 +01:00
50bcaa13f2 Revert "Delete package.json"
This reverts commit 31e4abf068.
2025-03-01 17:58:54 +01:00
31e4abf068 Delete package.json 2025-03-01 17:57:40 +01:00
fd6e701cf0 Delete docs/installation/DEVELOPMENT.md 2025-03-01 17:49:59 +01:00
89880a2555 Delete .dev directory 2025-03-01 17:48:54 +01:00
07776d8699 feat(config): remove key cloak config functions 2025-03-01 22:13:38 +05:30
32fb6ac131 Update config.ts 2025-03-01 14:16:39 +01:00
99137d95e7 Update docker-compose.yaml 2025-03-01 14:15:44 +01:00
490a8db538 Update sample.config.toml 2025-03-01 14:15:21 +01:00
aba702c51b make it possible to configure multiple engines 2025-02-28 15:50:59 +01:00
89a6e7fbb1 Merge pull request #11 from HadiCherkaoui/add-bing
add bing
2025-02-28 15:17:40 +01:00
f19d2e3a97 add bing 2025-02-28 15:16:54 +01:00
4a7ca8fc68 Add keycloak config params to sample config 2025-02-28 15:07:50 +01:00
3d642f2539 fix sample messup 2025-02-28 14:58:16 +01:00
aa91d3bc60 Add config params for keycloak 2025-02-28 14:56:33 +01:00
93c5ed46f6 Merge pull request #9 from HadiCherkaoui/add-bing
Added brave, yacy and changed back the default search engine to searxng
2025-02-28 14:49:58 +01:00
af4b97b766 add yacy 2025-02-28 14:46:24 +01:00
ca86a7e358 add brave as a search engine 2025-02-28 13:11:00 +01:00
99351fc2a6 Added multiple search backends
Add support for multiple search engines/google as a search engine
2025-02-28 09:33:50 +01:00
7a816efc04 fix google pse
i tested everything, also tested if invalid api key and such and regression tested searxng which still works
2025-02-28 09:13:11 +01:00
4d41243108 added multi search engine support (didnt test) WIP 2025-02-27 21:32:26 +01:00
6c218b5fee move searxng.ts and update imports 2025-02-27 18:24:54 +01:00
1c1f31e23a Update .gitignore 2025-02-27 18:21:36 +01:00
5b15bcfe17 Add config for multiple search engines 2025-02-27 18:21:00 +01:00
134 changed files with 8227 additions and 6029 deletions

@@ -8,12 +8,18 @@ on:
types: [published]
jobs:
build-amd64:
build-and-push:
runs-on: ubuntu-latest
strategy:
matrix:
service: [backend, app]
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
@@ -30,109 +36,38 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push AMD64 Docker image
- name: Build and push Docker image for ${{ matrix.service }}
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:amd64 \
-t itzcrazykns1337/${IMAGE_NAME}:main \
--push .
- name: Build and push AMD64 release Docker image
- name: Build and push release Docker image for ${{ matrix.service }}
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--push .
build-arm64:
runs-on: ubuntu-24.04-arm
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
install: true
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push ARM64 Docker image
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:arm64 \
--push .
- name: Build and push ARM64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--push .
manifest:
needs: [build-amd64, build-arm64]
runs-on: ubuntu-latest
steps:
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Create and push multi-arch manifest for main
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
--amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
- name: Create and push multi-arch manifest for releases
if: github.event_name == 'release'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}

.gitignore
@@ -2,11 +2,12 @@
node_modules/
npm-debug.log
yarn-error.log
package-lock.json
# Build output
.next/
out/
dist/
/.next/
/out/
/dist/
# IDE/Editor specific
.vscode/
@@ -37,3 +38,6 @@ Thumbs.db
# Db
db.sqlite
/searxng
# Dev
docker-compose-dev.yaml

@@ -6,6 +6,7 @@ const config = {
endOfLine: 'auto',
singleQuote: true,
tabWidth: 2,
semi: true,
};
module.exports = config;

@@ -1,43 +1,32 @@
# How to Contribute to Perplexica
Thanks for your interest in contributing to Perplexica! Your help makes this project better. This guide explains how to contribute effectively.
Perplexica is a modern AI chat application with advanced search capabilities.
Hey there, thanks for deciding to contribute to Perplexica. Anything you help with will support the development of Perplexica and will make it better. Let's walk you through the key aspects to ensure your contributions are effective and in harmony with the project's setup.
## Project Structure
Perplexica's codebase is organized as follows:
Perplexica's design consists of two main domains:
- **UI Components and Pages**:
- **Components (`src/components`)**: Reusable UI components.
- **Pages and Routes (`src/app`)**: Next.js app directory structure with page components.
- Main app routes include: home (`/`), chat (`/c`), discover (`/discover`), library (`/library`), and settings (`/settings`).
- **API Routes (`src/app/api`)**: API endpoints implemented with Next.js API routes.
- `/api/chat`: Handles chat interactions.
- `/api/search`: Provides direct access to Perplexica's search capabilities.
- Other endpoints for models, files, and suggestions.
- **Backend Logic (`src/lib`)**: Contains all the backend functionality including search, database, and API logic.
- The search functionality is present inside `src/lib/search` directory.
- All of the focus modes are implemented using the Meta Search Agent class in `src/lib/search/metaSearchAgent.ts`.
- Database functionality is in `src/lib/db`.
- Chat model and embedding model providers are managed in `src/lib/providers`.
- Prompt templates and LLM chain definitions are in `src/lib/prompts` and `src/lib/chains` respectively.
## API Documentation
Perplexica exposes several API endpoints for programmatic access, including:
- **Search API**: Access Perplexica's advanced search capabilities directly via the `/api/search` endpoint. For detailed documentation, see `docs/api/search.md`.
- **Frontend (`ui` directory)**: This is a Next.js application holding all user interface components. It's a self-contained environment that manages everything the user interacts with.
- **Backend (root and `src` directory)**: The backend logic is situated in the `src` folder, but the root directory holds the main `package.json` for backend dependency management.
- All of the focus modes are created using the Meta Search Agent class present in `src/search/metaSearchAgent.ts`. The main logic behind Perplexica lies there.
## Setting Up Your Environment
Before diving into coding, setting up your local environment is key. Here's what you need to do:
### Backend
1. In the root directory, locate the `sample.config.toml` file.
2. Rename it to `config.toml` and fill in the necessary configuration fields.
3. Run `npm install` to install all dependencies.
4. Run `npm run db:push` to set up the local sqlite database.
5. Use `npm run dev` to start the application in development mode.
2. Rename it to `config.toml` and fill in the necessary configuration fields specific to the backend.
3. Run `npm install` to install dependencies.
4. Run `npm run db:push` to set up the local SQLite database.
5. Use `npm run dev` to start the backend in development mode.
### Frontend
1. Navigate to the `ui` folder and repeat the process of renaming `.env.example` to `.env`, making sure to provide the frontend-specific variables.
2. Execute `npm install` within the `ui` directory to get the frontend dependencies ready.
3. Launch the frontend development server with `npm run dev`.
**Please note**: Docker configurations are present for setting up production environments, whereas `npm run dev` is used for development purposes.

@@ -1,21 +1,5 @@
# 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->
<div align="center" markdown="1">
<sup>Special thanks to:</sup>
<br>
<br>
<a href="https://www.warp.dev/perplexica">
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/775dd593-9b5f-40f1-bf48-479faff4c27b">
</a>
### [Warp, the AI Devtool that lives in your terminal](https://www.warp.dev/perplexica)
[Available for MacOS, Linux, & Windows](https://www.warp.dev/perplexica)
</div>
<hr/>
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)
@@ -109,13 +93,14 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
1. Install SearXNG and allow `JSON` format in the SearXNG settings.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. After populating the configuration run `npm i`.
4. Install the dependencies and then execute `npm run build`.
5. Finally, start the app by running `npm run start`
3. Rename the `.env.example` file to `.env` in the `ui` folder and fill in all necessary fields.
4. After populating the configuration and environment files, run `npm i` in both the `ui` folder and the root directory.
5. Install the dependencies and then execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information, like exposing it to your network, etc.
### Ollama Connection Errors
@@ -153,13 +138,12 @@ For more details, check out the full documentation [here](https://github.com/Itz
## Expose Perplexica to network
Perplexica runs on Next.js and handles all API requests. It works right away on the same network and stays accessible even with port forwarding.
You can access Perplexica over your home network by following our networking guide [here](https://github.com/ItzCrazyKns/Perplexica/blob/master/docs/installation/NETWORKING.md).
## One-Click Deployment
[![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
[![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
## Upcoming Features

@@ -1,27 +1,15 @@
FROM node:20.18.0-slim AS builder
FROM node:20.18.0-alpine
ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
WORKDIR /home/perplexica
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile --network-timeout 600000
COPY ui /home/perplexica/
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
RUN mkdir -p /home/perplexica/data
RUN yarn install --frozen-lockfile
RUN yarn build
FROM node:20.18.0-slim
WORKDIR /home/perplexica
COPY --from=builder /home/perplexica/public ./public
COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data
RUN mkdir /home/perplexica/uploads
CMD ["node", "server.js"]
CMD ["yarn", "start"]

backend.dockerfile (new file)
@@ -0,0 +1,17 @@
FROM node:18-slim
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build
CMD ["yarn", "start"]

@@ -4,35 +4,44 @@ services:
volumes:
- ./searxng:/etc/searxng:rw
ports:
- 4000:8080
- '4000:8080'
networks:
- perplexica-network
restart: unless-stopped
app:
image: itzcrazykns1337/perplexica:main
perplexica-backend:
build:
context: .
dockerfile: app.dockerfile
dockerfile: backend.dockerfile
image: itzcrazykns1337/perplexica-backend:main
environment:
- SEARXNG_API_URL=http://searxng:8080
depends_on:
- searxng
ports:
- 3000:3000
networks:
- perplexica-network
- '3001:3001'
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
- ./config.toml:/home/perplexica/config.toml
extra_hosts:
- 'host.docker.internal:host-gateway'
networks:
- perplexica-network
restart: unless-stopped
redict:
image: registry.redict.io/redict:latest
container_name: perplexica-redict
perplexica-frontend:
build:
context: .
dockerfile: app.dockerfile
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
image: itzcrazykns1337/perplexica-frontend:main
depends_on:
- perplexica-backend
ports:
- "6379:6379"
volumes:
- redict_data:/data
- '3000:3000'
networks:
- perplexica-network
restart: unless-stopped
@@ -43,4 +52,3 @@ networks:
volumes:
backend-dbstore:
uploads:
redict_data:
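
The two `NEXT_PUBLIC_*` build args above are baked into the frontend at build time, which is why the networking guide further down has you edit them and rebuild rather than just restart. A hedged TypeScript sketch of how the UI side typically consumes them (illustrative, not part of this diff):

```ts
// Next.js inlines NEXT_PUBLIC_* variables at build time, so changing them
// means rebuilding the frontend image (docker compose up -d --build).
const apiUrl = process.env.NEXT_PUBLIC_API_URL ?? 'http://127.0.0.1:3001/api';
const wsUrl = process.env.NEXT_PUBLIC_WS_URL ?? 'ws://127.0.0.1:3001';

// Chat traffic goes over the WebSocket server; REST endpoints live under /api.
const ws = new WebSocket(wsUrl);
const models = await fetch(`${apiUrl}/models`).then((r) => r.json());
```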

@@ -6,9 +6,9 @@ Perplexica's Search API makes it easy to use our AI-powered search engine. You
## Endpoint
### **POST** `http://localhost:3000/api/search`
### **POST** `http://localhost:3001/api/search`
**Note**: Replace `3000` with any other port if you've changed the default PORT
**Note**: Replace `3001` with any other port if you've changed the default PORT
### Request
@@ -20,11 +20,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
{
"chatModel": {
"provider": "openai",
"name": "gpt-4o-mini"
"model": "gpt-4o-mini"
},
"embeddingModel": {
"provider": "openai",
"name": "text-embedding-3-large"
"model": "text-embedding-3-large"
},
"optimizationMode": "speed",
"focusMode": "webSearch",
@@ -32,26 +32,24 @@ The API accepts a JSON object in the request body, where you define the focus mo
"history": [
["human", "Hi, how are you?"],
["assistant", "I am doing well, how can I help you today?"]
],
"systemInstructions": "Focus on providing technical details about Perplexica's architecture.",
"stream": false
]
}
```
### Request Parameters
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
- `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- Optional fields for custom OpenAI configuration:
- `customOpenAIBaseURL`: If youre using a custom OpenAI instance, provide the base URL.
- `customOpenAIKey`: The API key for a custom OpenAI instance.
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- `provider`: The provider for the embedding model (e.g., `openai`).
- `name`: The specific embedding model (e.g., `text-embedding-3-large`).
- `model`: The specific embedding model (e.g., `text-embedding-3-large`).
- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
@@ -64,8 +62,6 @@ The API accepts a JSON object in the request body, where you define the focus mo
- **`query`** (string, required): The search query or question.
- **`systemInstructions`** (string, optional): Custom instructions provided by the user to guide the AI's response. These instructions are treated as user preferences and have lower priority than the system's core instructions. For example, you can specify a particular writing style, format, or focus area.
- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
```json
@@ -75,13 +71,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
]
```
- **`stream`** (boolean, optional): When set to `true`, enables streaming responses. Default is `false`.
### Response
The response from the API includes both the final message and the sources used to generate that message.
#### Standard Response (stream: false)
#### Example Response
```json
{
@@ -106,28 +100,6 @@ The response from the API includes both the final message and the sources used t
}
```
#### Streaming Response (stream: true)
When streaming is enabled, the API returns a stream of newline-delimited JSON objects. Each line contains a complete, valid JSON object. The response has Content-Type: application/json.
Example of streamed response objects:
```
{"type":"init","data":"Stream connected"}
{"type":"sources","data":[{"pageContent":"...","metadata":{"title":"...","url":"..."}},...]}
{"type":"response","data":"Perplexica is an "}
{"type":"response","data":"innovative, open-source "}
{"type":"response","data":"AI-powered search engine..."}
{"type":"done"}
```
Clients should process each line as a separate JSON object. The different message types include:
- **`init`**: Initial connection message
- **`sources`**: All sources used for the response
- **`response`**: Chunks of the generated answer text
- **`done`**: Indicates the stream is complete
### Fields in the Response
- **`message`** (string): The search result, generated based on the query and focus mode.
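
Putting the request and response shapes together, a hedged TypeScript example of calling the endpoint as documented above (assumes the backend on its default port 3001 and Node 18+ for the global `fetch`; not part of this diff):

```ts
// Illustrative client for POST /api/search against the default backend port.
const res = await fetch('http://localhost:3001/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    chatModel: { provider: 'openai', model: 'gpt-4o-mini' },
    embeddingModel: { provider: 'openai', model: 'text-embedding-3-large' },
    optimizationMode: 'speed',
    focusMode: 'webSearch',
    query: 'What is Perplexica?',
    history: [
      ['human', 'Hi, how are you?'],
      ['assistant', 'I am doing well, how can I help you today?'],
    ],
  }),
});

// The response carries the generated answer plus the sources it was built from.
const { message, sources } = (await res.json()) as {
  message: string;
  sources: { pageContent: string; metadata: { title: string; url: string } }[];
};
console.log(message, sources.length);
```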

@@ -4,7 +4,7 @@ Curious about how Perplexica works? Don't worry, we'll cover it here. Before we
We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:
1. The message is sent to the `/api/chat` route where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
1. The message is sent via WS to the backend server where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
2. The chain is now invoked; the message is first passed to another chain that predicts (using the chat history and the question) whether sources and a web search are needed. If they are, it generates a query (in accordance with the chat history) for searching the web, which we'll take up later. If not, the chain ends there, and the answer generator chain, also known as the response generator, is started.
3. The query returned by the first chain is passed to SearXNG to search the web for information.
4. The information retrieved at this stage comes from a keyword-based search. We then convert both the retrieved information and the query into embeddings and perform a similarity search to find the sources most relevant to answering the query.
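
As a rough illustration of step 4, a minimal reranking sketch using cosine similarity. The function name is hypothetical; `compute-cosine-similarity` is already among the project's dependencies, and the real logic lives in the Meta Search Agent:

```ts
import similarity from 'compute-cosine-similarity';

// Score each keyword-search result against the query in embedding space,
// then keep only the most relevant sources for the answer generator.
const rerankByCosine = (
  queryEmbedding: number[],
  docs: { content: string; embedding: number[] }[],
  topK = 8,
) =>
  docs
    .map((doc) => ({ doc, score: similarity(queryEmbedding, doc.embedding) ?? 0 }))
    .sort((a, b) => b.score - a.score)
    .slice(0, topK)
    .map(({ doc }) => doc);
```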

@@ -0,0 +1,109 @@
# Expose Perplexica to a network
This guide will show you how to make Perplexica available over a network. Follow these steps to allow computers on the same network to interact with Perplexica. Choose the instructions that match the operating system you are using.
## Windows
1. Open PowerShell as Administrator
2. Navigate to the directory containing the `docker-compose.yaml` file
3. Stop and remove the existing Perplexica containers and images:
```bash
docker compose down --rmi all
```
4. Open the `docker-compose.yaml` file in a text editor like Notepad++
5. Replace `127.0.0.1` with the IP address of the server Perplexica is running on in these two lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and close the `docker-compose.yaml` file
7. Rebuild and restart the Perplexica container:
```bash
docker compose up -d --build
```
## macOS
1. Open the Terminal application
2. Navigate to the directory with the `docker-compose.yaml` file:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove existing containers and images:
```bash
docker compose down --rmi all
```
4. Open `docker-compose.yaml` in a text editor like Sublime Text:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP in these lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```
## Linux
1. Open the terminal
2. Navigate to the `docker-compose.yaml` directory:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove containers and images:
```bash
docker compose down --rmi all
```
4. Edit `docker-compose.yaml`:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```

@@ -39,8 +39,11 @@ To update Perplexica to the latest version, follow these steps:
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. After populating the configuration run `npm i`.
5. Install the dependencies and then execute `npm run build`.
6. Finally, start the app by running `npm run start`
4. Execute `npm i` in both the `ui` folder and the root directory.
5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
---

@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
export default defineConfig({
dialect: 'sqlite',
schema: './src/lib/db/schema.ts',
schema: './src/db/schema.ts',
out: './drizzle',
dbCredentials: {
url: './data/db.sqlite',

next-env.d.ts (deleted)
@@ -1,5 +0,0 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

@@ -1,65 +1,53 @@
{
"name": "perplexica-frontend",
"version": "1.10.2",
"name": "perplexica-backend",
"version": "1.10.0-rc3",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
"dev": "next dev",
"build": "npm run db:push && next build",
"start": "next start",
"lint": "next lint",
"format:write": "prettier . --write",
"db:push": "drizzle-kit push"
},
"dependencies": {
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.15",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/google-genai": "^0.1.12",
"@langchain/openai": "^0.0.25",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
"axios": "^1.8.3",
"better-sqlite3": "^11.9.1",
"clsx": "^2.1.0",
"compute-cosine-similarity": "^1.1.0",
"compute-dot": "^1.1.0",
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"langchain": "^0.1.30",
"lucide-react": "^0.363.0",
"markdown-to-jsx": "^7.7.2",
"next": "^15.2.2",
"next-themes": "^0.3.0",
"pdf-parse": "^1.1.1",
"react": "^18",
"react-dom": "^18",
"react-text-to-speech": "^0.14.5",
"react-textarea-autosize": "^8.5.3",
"sonner": "^1.4.41",
"tailwind-merge": "^2.2.2",
"winston": "^3.17.0",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"
"start": "npm run db:push && node dist/app.js",
"build": "tsc",
"dev": "nodemon --ignore uploads/ src/app.ts ",
"db:push": "drizzle-kit push sqlite",
"format": "prettier . --check",
"format:write": "prettier . --write"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",
"@types/better-sqlite3": "^7.6.10",
"@types/cors": "^2.8.17",
"@types/express": "^4.17.21",
"@types/html-to-text": "^9.0.4",
"@types/node": "^20",
"@types/multer": "^1.4.12",
"@types/pdf-parse": "^1.1.4",
"@types/react": "^18",
"@types/react-dom": "^18",
"autoprefixer": "^10.0.1",
"drizzle-kit": "^0.30.5",
"eslint": "^8",
"eslint-config-next": "14.1.4",
"postcss": "^8",
"@types/readable-stream": "^4.0.11",
"@types/ws": "^8.5.12",
"drizzle-kit": "^0.22.7",
"nodemon": "^3.1.0",
"prettier": "^3.2.5",
"tailwindcss": "^3.3.0",
"typescript": "^5"
"ts-node": "^10.9.2",
"typescript": "^5.4.3"
},
"dependencies": {
"@iarna/toml": "^2.2.5",
"@langchain/anthropic": "^0.2.3",
"@langchain/community": "^0.2.16",
"@langchain/google-genai": "^0.0.23",
"@langchain/openai": "^0.0.25",
"@xenova/transformers": "^2.17.1",
"axios": "^1.6.8",
"better-sqlite3": "^11.0.0",
"compute-cosine-similarity": "^1.1.0",
"compute-dot": "^1.1.0",
"cors": "^2.8.5",
"dotenv": "^16.4.5",
"drizzle-orm": "^0.31.2",
"express": "^4.19.2",
"html-to-text": "^9.0.5",
"langchain": "^0.1.30",
"mammoth": "^1.8.0",
"multer": "^1.4.5-lts.1",
"pdf-parse": "^1.1.1",
"winston": "^3.13.0",
"ws": "^8.17.1",
"zod": "^3.22.4"
}
}

@@ -1,7 +1,14 @@
[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
[SEARCH_ENGINE_BACKENDS] # "google" | "searxng" | "bing" | "brave" | "yacy"
SEARCH = "searxng"
IMAGE = "searxng"
VIDEO = "searxng"
NEWS = "searxng"
[MODELS.OPENAI]
API_KEY = ""
@@ -17,16 +24,22 @@ API_KEY = ""
[MODELS.CUSTOM_OPENAI]
API_KEY = ""
API_URL = ""
MODEL_NAME = ""
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[MODELS.DEEPSEEK]
[SEARCH_ENGINES.GOOGLE]
API_KEY = ""
CSE_ID = ""
[SEARCH_ENGINES.SEARXNG]
ENDPOINT = ""
[SEARCH_ENGINES.BING]
SUBSCRIPTION_KEY = ""
[SEARCH_ENGINES.BRAVE]
API_KEY = ""
[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768
[SEARCH_ENGINES.YACY]
ENDPOINT = ""
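
For illustration, a hedged sketch of how the new `[SEARCH_ENGINE_BACKENDS]` section might be read on the backend. The helper names are hypothetical and not taken from this diff; `@iarna/toml` is already a project dependency:

```ts
import fs from 'fs';
import { parse } from '@iarna/toml';

type Backend = 'google' | 'searxng' | 'bing' | 'brave' | 'yacy';
type Category = 'SEARCH' | 'IMAGE' | 'VIDEO' | 'NEWS';

interface Config {
  SEARCH_ENGINE_BACKENDS?: Partial<Record<Category, Backend>>;
}

const loadConfig = (): Config =>
  parse(fs.readFileSync('config.toml', 'utf-8')) as unknown as Config;

// Fall back to searxng when a category has no explicit backend configured.
export const getSearchEngineBackend = (category: Category): Backend =>
  loadConfig().SEARCH_ENGINE_BACKENDS?.[category] ?? 'searxng';
```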

@@ -12,9 +12,6 @@ search:
server:
secret_key: 'a2fb23f1b02e6ee83875b09826990de0f6bd908b6638e8c10277d415f6ab852b' # Is overwritten by ${SEARXNG_SECRET}
redis:
url: redis://redict:6379/0
engines:
- name: wolframalpha
disabled: false

src/app.ts (new file)
@@ -0,0 +1,38 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});
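
The `/api` route registered above doubles as a simple health check. A quick, hedged way to verify the server is up (assumes the default `PORT = 3001` from `sample.config.toml`):

```ts
// Ping the health route defined in src/app.ts (assumes default port 3001).
const health = await fetch('http://localhost:3001/api');
console.log(await health.json()); // expected: { status: 'ok' }
```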

@@ -1,306 +0,0 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
type Message = {
messageId: string;
chatId: string;
content: string;
};
type ChatModel = {
provider: string;
name: string;
};
type EmbeddingModel = {
provider: string;
name: string;
};
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
systemInstructions: string;
};
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
) => {
let recievedMessage = '';
let sources: any[] = [];
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
recievedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
sources = parsedData.data;
}
});
stream.on('end', () => {
writer.write(
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
}) + '\n',
),
);
writer.close();
db.insert(messagesSchema)
.values({
content: recievedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
});
stream.on('error', (data) => {
const parsedData = JSON.parse(data);
writer.write(
encoder.encode(
JSON.stringify({
type: 'error',
data: parsedData.data,
}),
),
);
writer.close();
});
};
const handleHistorySave = async (
message: Message,
humanMessageId: string,
focusMode: string,
files: string[],
) => {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, message.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: message.chatId,
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: message.content,
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, message.chatId),
),
)
.execute();
}
};
export const POST = async (req: Request) => {
try {
const body = (await req.json()) as Body;
const { message } = body;
if (message.content === '') {
return Response.json(
{
message: 'Please provide a message to process',
},
{ status: 400 },
);
}
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.name || Object.keys(chatModelProvider)[0]
];
const embeddingProvider =
embeddingModelProviders[
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
];
const embeddingModel =
embeddingProvider[
body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
];
let llm: BaseChatModel | undefined;
let embedding = embeddingModel.model;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
if (!embedding) {
return Response.json(
{ error: 'Invalid embedding model' },
{ status: 400 },
);
}
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const handler = searchHandlers[body.focusMode];
if (!handler) {
return Response.json(
{
message: 'Invalid focus mode',
},
{ status: 400 },
);
}
const stream = await handler.searchAndAnswer(
message.content,
history,
llm,
embedding,
body.optimizationMode,
body.files,
body.systemInstructions,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
headers: {
'Content-Type': 'text/event-stream',
Connection: 'keep-alive',
'Cache-Control': 'no-cache, no-transform',
},
});
} catch (err) {
console.error('An error occurred while processing chat request:', err);
return Response.json(
{ message: 'An error occurred while processing chat request' },
{ status: 500 },
);
}
};

@@ -1,69 +0,0 @@
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
export const GET = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, id),
});
return Response.json(
{
chat: chatExists,
messages: chatMessages,
},
{ status: 200 },
);
} catch (err) {
console.error('Error in getting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
export const DELETE = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
await db.delete(chats).where(eq(chats.id, id)).execute();
await db.delete(messages).where(eq(messages.chatId, id)).execute();
return Response.json(
{ message: 'Chat deleted successfully' },
{ status: 200 },
);
} catch (err) {
console.error('Error in deleting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

@@ -1,15 +0,0 @@
import db from '@/lib/db';
export const GET = async (req: Request) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return Response.json({ chats: chats }, { status: 200 });
} catch (err) {
console.error('Error in getting chats: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

@@ -1,119 +0,0 @@
import {
getAnthropicApiKey,
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
getGeminiApiKey,
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
getDeepseekApiKey,
getLMStudioApiEndpoint,
updateConfig,
} from '@/lib/config';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const config: Record<string, any> = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: chatModelProviders[provider][model].displayName,
};
});
}
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: embeddingModelProviders[provider][model].displayName,
};
});
}
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['deepseekApiKey'] = getDeepseekApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
return Response.json({ ...config }, { status: 200 });
} catch (err) {
console.error('An error occurred while getting config:', err);
return Response.json(
{ message: 'An error occurred while getting config' },
{ status: 500 },
);
}
};
export const POST = async (req: Request) => {
try {
const config = await req.json();
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
DEEPSEEK: {
API_KEY: config.deepseekApiKey,
},
LM_STUDIO: {
API_URL: config.lmStudioApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
};
updateConfig(updatedConfig);
return Response.json({ message: 'Config updated' }, { status: 200 });
} catch (err) {
console.error('An error occurred while updating config:', err);
return Response.json(
{ message: 'An error occurred while updating config' },
{ status: 500 },
);
}
};

@@ -1,61 +0,0 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
export const GET = async (req: Request) => {
try {
const data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
).results;
}),
])
)
.map((result) => result)
.flat()
.sort(() => Math.random() - 0.5);
return Response.json(
{
blogs: data,
},
{
status: 200,
},
);
} catch (err) {
console.error(`An error occurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',
},
{
status: 500,
},
);
}
};

@@ -1,83 +0,0 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching images: ${err}`);
return Response.json(
{ message: 'An error occurred while searching images' },
{ status: 500 },
);
}
};

@@ -1,47 +0,0 @@
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete (chatModelProviders[provider][model] as { model?: unknown })
.model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete (embeddingModelProviders[provider][model] as { model?: unknown })
.model;
});
});
return Response.json(
{
chatModelProviders,
embeddingModelProviders,
},
{
status: 200,
},
);
} catch (err) {
console.error('An error occurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',
},
{
status: 500,
},
);
}
};

@@ -1,270 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '@/lib/search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
interface chatModel {
provider: string;
name: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
}
interface embeddingModel {
provider: string;
name: string;
}
interface ChatRequestBody {
optimizationMode: 'speed' | 'balanced';
focusMode: string;
chatModel?: chatModel;
embeddingModel?: embeddingModel;
query: string;
history: Array<[string, string]>;
stream?: boolean;
systemInstructions?: string;
}
export const POST = async (req: Request) => {
try {
const body: ChatRequestBody = await req.json();
if (!body.focusMode || !body.query) {
return Response.json(
{ message: 'Missing focus mode or query' },
{ status: 400 },
);
}
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
body.stream = body.stream || false;
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.name ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
const embeddingModel =
body.embeddingModel?.name ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.name || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
return Response.json(
{ message: 'Invalid model selected' },
{ status: 400 },
);
}
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
return Response.json({ message: 'Invalid focus mode' }, { status: 400 });
}
const emitter = await searchHandler.searchAndAnswer(
body.query,
history,
llm,
embeddings,
body.optimizationMode,
[],
body.systemInstructions || '',
);
if (!body.stream) {
return new Promise(
(
resolve: (value: Response) => void,
reject: (value: Response) => void,
) => {
let message = '';
let sources: any[] = [];
emitter.on('data', (data: string) => {
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
} catch (error) {
reject(
Response.json(
{ message: 'Error parsing data' },
{ status: 500 },
),
);
}
});
emitter.on('end', () => {
resolve(Response.json({ message, sources }, { status: 200 }));
});
emitter.on('error', (error: any) => {
reject(
Response.json(
{ message: 'Search error', error },
{ status: 500 },
),
);
});
},
);
}
const encoder = new TextEncoder();
const abortController = new AbortController();
const { signal } = abortController;
const stream = new ReadableStream({
start(controller) {
let sources: any[] = [];
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'init',
data: 'Stream connected',
}) + '\n',
),
);
signal.addEventListener('abort', () => {
emitter.removeAllListeners();
try {
controller.close();
} catch (error) {}
});
emitter.on('data', (data: string) => {
if (signal.aborted) return;
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'response',
data: parsedData.data,
}) + '\n',
),
);
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'sources',
data: sources,
}) + '\n',
),
);
}
} catch (error) {
controller.error(error);
}
});
emitter.on('end', () => {
if (signal.aborted) return;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'done',
}) + '\n',
),
);
controller.close();
});
emitter.on('error', (error: any) => {
if (signal.aborted) return;
controller.error(error);
});
},
cancel() {
abortController.abort();
},
});
return new Response(stream, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache, no-transform',
Connection: 'keep-alive',
},
});
} catch (err: any) {
console.error(`Error in getting search results: ${err.message}`);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

@@ -1,81 +0,0 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
);
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error occurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error occurred while generating suggestions' },
{ status: 500 },
);
}
};

@@ -1,134 +0,0 @@
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
interface FileRes {
fileName: string;
fileExtension: string;
fileId: string;
}
const uploadDir = path.join(process.cwd(), 'uploads');
if (!fs.existsSync(uploadDir)) {
fs.mkdirSync(uploadDir, { recursive: true });
}
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
export async function POST(req: Request) {
try {
const formData = await req.formData();
const files = formData.getAll('files') as File[];
const embedding_model = formData.get('embedding_model');
const embedding_model_provider = formData.get('embedding_model_provider');
if (!embedding_model || !embedding_model_provider) {
return NextResponse.json(
{ message: 'Missing embedding model or provider' },
{ status: 400 },
);
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel =
embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
let embeddingsModel =
embeddingModels[provider as string]?.[embeddingModel as string]?.model;
if (!embeddingsModel) {
return NextResponse.json(
{ message: 'Invalid embedding model selected' },
{ status: 400 },
);
}
const processedFiles: FileRes[] = [];
await Promise.all(
files.map(async (file: any) => {
const fileExtension = file.name.split('.').pop();
if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
return NextResponse.json(
{ message: 'File type not supported' },
{ status: 400 },
);
}
const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
const filePath = path.join(uploadDir, uniqueFileName);
const buffer = Buffer.from(await file.arrayBuffer());
fs.writeFileSync(filePath, new Uint8Array(buffer));
let docs: any[] = [];
if (fileExtension === 'pdf') {
const loader = new PDFLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'docx') {
const loader = new DocxLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'txt') {
const text = fs.readFileSync(filePath, 'utf-8');
docs = [
new Document({ pageContent: text, metadata: { title: file.name } }),
];
}
const splitted = await splitter.splitDocuments(docs);
const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(
extractedDataPath,
JSON.stringify({
title: file.name,
contents: splitted.map((doc) => doc.pageContent),
}),
);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsDataPath = filePath.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(
embeddingsDataPath,
JSON.stringify({
title: file.name,
embeddings,
}),
);
processedFiles.push({
fileName: file.name,
fileExtension: fileExtension,
fileId: uniqueFileName.replace(/\.\w+$/, ''),
});
}),
);
return NextResponse.json({
files: processedFiles,
});
} catch (error) {
console.error('Error uploading file:', error);
return NextResponse.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
}
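A hedged sketch of a matching client call; the field names mirror the formData reads in the handler above, while the `/api/uploads` path and the `fileInput` element are assumptions:
// Hypothetical browser-side upload matching the handler above.
const form = new FormData();
form.append('files', fileInput.files![0]); // fileInput: an <input type="file"> element
form.append('embedding_model', 'text-embedding-3-small');
form.append('embedding_model_provider', 'openai');
const res = await fetch('/api/uploads', { method: 'POST', body: form });
const { files } = await res.json();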

View File

@ -1,83 +0,0 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ videos }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching videos: ${err}`);
return Response.json(
{ message: 'An error occurred while searching videos' },
{ status: 500 },
);
}
};

View File

@ -1,9 +0,0 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
};
export default Page;

View File

@ -0,0 +1,171 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getImageSearchEngineBackend } from '../config';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow-up question: What is a cat?
Rephrased: A cat
2. Follow-up question: What is a car? How does it work?
Rephrased: Car working
3. Follow-up question: How does an AC work?
Rephrased: AC working
Conversation:
{chat_history}
Follow-up question: {query}
Rephrased question:
`;
type ImageSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
async function performImageSearch(query: string) {
const searchEngine = getImageSearchEngineBackend();
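// Dispatch on the configured image backend; each branch normalizes hits to img_src/url/title (plus source where available).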
let images = [];
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(query);
images = googleResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.displayLink,
};
}
})
.filter(Boolean);
break;
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['google images', 'bing images'],
pageno: 1,
});
searxResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
break;
}
case 'brave': {
const braveResult = await searchBraveAPI(query);
images = braveResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
case 'yacy': {
const yacyResult = await searchYaCy(query);
images = yacyResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
case 'bing': {
const bingResult = await searchBingAPI(query);
images = bingResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
return images;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: ImageSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const images = await performImageSearch(input);
return images.slice(0, 10);
}),
]);
};
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const imageSearchChain = createImageSearchChain(llm);
return imageSearchChain.invoke(input);
};
export default handleImageSearch;
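A minimal sketch of driving the chain above, assuming an already-constructed chat model; the import paths and the sample conversation are illustrative:
// Hypothetical invocation of the image search chain above.
import { HumanMessage } from '@langchain/core/messages';
import handleImageSearch from './imageSearchAgent';

const images = await handleImageSearch(
  {
    chat_history: [new HumanMessage('We were talking about the Eiffel Tower.')],
    query: 'Show me pictures of it at night',
  },
  llm, // any BaseChatModel instance loaded via getAvailableChatModelProviders()
);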

View File

@ -1,5 +1,5 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';

View File

@ -0,0 +1,169 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getVideoSearchEngineBackend } from '../config';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search YouTube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow-up question: How does a car work?
Rephrased: How does a car work?
2. Follow-up question: What is the theory of relativity?
Rephrased: What is the theory of relativity
3. Follow-up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow-up question: {query}
Rephrased question:
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
const strParser = new StringOutputParser();
async function performVideoSearch(query: string) {
const searchEngine = getVideoSearchEngineBackend();
const youtubeQuery = `${query} site:youtube.com`;
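// Scope general-purpose engines to YouTube via a site: filter; SearXNG instead uses its native youtube engine with the plain query.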
let videos = [];
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(youtubeQuery);
googleResult.results.forEach((result) => {
// Iterate over the normalized .results array rather than the raw provider response
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['youtube'],
});
searxResult.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
break;
}
case 'brave': {
const braveResult = await searchBraveAPI(youtubeQuery);
braveResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
case 'yacy': {
console.log('Video search is not supported by the YaCy backend; returning no results');
videos = [];
break;
}
case 'bing': {
const bingResult = await searchBingAPI(youtubeQuery);
bingResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
return videos;
}
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const videos = await performVideoSearch(input);
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
};
export default handleVideoSearch;

View File

@ -1,43 +0,0 @@
'use client';
import { useState } from 'react';
import { cn } from '@/lib/utils';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
content: string;
}
const ThinkBox = ({ content }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(false);
return (
<div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
<button
onClick={() => setIsExpanded(!isExpanded)}
className="w-full flex items-center justify-between px-4 py-1 text-black/90 dark:text-white/90 hover:bg-light-200 dark:hover:bg-dark-200 transition duration-200"
>
<div className="flex items-center space-x-2">
<BrainCircuit
size={20}
className="text-[#9C27B0] dark:text-[#CE93D8]"
/>
<p className="font-medium text-sm">Thinking Process</p>
</div>
{isExpanded ? (
<ChevronUp size={18} className="text-black/70 dark:text-white/70" />
) : (
<ChevronDown size={18} className="text-black/70 dark:text-white/70" />
)}
</button>
{isExpanded && (
<div className="px-4 py-3 text-black/80 dark:text-white/80 text-sm border-t border-light-200 dark:border-dark-200 bg-light-100/50 dark:bg-dark-100/50 whitespace-pre-wrap">
{content}
</div>
)}
</div>
);
};
export default ThinkBox;

View File

@ -1,21 +1,21 @@
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';
// Use dynamic imports for Node.js modules to prevent client-side errors
let fs: any;
let path: any;
if (typeof window === 'undefined') {
// We're on the server
fs = require('fs');
path = require('path');
}
const configFileName = 'config.toml';
interface Config {
GENERAL: {
PORT: number;
SIMILARITY_MEASURE: string;
KEEP_ALIVE: string;
};
SEARCH_ENGINE_BACKENDS: {
SEARCH: string;
IMAGE: string;
VIDEO: string;
NEWS: string;
};
MODELS: {
OPENAI: {
API_KEY: string;
@ -32,20 +32,29 @@ interface Config {
OLLAMA: {
API_URL: string;
};
DEEPSEEK: {
API_KEY: string;
};
LM_STUDIO: {
API_URL: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
MODEL_NAME: string;
};
};
API_ENDPOINTS: {
SEARXNG: string;
SEARCH_ENGINES: {
GOOGLE: {
API_KEY: string;
CSE_ID: string;
};
SEARXNG: {
ENDPOINT: string;
};
BING: {
SUBSCRIPTION_KEY: string;
};
BRAVE: {
API_KEY: string;
};
YACY: {
ENDPOINT: string;
};
};
}
@ -53,17 +62,12 @@ type RecursivePartial<T> = {
[P in keyof T]?: RecursivePartial<T[P]>;
};
const loadConfig = () => {
// Server-side only
if (typeof window === 'undefined') {
return toml.parse(
fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
) as any as Config;
}
const loadConfig = () =>
toml.parse(
fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
) as any as Config;
// Client-side fallback - settings will be loaded via API
return {} as Config;
};
export const getPort = () => loadConfig().GENERAL.PORT;
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
@ -78,13 +82,35 @@ export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.SEARCH;
export const getImageSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.IMAGE || getSearchEngineBackend();
export const getVideoSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.VIDEO || getSearchEngineBackend();
export const getNewsSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.NEWS || getSearchEngineBackend();
export const getGoogleApiKey = () => loadConfig().SEARCH_ENGINES.GOOGLE.API_KEY;
export const getGoogleCseId = () => loadConfig().SEARCH_ENGINES.GOOGLE.CSE_ID;
export const getBraveApiKey = () => loadConfig().SEARCH_ENGINES.BRAVE.API_KEY;
export const getBingSubscriptionKey = () =>
loadConfig().SEARCH_ENGINES.BING.SUBSCRIPTION_KEY;
export const getYacyJsonEndpoint = () =>
loadConfig().SEARCH_ENGINES.YACY.ENDPOINT;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
process.env.SEARXNG_API_URL || loadConfig().SEARCH_ENGINES.SEARXNG.ENDPOINT;
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@ -94,9 +120,6 @@ export const getCustomOpenaiApiUrl = () =>
export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
export const getLMStudioApiEndpoint = () =>
loadConfig().MODELS.LM_STUDIO.API_URL;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
@ -129,13 +152,11 @@ const mergeConfigs = (current: any, update: any): any => {
};
export const updateConfig = (config: RecursivePartial<Config>) => {
// Server-side only
if (typeof window === 'undefined') {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(path.join(process.cwd(), `${configFileName}`)),
toml.stringify(mergedConfig),
);
}
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(__dirname, `../${configFileName}`),
toml.stringify(mergedConfig),
);
};
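For orientation, a sketch of the config.toml sections that the new getters above read; the table names follow the Config interface, while every value below is a placeholder:
[SEARCH_ENGINE_BACKENDS]
SEARCH = "searxng"   # default backend; IMAGE/VIDEO/NEWS fall back to this when unset
IMAGE = "google"
VIDEO = "searxng"
NEWS = "searxng"

[SEARCH_ENGINES.GOOGLE]
API_KEY = "your-google-api-key"
CSE_ID = "your-cse-id"

[SEARCH_ENGINES.SEARXNG]
ENDPOINT = "http://localhost:32768"

[SEARCH_ENGINES.BING]
SUBSCRIPTION_KEY = "your-bing-subscription-key"

[SEARCH_ENGINES.BRAVE]
API_KEY = "your-brave-api-key"

[SEARCH_ENGINES.YACY]
ENDPOINT = "http://localhost:8090"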

View File

@ -1,9 +1,8 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';
import path from 'path';
const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
const sqlite = new Database('data/db.sqlite');
const db = drizzle(sqlite, {
schema: schema,
});

View File

@ -1,92 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow-up question: What is a cat?
Rephrased: A cat
2. Follow-up question: What is a car? How does it work?
Rephrased: Car working
3. Follow-up question: How does an AC work?
Rephrased: AC working
Conversation:
{chat_history}
Follow-up question: {query}
Rephrased question:
`;
type ImageSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface ImageSearchResult {
img_src: string;
url: string;
title: string;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: ImageSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images: ImageSearchResult[] = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
}),
]);
};
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const imageSearchChain = createImageSearchChain(llm);
return imageSearchChain.invoke(input);
};
export default handleImageSearch;

View File

@ -1,99 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search YouTube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow-up question: How does a car work?
Rephrased: How does a car work?
2. Follow-up question: What is the theory of relativity?
Rephrased: What is the theory of relativity
3. Follow-up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow-up question: {query}
Rephrased question:
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface VideoSearchResult {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos: VideoSearchResult[] = [];
res.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
};
export default handleVideoSearch;

View File

@ -28,7 +28,7 @@ export class HuggingFaceTransformersEmbeddings
timeout?: number;
private pipelinePromise: Promise<any> | undefined;
private pipelinePromise: Promise<any>;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});

View File

@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {
constructor(args?: LineOutputParserArgs) {
super();
this.key = args?.key ?? this.key;
this.key = args.key ?? this.key;
}
static lc_name() {

View File

@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
constructor(args?: LineListOutputParserArgs) {
super();
this.key = args?.key ?? this.key;
this.key = args.key ?? this.key;
}
static lc_name() {

View File

@ -1,43 +1,6 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
export const PROVIDER_INFO = {
key: 'anthropic',
displayName: 'Anthropic',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
import { getAnthropicApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadAnthropicChatModels = async () => {
const anthropicApiKey = getAnthropicApiKey();
@ -45,22 +8,52 @@ export const loadAnthropicChatModels = async () => {
if (!anthropicApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
anthropicChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
const chatModels = {
'claude-3-5-sonnet-20241022': {
displayName: 'Claude 3.5 Sonnet',
model: new ChatAnthropic({
apiKey: anthropicApiKey,
modelName: model.key,
temperature: 0.7,
}) as unknown as BaseChatModel,
};
});
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-sonnet-20241022',
}),
},
'claude-3-5-haiku-20241022': {
displayName: 'Claude 3.5 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-haiku-20241022',
}),
},
'claude-3-opus-20240229': {
displayName: 'Claude 3 Opus',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-opus-20240229',
}),
},
'claude-3-sonnet-20240229': {
displayName: 'Claude 3 Sonnet',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-sonnet-20240229',
}),
},
'claude-3-haiku-20240307': {
displayName: 'Claude 3 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-haiku-20240307',
}),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading Anthropic models: ${err}`);
logger.error(`Error loading Anthropic models: ${err}`);
return {};
}
};

View File

@ -1,49 +0,0 @@
import { ChatOpenAI } from '@langchain/openai';
import { getDeepseekApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
export const PROVIDER_INFO = {
key: 'deepseek',
displayName: 'Deepseek AI',
};
const deepseekChatModels: Record<string, string>[] = [
{
displayName: 'Deepseek Chat (Deepseek V3)',
key: 'deepseek-chat',
},
{
displayName: 'Deepseek Reasoner (Deepseek R1)',
key: 'deepseek-reasoner',
},
];
export const loadDeepseekChatModels = async () => {
const deepseekApiKey = getDeepseekApiKey();
if (!deepseekApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
deepseekChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: deepseekApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.deepseek.com',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
console.error(`Error loading Deepseek models: ${err}`);
return {};
}
};

View File

@ -2,57 +2,8 @@ import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'gemini',
displayName: 'Google Gemini',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.5 Pro Experimental',
key: 'gemini-2.5-pro-exp-03-25',
},
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Flash Thinking Experimental',
key: 'gemini-2.0-flash-thinking-exp-01-21',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 004',
key: 'models/text-embedding-004',
},
{
displayName: 'Embedding 001',
key: 'models/embedding-001',
},
];
import { getGeminiApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey();
@ -60,47 +11,75 @@ export const loadGeminiChatModels = async () => {
if (!geminiApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
geminiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
const chatModels = {
'gemini-1.5-flash': {
displayName: 'Gemini 1.5 Flash',
model: new ChatGoogleGenerativeAI({
apiKey: geminiApiKey,
modelName: model.key,
modelName: 'gemini-1.5-flash',
temperature: 0.7,
}) as unknown as BaseChatModel,
};
});
apiKey: geminiApiKey,
}),
},
'gemini-1.5-flash-8b': {
displayName: 'Gemini 1.5 Flash 8B',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash-8b',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-pro': {
displayName: 'Gemini 1.5 Pro',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-pro',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-exp': {
displayName: 'Gemini 2.0 Flash Exp',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-exp',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-thinking-exp-01-21': {
displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-thinking-exp-01-21',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading Gemini models: ${err}`);
logger.error(`Error loading Gemini models: ${err}`);
return {};
}
};
export const loadGeminiEmbeddingModels = async () => {
export const loadGeminiEmbeddingsModels = async () => {
const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {};
try {
const embeddingModels: Record<string, EmbeddingModel> = {};
geminiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
const embeddingModels = {
'text-embedding-004': {
displayName: 'Text Embedding',
model: new GoogleGenerativeAIEmbeddings({
apiKey: geminiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
});
modelName: 'text-embedding-004',
}),
},
};
return embeddingModels;
} catch (err) {
console.error(`Error loading Gemini embeddings models: ${err}`);
logger.error(`Error loading Gemini embeddings model: ${err}`);
return {};
}
};

View File

@ -1,91 +1,6 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
export const PROVIDER_INFO = {
key: 'groq',
displayName: 'Groq',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
/* {
displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
}, */
{
displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
key: 'meta-llama/llama-4-scout-17b-16e-instruct',
},
];
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey();
@ -93,25 +8,129 @@ export const loadGroqChatModels = async () => {
if (!groqApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
groqChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
const chatModels = {
'llama-3.3-70b-versatile': {
displayName: 'Llama 3.3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.3-70b-versatile',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
}) as unknown as BaseChatModel,
};
});
),
},
'llama-3.2-3b-preview': {
displayName: 'Llama 3.2 3B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-3b-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-11b-vision-preview': {
displayName: 'Llama 3.2 11B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-11b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-90b-vision-preview': {
displayName: 'Llama 3.2 90B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-90b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.1-8b-instant': {
displayName: 'Llama 3.1 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.1-8b-instant',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-8b-8192': {
displayName: 'LLaMA3 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-70b-8192': {
displayName: 'LLaMA3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'mixtral-8x7b-32768': {
displayName: 'Mixtral 8x7B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'gemma2-9b-it': {
displayName: 'Gemma2 9B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma2-9b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading Groq models: ${err}`);
logger.error(`Error loading Groq models: ${err}`);
return {};
}
};

View File

@ -1,97 +1,33 @@
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import {
loadOpenAIChatModels,
loadOpenAIEmbeddingModels,
PROVIDER_INFO as OpenAIInfo,
PROVIDER_INFO,
} from './openai';
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
} from '../../config';
import { ChatOpenAI } from '@langchain/openai';
import {
loadOllamaChatModels,
loadOllamaEmbeddingModels,
PROVIDER_INFO as OllamaInfo,
} from './ollama';
import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
import {
loadAnthropicChatModels,
PROVIDER_INFO as AnthropicInfo,
} from './anthropic';
import {
loadGeminiChatModels,
loadGeminiEmbeddingModels,
PROVIDER_INFO as GeminiInfo,
} from './gemini';
import {
loadTransformersEmbeddingsModels,
PROVIDER_INFO as TransformersInfo,
} from './transformers';
import {
loadDeepseekChatModels,
PROVIDER_INFO as DeepseekInfo,
} from './deepseek';
import {
loadLMStudioChatModels,
loadLMStudioEmbeddingsModels,
PROVIDER_INFO as LMStudioInfo,
} from './lmstudio';
export const PROVIDER_METADATA = {
openai: OpenAIInfo,
ollama: OllamaInfo,
groq: GroqInfo,
anthropic: AnthropicInfo,
gemini: GeminiInfo,
transformers: TransformersInfo,
deepseek: DeepseekInfo,
lmstudio: LMStudioInfo,
custom_openai: {
key: 'custom_openai',
displayName: 'Custom OpenAI',
},
};
export interface ChatModel {
displayName: string;
model: BaseChatModel;
}
export interface EmbeddingModel {
displayName: string;
model: Embeddings;
}
export const chatModelProviders: Record<
string,
() => Promise<Record<string, ChatModel>>
> = {
const chatModelProviders = {
openai: loadOpenAIChatModels,
ollama: loadOllamaChatModels,
groq: loadGroqChatModels,
ollama: loadOllamaChatModels,
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
deepseek: loadDeepseekChatModels,
lmstudio: loadLMStudioChatModels,
};
export const embeddingModelProviders: Record<
string,
() => Promise<Record<string, EmbeddingModel>>
> = {
openai: loadOpenAIEmbeddingModels,
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels,
lmstudio: loadLMStudioEmbeddingsModels,
const embeddingModelProviders = {
openai: loadOpenAIEmbeddingsModels,
local: loadTransformersEmbeddingsModels,
ollama: loadOllamaEmbeddingsModels,
gemini: loadGeminiEmbeddingsModels,
};
export const getAvailableChatModelProviders = async () => {
const models: Record<string, Record<string, ChatModel>> = {};
const models = {};
for (const provider in chatModelProviders) {
const providerModels = await chatModelProviders[provider]();
@ -116,7 +52,7 @@ export const getAvailableChatModelProviders = async () => {
configuration: {
baseURL: customOpenAiApiUrl,
},
}) as unknown as BaseChatModel,
}),
},
}
: {}),
@ -126,7 +62,7 @@ export const getAvailableChatModelProviders = async () => {
};
export const getAvailableEmbeddingModelProviders = async () => {
const models: Record<string, Record<string, EmbeddingModel>> = {};
const models = {};
for (const provider in embeddingModelProviders) {
const providerModels = await embeddingModelProviders[provider]();

View File

@ -1,100 +0,0 @@
import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
import axios from 'axios';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'lmstudio',
displayName: 'LM Studio',
};
import { ChatOpenAI } from '@langchain/openai';
import { OpenAIEmbeddings } from '@langchain/openai';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
interface LMStudioModel {
id: string;
name?: string;
}
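// LM Studio exposes an OpenAI-compatible API under /v1; normalize endpoints that omit the suffix.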
const ensureV1Endpoint = (endpoint: string): string =>
endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
try {
await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
return true;
} catch {
return false;
}
};
export const loadLMStudioChatModels = async () => {
const endpoint = getLMStudioApiEndpoint();
if (!endpoint) return {};
if (!(await checkServerAvailability(endpoint))) return {};
try {
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
const chatModels: Record<string, ChatModel> = {};
response.data.data.forEach((model: LMStudioModel) => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
modelName: model.id,
temperature: 0.7,
streaming: true,
maxRetries: 3,
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
console.error(`Error loading LM Studio models: ${err}`);
return {};
}
};
export const loadLMStudioEmbeddingsModels = async () => {
const endpoint = getLMStudioApiEndpoint();
if (!endpoint) return {};
if (!(await checkServerAvailability(endpoint))) return {};
try {
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
const embeddingsModels: Record<string, EmbeddingModel> = {};
response.data.data.forEach((model: LMStudioModel) => {
embeddingsModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
modelName: model.id,
}) as unknown as Embeddings,
};
});
return embeddingsModels;
} catch (err) {
console.error(`Error loading LM Studio embeddings model: ${err}`);
return {};
}
};

View File

@ -1,78 +1,74 @@
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'ollama',
displayName: 'Ollama',
};
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios';
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
const ollamaEndpoint = getOllamaApiEndpoint();
const keepAlive = getKeepAlive();
if (!ollamaApiEndpoint) return {};
if (!ollamaEndpoint) return {};
try {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models } = res.data;
const { models: ollamaModels } = response.data;
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.model] = {
const chatModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
displayName: model.name,
model: new ChatOllama({
baseUrl: ollamaApiEndpoint,
baseUrl: ollamaEndpoint,
model: model.model,
temperature: 0.7,
keepAlive: getKeepAlive(),
keepAlive: keepAlive,
}),
};
});
return acc;
}, {});
return chatModels;
} catch (err) {
console.error(`Error loading Ollama models: ${err}`);
logger.error(`Error loading Ollama models: ${err}`);
return {};
}
};
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
export const loadOllamaEmbeddingsModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
if (!ollamaApiEndpoint) return {};
if (!ollamaEndpoint) return {};
try {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models } = res.data;
const { models: ollamaModels } = response.data;
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model: any) => {
embeddingModels[model.model] = {
const embeddingsModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
displayName: model.name,
model: new OllamaEmbeddings({
baseUrl: ollamaApiEndpoint,
baseUrl: ollamaEndpoint,
model: model.model,
}),
};
});
return embeddingModels;
return acc;
}, {});
return embeddingsModels;
} catch (err) {
console.error(`Error loading Ollama embeddings models: ${err}`);
logger.error(`Error loading Ollama embeddings model: ${err}`);
return {};
}
};

View File

@ -1,95 +1,89 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'openai',
displayName: 'OpenAI',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadOpenAIChatModels = async () => {
const openaiApiKey = getOpenaiApiKey();
const openAIApiKey = getOpenaiApiKey();
if (!openaiApiKey) return {};
if (!openAIApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
const chatModels = {
'gpt-3.5-turbo': {
displayName: 'GPT-3.5 Turbo',
model: new ChatOpenAI({
openAIApiKey: openaiApiKey,
modelName: model.key,
openAIApiKey,
modelName: 'gpt-3.5-turbo',
temperature: 0.7,
}) as unknown as BaseChatModel,
};
});
}),
},
'gpt-4': {
displayName: 'GPT-4',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
},
'gpt-4-turbo': {
displayName: 'GPT-4 turbo',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
},
'gpt-4o': {
displayName: 'GPT-4 omni',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o',
temperature: 0.7,
}),
},
'gpt-4o-mini': {
displayName: 'GPT-4 omni mini',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o-mini',
temperature: 0.7,
}),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading OpenAI models: ${err}`);
logger.error(`Error loading OpenAI models: ${err}`);
return {};
}
};
export const loadOpenAIEmbeddingModels = async () => {
const openaiApiKey = getOpenaiApiKey();
export const loadOpenAIEmbeddingsModels = async () => {
const openAIApiKey = getOpenaiApiKey();
if (!openaiApiKey) return {};
if (!openAIApiKey) return {};
try {
const embeddingModels: Record<string, EmbeddingModel> = {};
openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
const embeddingModels = {
'text-embedding-3-small': {
displayName: 'Text Embedding 3 Small',
model: new OpenAIEmbeddings({
openAIApiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
});
openAIApiKey,
modelName: 'text-embedding-3-small',
}),
},
'text-embedding-3-large': {
displayName: 'Text Embedding 3 Large',
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
},
};
return embeddingModels;
} catch (err) {
console.error(`Error loading OpenAI embeddings models: ${err}`);
logger.error(`Error loading OpenAI embeddings model: ${err}`);
return {};
}
};

View File

@ -1,10 +1,6 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const PROVIDER_INFO = {
key: 'transformers',
displayName: 'Hugging Face',
};
export const loadTransformersEmbeddingsModels = async () => {
try {
const embeddingModels = {
@ -30,7 +26,7 @@ export const loadTransformersEmbeddingsModels = async () => {
return embeddingModels;
} catch (err) {
console.error(`Error loading Transformers embeddings model: ${err}`);
logger.error(`Error loading Transformers embeddings model: ${err}`);
return {};
}
};

View File

@ -1,59 +0,0 @@
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import prompts from '../prompts';
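// One preconfigured MetaSearchAgent per focus mode; rerank/rerankThreshold tune how aggressively low-similarity sources are dropped.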
export const searchHandlers: Record<string, MetaSearchAgent> = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};

View File

@ -0,0 +1,105 @@
import axios from 'axios';
import { getBingSubscriptionKey } from '../../config';
interface BingAPISearchResult {
_type: string;
name: string;
url: string;
displayUrl: string;
snippet?: string;
dateLastCrawled?: string;
thumbnailUrl?: string;
contentUrl?: string;
hostPageUrl?: string;
width?: number;
height?: number;
accentColor?: string;
contentSize?: string;
datePublished?: string;
encodingFormat?: string;
hostPageDisplayUrl?: string;
id?: string;
isLicensed?: boolean;
isFamilyFriendly?: boolean;
language?: string;
mediaUrl?: string;
motionThumbnailUrl?: string;
publisher?: string;
viewCount?: number;
webSearchUrl?: string;
primaryImageOfPage?: {
thumbnailUrl?: string;
width?: number;
height?: number;
};
video?: {
allowHttpsEmbed?: boolean;
embedHtml?: string;
allowMobileEmbed?: boolean;
viewCount?: number;
duration?: string;
};
image?: {
thumbnail?: {
contentUrl?: string;
width?: number;
height?: number;
};
imageInsightsToken?: string;
imageId?: string;
};
}
export const searchBingAPI = async (query: string) => {
try {
const bingApiKey = await getBingSubscriptionKey();
const url = new URL(`https://api.cognitive.microsoft.com/bing/v7.0/search`);
url.searchParams.append('q', query);
url.searchParams.append('responseFilter', 'Webpages,Images,Videos');
const res = await axios.get(url.toString(), {
headers: {
'Ocp-Apim-Subscription-Key': bingApiKey,
Accept: 'application/json',
},
});
if (res.data.error) {
throw new Error(`Bing API Error: ${res.data.error.message}`);
}
const originalres = res.data;
// Extract web, image, and video results
const webResults = originalres.webPages?.value || [];
const imageResults = originalres.images?.value || [];
const videoResults = originalres.videos?.value || [];
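// Prefer the page's own thumbnail, then fall back to image or video hits that share the same host page URL.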
const results = webResults.map((item: BingAPISearchResult) => ({
title: item.name,
url: item.url,
content: item.snippet,
img_src:
item.primaryImageOfPage?.thumbnailUrl ||
imageResults.find((img: any) => img.hostPageUrl === item.url)
?.thumbnailUrl ||
videoResults.find((vid: any) => vid.hostPageUrl === item.url)
?.thumbnailUrl,
...(item.video && {
videoData: {
duration: item.video.duration,
embedUrl: item.video.embedHtml?.match(/src="(.*?)"/)?.[1],
},
publisher: item.publisher,
datePublished: item.datePublished,
}),
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Bing API Error: ${errorMessage}`);
}
};

View File

@ -0,0 +1,102 @@
import axios from 'axios';
import { getBraveApiKey } from '../../config';
interface BraveSearchResult {
title: string;
url: string;
content?: string;
img_src?: string;
age?: string;
family_friendly?: boolean;
language?: string;
video?: {
embedUrl?: string;
duration?: string;
};
rating?: {
value: number;
scale: number;
};
products?: Array<{
name: string;
price?: string;
}>;
recipe?: {
ingredients?: string[];
cookTime?: string;
};
meta?: {
fetched?: string;
lastCrawled?: string;
};
}
export const searchBraveAPI = async (
query: string,
numResults: number = 20,
): Promise<{ results: BraveSearchResult[]; originalres: any }> => {
try {
const braveApiKey = await getBraveApiKey();
const url = new URL(`https://api.search.brave.com/res/v1/web/search`);
url.searchParams.append('q', query);
url.searchParams.append('count', numResults.toString());
const res = await axios.get(url.toString(), {
headers: {
'X-Subscription-Token': braveApiKey,
Accept: 'application/json',
},
});
if (res.data.error) {
throw new Error(`Brave API Error: ${res.data.error.message}`);
}
const originalres = res.data;
const webResults = originalres.web?.results || [];
const results: BraveSearchResult[] = webResults.map((item: any) => ({
title: item.title,
url: item.url,
content: item.description,
img_src: item.thumbnail?.src || item.deep_results?.images?.[0]?.src,
age: item.age,
family_friendly: item.family_friendly,
language: item.language,
video: item.video
? {
embedUrl: item.video.embed_url,
duration: item.video.duration,
}
: undefined,
rating: item.rating
? {
value: item.rating.value,
scale: item.rating.scale_max,
}
: undefined,
products: item.deep_results?.product_cluster?.map((p: any) => ({
name: p.name,
price: p.price,
})),
recipe: item.recipe
? {
ingredients: item.recipe.ingredients,
cookTime: item.recipe.cook_time,
}
: undefined,
meta: {
fetched: item.meta?.fetched,
lastCrawled: item.meta?.last_crawled,
},
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Brave API Error: ${errorMessage}`);
}
};
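Each engine wrapper in this directory resolves to the same { results, originalres } pair, so callers can swap backends freely. A minimal sketch, with an illustrative import path and query:
// Hypothetical usage of the Brave wrapper above.
import { searchBraveAPI } from './brave';

const { results } = await searchBraveAPI('open source metasearch engines', 5);
results.forEach((r) => console.log(`${r.title} -> ${r.url}`));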

View File

@ -0,0 +1,85 @@
import axios from 'axios';
import { getGoogleApiKey, getGoogleCseId } from '../../config';
interface GooglePSESearchResult {
kind: string;
title: string;
htmlTitle: string;
link: string;
displayLink: string;
snippet?: string;
htmlSnippet?: string;
cacheId?: string;
formattedUrl: string;
htmlFormattedUrl: string;
pagemap?: {
videoobject: any;
cse_thumbnail?: Array<{
src: string;
width: string;
height: string;
}>;
metatags?: Array<{
[key: string]: string;
author?: string;
}>;
cse_image?: Array<{
src: string;
}>;
};
fileFormat?: string;
image?: {
contextLink: string;
thumbnailLink: string;
};
mime?: string;
labels?: Array<{
name: string;
displayName: string;
}>;
}
export const searchGooglePSE = async (query: string) => {
try {
const [googleApiKey, googleCseID] = await Promise.all([
getGoogleApiKey(),
getGoogleCseId(),
]);
const url = new URL(`https://www.googleapis.com/customsearch/v1`);
url.searchParams.append('q', query);
url.searchParams.append('cx', googleCseID);
url.searchParams.append('key', googleApiKey);
const res = await axios.get(url.toString());
if (res.data.error) {
throw new Error(`Google PSE Error: ${res.data.error.message}`);
}
const originalres = res.data.items || []; // items is absent when the query returns no results
const results = originalres.map((item: GooglePSESearchResult) => ({
title: item.title,
url: item.link,
content: item.snippet,
img_src:
item.pagemap?.cse_image?.[0]?.src ||
item.pagemap?.cse_thumbnail?.[0]?.src ||
item.image?.thumbnailLink,
...(item.pagemap?.videoobject?.[0] && {
videoData: {
duration: item.pagemap.videoobject[0].duration,
embedUrl: item.pagemap.videoobject[0].embedurl,
},
}),
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Google PSE Error: ${errorMessage}`);
}
};

View File

@ -1,5 +1,5 @@
import axios from 'axios';
import { getSearxngApiEndpoint } from './config';
import { getSearxngApiEndpoint } from '../../config';
interface SearxngSearchOptions {
categories?: string[];
@ -30,12 +30,11 @@ export const searchSearxng = async (
if (opts) {
Object.keys(opts).forEach((key) => {
const value = opts[key as keyof SearxngSearchOptions];
if (Array.isArray(value)) {
url.searchParams.append(key, value.join(','));
if (Array.isArray(opts[key])) {
url.searchParams.append(key, opts[key].join(','));
return;
}
url.searchParams.append(key, value as string);
url.searchParams.append(key, opts[key]);
});
}
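
Reviewer note: the hunk above replaces the typed lookup (opts[key as keyof SearxngSearchOptions]) with plain indexing, which gives up some compile-time checking under strict mode. The serialization rule itself is small enough to isolate; an illustrative standalone sketch:

// Append query options to a URL, joining arrays with commas the way
// SearXNG expects (e.g. engines=google,bing).
const appendSearchParams = (
  url: URL,
  opts: Record<string, string | string[] | number | boolean>,
) => {
  Object.entries(opts).forEach(([key, value]) => {
    url.searchParams.append(
      key,
      Array.isArray(value) ? value.join(',') : String(value),
    );
  });
};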


@ -0,0 +1,79 @@
import axios from 'axios';
import { getYacyJsonEndpoint } from '../../config';
interface YaCySearchResult {
channels: {
title: string;
description: string;
link: string;
image: {
url: string;
title: string;
link: string;
};
startIndex: string;
itemsPerPage: string;
searchTerms: string;
items: {
title: string;
link: string;
code: string;
description: string;
pubDate: string;
image?: string;
size: string;
sizename: string;
guid: string;
faviconUrl: string;
host: string;
path: string;
file: string;
urlhash: string;
ranking: string;
}[];
navigation: {
facetname: string;
displayname: string;
type: string;
min: string;
max: string;
mean: string;
elements: {
name: string;
count: string;
modifier: string;
url: string;
}[];
}[];
}[];
}
export const searchYaCy = async (query: string, numResults: number = 20) => {
try {
const yacyBaseUrl = getYacyJsonEndpoint();
const url = new URL(`${yacyBaseUrl}/yacysearch.json`);
url.searchParams.append('query', query);
url.searchParams.append('count', numResults.toString());
const res = await axios.get(url.toString());
const originalres = res.data as YaCySearchResult;
const results = originalres.channels[0].items.map((item) => ({
title: item.title,
url: item.link,
content: item.description,
img_src: item.image || null,
pubDate: item.pubDate,
host: item.host,
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`YaCy Error: ${errorMessage}`);
}
};
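
Reviewer note: usage sketch for the YaCy helper, assuming YACY.ENDPOINT points at a reachable peer (YaCy's default web port is 8090). The second argument caps the result count.

import { searchYaCy } from './lib/searchEngines/yacy';

const demo = async () => {
  const { results } = await searchYaCy('decentralized web search', 5);
  results.forEach((r) => console.log(`${r.title} [${r.host}] ${r.url}`));
};

demo().catch((err) => console.error(err.message));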


@ -1,5 +0,0 @@
declare function computeDot(vectorA: number[], vectorB: number[]): number;
declare module 'compute-dot' {
export default computeDot;
}


@ -51,10 +51,6 @@ export const academicSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.


@ -51,10 +51,6 @@ export const redditSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.


@ -1,6 +1,6 @@
export const webSearchRetrieverPrompt = `
You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
If it is a smple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants to you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
@ -92,10 +92,6 @@ export const webSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.


@ -51,10 +51,6 @@ export const wolframAlphaSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.


@ -7,10 +7,6 @@ You have to cite the answer using [number] notation. You must cite the sentences
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
<context>
{context}
</context>


@ -51,10 +51,6 @@ export const youtubeSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Youtube', this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

src/routes/chats.ts (new file, 66 lines)

@ -0,0 +1,66 @@
import express from 'express';
import logger from '../utils/logger';
import db from '../db/index';
import { eq } from 'drizzle-orm';
import { chats, messages } from '../db/schema';
const router = express.Router();
router.get('/', async (_, res) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return res.status(200).json({ chats: chats });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chats: ${err.message}`);
}
});
router.get('/:id', async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, req.params.id),
});
return res.status(200).json({ chat: chatExists, messages: chatMessages });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chat: ${err.message}`);
}
});
router.delete(`/:id`, async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
await db
.delete(messages)
.where(eq(messages.chatId, req.params.id))
.execute();
return res.status(200).json({ message: 'Chat deleted successfully' });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in deleting chat: ${err.message}`);
}
});
export default router;
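
Reviewer note: a client-side sketch of the three endpoints above, assuming the router is mounted at /api/chats on port 3001 (consistent with the NEXT_PUBLIC_API_URL default added in ui/.env.example later in this diff).

const base = 'http://localhost:3001/api/chats';

const demo = async () => {
  // List chats (the handler reverses them, so newest come first).
  const { chats } = await (await fetch(base)).json();
  // Fetch one chat together with its messages.
  const { chat, messages } = await (
    await fetch(`${base}/${chats[0].id}`)
  ).json();
  console.log(chat.title, messages.length);
  // Delete the chat and its messages.
  await fetch(`${base}/${chats[0].id}`, { method: 'DELETE' });
};

demo().catch((err) => console.error(err.message));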

src/routes/config.ts (new file, 153 lines)

@ -0,0 +1,153 @@
import express from 'express';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import {
getGroqApiKey,
getOllamaApiEndpoint,
getAnthropicApiKey,
getGeminiApiKey,
getOpenaiApiKey,
updateConfig,
getCustomOpenaiApiUrl,
getCustomOpenaiApiKey,
getCustomOpenaiModelName,
getSearchEngineBackend,
getImageSearchEngineBackend,
getVideoSearchEngineBackend,
getNewsSearchEngineBackend,
getSearxngApiEndpoint,
getGoogleApiKey,
getGoogleCseId,
getBingSubscriptionKey,
getBraveApiKey,
getYacyJsonEndpoint,
} from '../config';
import logger from '../utils/logger';
const router = express.Router();
router.get('/', async (_, res) => {
try {
const config = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: chatModelProviders[provider][model].displayName,
};
});
}
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: embeddingModelProviders[provider][model].displayName,
};
});
}
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
// Add search engine configuration
config['searchEngineBackends'] = {
search: getSearchEngineBackend(),
image: getImageSearchEngineBackend(),
video: getVideoSearchEngineBackend(),
news: getNewsSearchEngineBackend(),
};
config['searxngEndpoint'] = getSearxngApiEndpoint();
config['googleApiKey'] = getGoogleApiKey();
config['googleCseId'] = getGoogleCseId();
config['bingSubscriptionKey'] = getBingSubscriptionKey();
config['braveApiKey'] = getBraveApiKey();
config['yacyEndpoint'] = getYacyJsonEndpoint();
res.status(200).json(config);
} catch (err: any) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error getting config: ${err.message}`);
}
});
router.post('/', async (req, res) => {
const config = req.body;
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
SEARCH_ENGINE_BACKENDS: config.searchEngineBackends ? {
SEARCH: config.searchEngineBackends.search,
IMAGE: config.searchEngineBackends.image,
VIDEO: config.searchEngineBackends.video,
NEWS: config.searchEngineBackends.news,
} : undefined,
SEARCH_ENGINES: {
GOOGLE: {
API_KEY: config.googleApiKey,
CSE_ID: config.googleCseId,
},
SEARXNG: {
ENDPOINT: config.searxngEndpoint,
},
BING: {
SUBSCRIPTION_KEY: config.bingSubscriptionKey,
},
BRAVE: {
API_KEY: config.braveApiKey,
},
YACY: {
ENDPOINT: config.yacyEndpoint,
},
},
};
updateConfig(updatedConfig);
res.status(200).json({ message: 'Config updated' });
});
export default router;
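
Reviewer note: worth flagging that the POST handler accepts whatever the client sends without validation, and any keys the client omits end up as undefined in updatedConfig, so this sketch assumes updateConfig merges rather than overwrites. Updating only the search backends might look like:

const demo = async () => {
  await fetch('http://localhost:3001/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      searchEngineBackends: {
        search: 'brave',
        image: 'google',
        video: 'searxng',
        news: 'bing',
      },
      braveApiKey: 'BSA-placeholder', // placeholder, not a real key
    }),
  });
};

demo().catch((err) => console.error(err.message));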

src/routes/discover.ts (new file, 131 lines)

@ -0,0 +1,131 @@
import express from 'express';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getNewsSearchEngineBackend } from '../config';
import logger from '../utils/logger';
const router = express.Router();
async function performSearch(query: string, site: string) {
const searchEngine = getNewsSearchEngineBackend();
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(query);
return googleResult.originalres.map((item) => {
const imageSources = [
item.pagemap?.cse_image?.[0]?.src,
item.pagemap?.cse_thumbnail?.[0]?.src,
item.pagemap?.metatags?.[0]?.['og:image'],
item.pagemap?.metatags?.[0]?.['twitter:image'],
item.pagemap?.metatags?.[0]?.['image'],
].filter(Boolean); // Remove undefined values
return {
title: item.title,
url: item.link,
content: item.snippet,
thumbnail: imageSources[0], // First available image
img_src: imageSources[0], // Same as thumbnail for consistency
iframe_src: null,
author: item.pagemap?.metatags?.[0]?.['og:site_name'] || site,
publishedDate:
item.pagemap?.metatags?.[0]?.['article:published_time'],
};
});
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['bing news'],
pageno: 1,
});
return searxResult.results;
}
case 'brave': {
const braveResult = await searchBraveAPI(query);
return braveResult.results.map((item) => ({
title: item.title,
url: item.url,
content: item.content,
thumbnail: item.img_src,
img_src: item.img_src,
iframe_src: null,
author: item.meta?.fetched || site,
publishedDate: item.meta?.lastCrawled,
}));
}
case 'yacy': {
const yacyResult = await searchYaCy(query);
return yacyResult.results.map((item) => ({
title: item.title,
url: item.url,
content: item.content,
thumbnail: item.img_src,
img_src: item.img_src,
iframe_src: null,
author: item?.host || site,
publishedDate: item?.pubDate,
}));
}
case 'bing': {
const bingResult = await searchBingAPI(query);
return bingResult.results.map((item) => ({
title: item.title,
url: item.url,
content: item.content,
thumbnail: item.img_src,
img_src: item.img_src,
iframe_src: null,
author: item?.publisher || site,
publishedDate: item?.datePublished,
}));
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
}
router.get('/', async (req, res) => {
try {
const queries = [
{ site: 'businessinsider.com', topic: 'AI' },
{ site: 'www.exchangewire.com', topic: 'AI' },
{ site: 'yahoo.com', topic: 'AI' },
{ site: 'businessinsider.com', topic: 'tech' },
{ site: 'www.exchangewire.com', topic: 'tech' },
{ site: 'yahoo.com', topic: 'tech' },
];
const data = (
await Promise.all(
queries.map(async ({ site, topic }) => {
try {
const query = `site:${site} ${topic}`;
return await performSearch(query, site);
} catch (error) {
logger.error(`Error searching ${site}: ${error.message}`);
return [];
}
}),
)
)
.flat()
.sort(() => Math.random() - 0.5)
.filter((item) => item.title && item.url && item.content);
return res.json({ blogs: data });
} catch (err: any) {
logger.error(`Error in discover route: ${err.message}`);
return res.status(500).json({ message: 'An error has occurred' });
}
});
export default router;
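
Reviewer note: one small caveat in the route above: sorting with a random comparator (.sort(() => Math.random() - 0.5)) is not a uniform shuffle and is biased on most engines. If uniformity matters, a Fisher-Yates pass is a drop-in replacement; sketch:

// Unbiased in-place shuffle: each position is swapped with a uniformly
// random index from the not-yet-fixed prefix.
const shuffleInPlace = <T>(items: T[]): T[] => {
  for (let i = items.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    [items[i], items[j]] = [items[j], items[i]];
  }
  return items;
};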

src/routes/images.ts (new file, 82 lines)

@ -0,0 +1,82 @@
import express from 'express';
import handleImageSearch from '../chains/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: ImageSearchBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const images = await handleImageSearch(
{ query: body.query, chat_history: chatHistory },
llm,
);
res.status(200).json({ images });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in image search: ${err.message}`);
}
});
export default router;

src/routes/index.ts (new file, 24 lines)

@ -0,0 +1,24 @@
import express from 'express';
import imagesRouter from './images';
import videosRouter from './videos';
import configRouter from './config';
import modelsRouter from './models';
import suggestionsRouter from './suggestions';
import chatsRouter from './chats';
import searchRouter from './search';
import discoverRouter from './discover';
import uploadsRouter from './uploads';
const router = express.Router();
router.use('/images', imagesRouter);
router.use('/videos', videosRouter);
router.use('/config', configRouter);
router.use('/models', modelsRouter);
router.use('/suggestions', suggestionsRouter);
router.use('/chats', chatsRouter);
router.use('/search', searchRouter);
router.use('/discover', discoverRouter);
router.use('/uploads', uploadsRouter);
export default router;

src/routes/models.ts (new file, 36 lines)

@ -0,0 +1,36 @@
import express from 'express';
import logger from '../utils/logger';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete chatModelProviders[provider][model].model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete embeddingModelProviders[provider][model].model;
});
});
res.status(200).json({ chatModelProviders, embeddingModelProviders });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(err.message);
}
});
export default router;

src/routes/search.ts (new file, 158 lines)

@ -0,0 +1,158 @@
import express from 'express';
import logger from '../utils/logger';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import { searchHandlers } from '../websocket/messageHandler';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '../search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface chatModel {
provider: string;
model: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
}
interface embeddingModel {
provider: string;
model: string;
}
interface ChatRequestBody {
optimizationMode: 'speed' | 'balanced';
focusMode: string;
chatModel?: chatModel;
embeddingModel?: embeddingModel;
query: string;
history: Array<[string, string]>;
}
router.post('/', async (req, res) => {
try {
const body: ChatRequestBody = req.body;
if (!body.focusMode || !body.query) {
return res.status(400).json({ message: 'Missing focus mode or query' });
}
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
const embeddingModel =
body.embeddingModel?.model ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.model || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
return res.status(400).json({ message: 'Invalid focus mode' });
}
const emitter = await searchHandler.searchAndAnswer(
body.query,
history,
llm,
embeddings,
body.optimizationMode,
[],
);
let message = '';
let sources = [];
emitter.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
});
emitter.on('end', () => {
res.status(200).json({ message, sources });
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
res.status(500).json({ message: parsedData.data });
});
} catch (err: any) {
logger.error(`Error in getting search results: ${err.message}`);
res.status(500).json({ message: 'An error has occurred.' });
}
});
export default router;
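
Reviewer note: the shape of a request against this route, for reference. A sketch assuming the router is mounted at /api/search on port 3001; valid focusMode values are the keys of searchHandlers shown later in this diff.

const demo = async () => {
  const res = await fetch('http://localhost:3001/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      focusMode: 'webSearch',
      optimizationMode: 'balanced',
      query: 'How does a metasearch engine rank results?',
      history: [
        ['human', 'hi'],
        ['assistant', 'Hello! How can I help?'],
      ],
    }),
  });
  const { message, sources } = await res.json();
  console.log(message, `(${sources.length} sources)`);
};

demo().catch((err) => console.error(err.message));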

src/routes/suggestions.ts (new file, 81 lines)

@ -0,0 +1,81 @@
import express from 'express';
import generateSuggestions from '../chains/suggestionGeneratorAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsBody {
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: SuggestionsBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const suggestions = await generateSuggestions(
{ chat_history: chatHistory },
llm,
);
res.status(200).json({ suggestions: suggestions });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in generating suggestions: ${err.message}`);
}
});
export default router;

src/routes/uploads.ts (new file, 151 lines)

@ -0,0 +1,151 @@
import express from 'express';
import logger from '../utils/logger';
import multer from 'multer';
import path from 'path';
import crypto from 'crypto';
import fs from 'fs';
import { Embeddings } from '@langchain/core/embeddings';
import { getAvailableEmbeddingModelProviders } from '../lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
const router = express.Router();
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
const storage = multer.diskStorage({
destination: (req, file, cb) => {
cb(null, path.join(process.cwd(), './uploads'));
},
filename: (req, file, cb) => {
const splitedFileName = file.originalname.split('.');
const fileExtension = splitedFileName[splitedFileName.length - 1];
if (!['pdf', 'docx', 'txt'].includes(fileExtension)) {
return cb(new Error('File type is not supported'), '');
}
cb(null, `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`);
},
});
const upload = multer({ storage });
router.post(
'/',
upload.fields([
{ name: 'files' },
{ name: 'embedding_model', maxCount: 1 },
{ name: 'embedding_model_provider', maxCount: 1 },
]),
async (req, res) => {
try {
const { embedding_model, embedding_model_provider } = req.body;
if (!embedding_model || !embedding_model_provider) {
res
.status(400)
.json({ message: 'Missing embedding model or provider' });
return;
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel: Embeddings =
embedding_model ?? Object.keys(embeddingModels[provider])[0];
let embeddingsModel: Embeddings | undefined;
if (
embeddingModels[provider] &&
embeddingModels[provider][embeddingModel]
) {
embeddingsModel = embeddingModels[provider][embeddingModel].model as
| Embeddings
| undefined;
}
if (!embeddingsModel) {
res.status(400).json({ message: 'Invalid LLM model selected' });
return;
}
const files = req.files['files'] as Express.Multer.File[];
if (!files || files.length === 0) {
res.status(400).json({ message: 'No files uploaded' });
return;
}
await Promise.all(
files.map(async (file) => {
let docs: Document[] = [];
if (file.mimetype === 'application/pdf') {
const loader = new PDFLoader(file.path);
docs = await loader.load();
} else if (
file.mimetype ===
'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
) {
const loader = new DocxLoader(file.path);
docs = await loader.load();
} else if (file.mimetype === 'text/plain') {
const text = fs.readFileSync(file.path, 'utf-8');
docs = [
new Document({
pageContent: text,
metadata: {
title: file.originalname,
},
}),
];
}
const splitted = await splitter.splitDocuments(docs);
const json = JSON.stringify({
title: file.originalname,
contents: splitted.map((doc) => doc.pageContent),
});
const pathToSave = file.path.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(pathToSave, json);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsJSON = JSON.stringify({
title: file.originalname,
embeddings: embeddings,
});
const pathToSaveEmbeddings = file.path.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(pathToSaveEmbeddings, embeddingsJSON);
}),
);
res.status(200).json({
files: files.map((file) => {
return {
fileName: file.originalname,
fileExtension: file.filename.split('.').pop(),
fileId: file.filename.replace(/\.\w+$/, ''),
};
}),
});
} catch (err: any) {
logger.error(`Error in uploading file results: ${err.message}`);
res.status(500).json({ message: 'An error has occurred.' });
}
},
);
export default router;
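
Reviewer note: an upload sketch using Node 18+'s global fetch/FormData/Blob. The field names match the router above; the provider and model identifiers are placeholders, not values this diff guarantees exist.

import fs from 'node:fs';

const demo = async () => {
  const form = new FormData();
  form.append('files', new Blob([fs.readFileSync('notes.txt')]), 'notes.txt');
  form.append('embedding_model_provider', 'openai'); // placeholder provider
  form.append('embedding_model', 'text-embedding-3-small'); // placeholder model
  const res = await fetch('http://localhost:3001/api/uploads', {
    method: 'POST',
    body: form,
  });
  // => { files: [{ fileName, fileExtension, fileId }] }
  console.log(await res.json());
};

demo().catch((err) => console.error(err.message));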

src/routes/videos.ts (new file, 82 lines)

@ -0,0 +1,82 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../chains/videoSearchAgent';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: VideoSearchBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const videos = await handleVideoSearch(
{ chat_history: chatHistory, query: body.query },
llm,
);
res.status(200).json({ videos });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in video search: ${err.message}`);
}
});
export default router;


@ -13,17 +13,23 @@ import {
} from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document';
import { searchSearxng } from '../searxng';
import path from 'node:path';
import fs from 'node:fs';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { getSearchEngineBackend } from '../config';
import path from 'path';
import fs from 'fs';
import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { IterableReadableStream } from '@langchain/core/utils/stream';
export interface MetaSearchAgentType {
searchAndAnswer: (
@ -33,7 +39,6 @@ export interface MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) => Promise<eventEmitter>;
}
@ -90,7 +95,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
question = 'summarize';
}
let docs: Document[] = [];
let docs = [];
const linkDocs = await getDocumentsFromLinks({ links });
@ -132,7 +137,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
You are a web search summarizer, tasked with summarizing a piece of text retrieved from a web search. Your job is to summarize the
text into a detailed, 2-4 paragraph explanation that captures the main ideas and provides a comprehensive answer to the query.
If the query is \"summarize\", you should provide a detailed summary of the text. If the query is a specific question, you should answer it in the summary.
- **Journalistic tone**: The summary should sound professional and journalistic, not too casual or vague.
- **Thorough and detailed**: Ensure that every key point from the text is captured and that the summary directly answers the query.
- **Not too lengthy, but detailed**: The summary should be informative but not excessively long. Focus on providing detailed information in a concise format.
@ -203,12 +208,37 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: docs };
} else {
question = question.replace(/<think>.*?<\/think>/g, '');
const searchEngine = getSearchEngineBackend();
const res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
let res;
switch (searchEngine) {
case 'searxng':
res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
break;
case 'google':
res = await searchGooglePSE(question);
break;
case 'bing':
res = await searchBingAPI(question);
break;
case 'brave':
res = await searchBraveAPI(question);
break;
case 'yacy':
res = await searchYaCy(question);
break;
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
if (!res?.results) {
throw new Error(
`No results found for search engine: ${searchEngine}`,
);
}
const documents = res.results.map(
(result) =>
@ -237,11 +267,9 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds: string[],
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
systemInstructions: string,
) {
return RunnableSequence.from([
RunnableMap.from({
systemInstructions: () => systemInstructions,
query: (input: BasicChainInput) => input.query,
chat_history: (input: BasicChainInput) => input.chat_history,
date: () => new Date().toISOString(),
@ -315,7 +343,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));
const fileSimilaritySearchObject = content.contents.map(
(c: string, i: number) => {
(c: string, i) => {
return {
fileName: content.title,
content: c,
@ -418,8 +446,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
return sortedDocs;
}
return [];
}
private processDocs(docs: Document[]) {
@ -432,7 +458,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
}
private async handleStream(
stream: AsyncGenerator<StreamEvent, any, any>,
stream: IterableReadableStream<StreamEvent>,
emitter: eventEmitter,
) {
for await (const event of stream) {
@ -471,7 +497,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) {
const emitter = new eventEmitter();
@ -480,7 +505,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds,
embeddings,
optimizationMode,
systemInstructions,
);
const stream = answeringChain.streamEvents(
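
Reviewer note: the engine switch added above works, but each new backend means another case statement here and in discover.ts. A lookup-table variant keeps the mapping declarative; a sketch reusing the same imports this file already declares:

import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { getSearchEngineBackend } from '../config';

type EngineSearch = (q: string) => Promise<{ results: any[] } | undefined>;

const engineBackends: Record<string, EngineSearch> = {
  searxng: (q) => searchSearxng(q, { language: 'en' }),
  google: (q) => searchGooglePSE(q),
  bing: (q) => searchBingAPI(q),
  brave: (q) => searchBraveAPI(q),
  yacy: (q) => searchYaCy(q),
};

const runSearch = async (question: string) => {
  const backend = getSearchEngineBackend();
  const search = engineBackends[backend];
  if (!search) throw new Error(`Unknown search engine ${backend}`);
  const res = await search(question);
  if (!res?.results) {
    throw new Error(`No results found for search engine: ${backend}`);
  }
  return res.results;
};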


@ -6,7 +6,7 @@ const computeSimilarity = (x: number[], y: number[]): number => {
const similarityMeasure = getSimilarityMeasure();
if (similarityMeasure === 'cosine') {
return cosineSimilarity(x, y) as number;
return cosineSimilarity(x, y);
} else if (similarityMeasure === 'dot') {
return dot(x, y);
}


@ -3,6 +3,7 @@ import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
import logger from './logger';
export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splitter = new RecursiveCharacterTextSplitter();
@ -64,7 +65,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splittedText = await splitter.splitText(parsedText);
const title = res.data
.toString('utf8')
.match(/<title.*>(.*?)<\/title>/)?.[1];
.match(/<title>(.*?)<\/title>/)?.[1];
const linkDocs = splittedText.map((text) => {
return new Document({
@ -78,13 +79,12 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
docs.push(...linkDocs);
} catch (err) {
console.error(
'An error occurred while getting documents from links: ',
err,
logger.error(
`Error at generating documents from links: ${err.message}`,
);
docs.push(
new Document({
pageContent: `Failed to retrieve content from the link: ${err}`,
pageContent: `Failed to retrieve content from the link: ${err.message}`,
metadata: {
title: 'Failed to retrieve content',
url: link,

src/utils/logger.ts (new file, 22 lines)

@ -0,0 +1,22 @@
import winston from 'winston';
const logger = winston.createLogger({
level: 'info',
transports: [
new winston.transports.Console({
format: winston.format.combine(
winston.format.colorize(),
winston.format.simple(),
),
}),
new winston.transports.File({
filename: 'app.log',
format: winston.format.combine(
winston.format.timestamp(),
winston.format.json(),
),
}),
],
});
export default logger;
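
Reviewer note: the Console transport prints colorized level/message lines, while the File transport appends timestamped JSON to app.log in the working directory. Typical usage:

import logger from './utils/logger';

logger.info('backend started');
logger.error(`search failed: ${new Error('timeout').message}`);
// app.log gains a line like:
// {"level":"error","message":"search failed: timeout","timestamp":"..."}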


@ -0,0 +1,122 @@
import { WebSocket } from 'ws';
import { handleMessage } from './messageHandler';
import {
getAvailableEmbeddingModelProviders,
getAvailableChatModelProviders,
} from '../lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import type { IncomingMessage } from 'http';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
export const handleConnection = async (
ws: WebSocket,
request: IncomingMessage,
) => {
try {
const searchParams = new URL(request.url, `http://${request.headers.host}`)
.searchParams;
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
searchParams.get('chatModelProvider') ||
Object.keys(chatModelProviders)[0];
const chatModel =
searchParams.get('chatModel') ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
searchParams.get('embeddingModelProvider') ||
Object.keys(embeddingModelProviders)[0];
const embeddingModel =
searchParams.get('embeddingModel') ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel] &&
chatModelProvider != 'custom_openai'
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
} else if (chatModelProvider == 'custom_openai') {
const customOpenaiApiKey = getCustomOpenaiApiKey();
const customOpenaiApiUrl = getCustomOpenaiApiUrl();
const customOpenaiModelName = getCustomOpenaiModelName();
if (customOpenaiApiKey && customOpenaiApiUrl && customOpenaiModelName) {
llm = new ChatOpenAI({
modelName: customOpenaiModelName,
openAIApiKey: customOpenaiApiKey,
temperature: 0.7,
configuration: {
baseURL: customOpenaiApiUrl,
},
}) as unknown as BaseChatModel;
}
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid LLM or embeddings model selected, please refresh the page and try again.',
key: 'INVALID_MODEL_SELECTED',
}),
);
ws.close();
}
const interval = setInterval(() => {
if (ws.readyState === ws.OPEN) {
ws.send(
JSON.stringify({
type: 'signal',
data: 'open',
}),
);
clearInterval(interval);
}
}, 5);
ws.on(
'message',
async (message) =>
await handleMessage(message.toString(), ws, llm, embeddings),
);
ws.on('close', () => logger.debug('Connection closed'));
} catch (err) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Internal server error.',
key: 'INTERNAL_SERVER_ERROR',
}),
);
ws.close();
logger.error(err);
}
};

src/websocket/index.ts (new file, 8 lines)

@ -0,0 +1,8 @@
import { initServer } from './websocketServer';
import http from 'http';
export const startWebSocketServer = (
server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
) => {
initServer(server);
};


@ -0,0 +1,272 @@
import { EventEmitter, WebSocket } from 'ws';
import { BaseMessage, AIMessage, HumanMessage } from '@langchain/core/messages';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import logger from '../utils/logger';
import db from '../db';
import { chats, messages as messagesSchema } from '../db/schema';
import { eq, asc, gt, and } from 'drizzle-orm';
import crypto from 'crypto';
import { getFileDetails } from '../utils/files';
import MetaSearchAgent, {
MetaSearchAgentType,
} from '../search/metaSearchAgent';
import prompts from '../prompts';
type Message = {
messageId: string;
chatId: string;
content: string;
};
type WSMessage = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
type: string;
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
};
export const searchHandlers = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};
const handleEmitterEvents = (
emitter: EventEmitter,
ws: WebSocket,
messageId: string,
chatId: string,
) => {
let recievedMessage = '';
let sources = [];
emitter.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
ws.send(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: messageId,
}),
);
recievedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
ws.send(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: messageId,
}),
);
sources = parsedData.data;
}
});
emitter.on('end', () => {
ws.send(JSON.stringify({ type: 'messageEnd', messageId: messageId }));
db.insert(messagesSchema)
.values({
content: recievedMessage,
chatId: chatId,
messageId: messageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
ws.send(
JSON.stringify({
type: 'error',
data: parsedData.data,
key: 'CHAIN_ERROR',
}),
);
});
};
export const handleMessage = async (
message: string,
ws: WebSocket,
llm: BaseChatModel,
embeddings: Embeddings,
) => {
try {
const parsedWSMessage = JSON.parse(message) as WSMessage;
const parsedMessage = parsedWSMessage.message;
if (parsedWSMessage.files.length > 0) {
/* TODO: Implement uploads in other classes/single meta class system*/
parsedWSMessage.focusMode = 'webSearch';
}
const humanMessageId =
parsedMessage.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
if (!parsedMessage.content)
return ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid message format',
key: 'INVALID_FORMAT',
}),
);
const history: BaseMessage[] = parsedWSMessage.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
if (parsedWSMessage.type === 'message') {
const handler: MetaSearchAgentType =
searchHandlers[parsedWSMessage.focusMode];
if (handler) {
try {
const emitter = await handler.searchAndAnswer(
parsedMessage.content,
history,
llm,
embeddings,
parsedWSMessage.optimizationMode,
parsedWSMessage.files,
);
handleEmitterEvents(emitter, ws, aiMessageId, parsedMessage.chatId);
const chat = await db.query.chats.findFirst({
where: eq(chats.id, parsedMessage.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: parsedMessage.chatId,
title: parsedMessage.content,
createdAt: new Date().toString(),
focusMode: parsedWSMessage.focusMode,
files: parsedWSMessage.files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: parsedMessage.content,
chatId: parsedMessage.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, parsedMessage.chatId),
),
)
.execute();
}
} catch (err) {
console.log(err);
}
} else {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid focus mode',
key: 'INVALID_FOCUS_MODE',
}),
);
}
}
} catch (err) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid message format',
key: 'INVALID_FORMAT',
}),
);
logger.error(`Failed to handle message: ${err}`);
}
};
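
Reviewer note: a minimal client for this WebSocket protocol, for reference. The query-string model names are placeholders (any provider/model pair the backend reports via /api/models should work); the envelope mirrors the WSMessage type above.

import crypto from 'node:crypto';
import WebSocket from 'ws';

const ws = new WebSocket(
  'ws://localhost:3001?chatModelProvider=openai&chatModel=gpt-4o-mini' +
    '&embeddingModelProvider=openai&embeddingModel=text-embedding-3-small',
);

ws.on('open', () => {
  ws.send(
    JSON.stringify({
      type: 'message',
      focusMode: 'webSearch',
      optimizationMode: 'balanced',
      history: [],
      files: [],
      message: {
        messageId: crypto.randomBytes(7).toString('hex'),
        chatId: crypto.randomBytes(7).toString('hex'),
        content: 'What is YaCy?',
      },
    }),
  );
});

// Stream of { type: 'sources' | 'message' | 'messageEnd' | ... } frames.
ws.on('message', (data) => console.log(data.toString()));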


@ -0,0 +1,16 @@
import { WebSocketServer } from 'ws';
import { handleConnection } from './connectionManager';
import http from 'http';
import { getPort } from '../config';
import logger from '../utils/logger';
export const initServer = (
server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
) => {
const port = getPort();
const wss = new WebSocketServer({ server });
wss.on('connection', handleConnection);
logger.info(`WebSocket server started on port ${port}`);
};


@ -1,27 +1,18 @@
{
"compilerOptions": {
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"lib": ["ESNext"],
"module": "Node16",
"moduleResolution": "Node16",
"target": "ESNext",
"outDir": "dist",
"sourceMap": false,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [
{
"name": "next"
}
],
"paths": {
"@/*": ["./src/*"]
},
"target": "ES2017"
"experimentalDecorators": true,
"emitDecoratorMetadata": true,
"allowSyntheticDefaultImports": true,
"skipLibCheck": true,
"skipDefaultLibCheck": true
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
"exclude": ["node_modules"]
"include": ["src"],
"exclude": ["node_modules", "**/*.spec.ts"]
}

ui/.env.example (new file, 2 lines)

@ -0,0 +1,2 @@
NEXT_PUBLIC_WS_URL=ws://localhost:3001
NEXT_PUBLIC_API_URL=http://localhost:3001/api

ui/.gitignore (new file, vendored, 34 lines)

@ -0,0 +1,34 @@
# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# local env files
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts

ui/.prettierrc.js (new file, 11 lines)

@ -0,0 +1,11 @@
/** @type {import("prettier").Config} */
const config = {
printWidth: 80,
trailingComma: 'all',
endOfLine: 'auto',
singleQuote: true,
tabWidth: 2,
};
module.exports = config;


@ -0,0 +1,7 @@
import ChatWindow from '@/components/ChatWindow';
const Page = ({ params }: { params: { chatId: string } }) => {
return <ChatWindow id={params.chatId} />;
};
export default Page;


@ -19,7 +19,7 @@ const Page = () => {
useEffect(() => {
const fetchData = async () => {
try {
const res = await fetch(`/api/discover`, {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

(binary image changed: 25 KiB before, 25 KiB after)


@ -21,7 +21,7 @@ const Page = () => {
const fetchChats = async () => {
setLoading(true);
const res = await fetch(`/api/chats`, {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',


@ -7,7 +7,6 @@ import { Switch } from '@headlessui/react';
import ThemeSwitcher from '@/components/theme/Switcher';
import { ImagesIcon, VideoIcon } from 'lucide-react';
import Link from 'next/link';
import { PROVIDER_METADATA } from '@/lib/providers';
interface SettingsType {
chatModelProviders: {
@ -21,11 +20,21 @@ interface SettingsType {
anthropicApiKey: string;
geminiApiKey: string;
ollamaApiUrl: string;
lmStudioApiUrl: string;
deepseekApiKey: string;
customOpenaiApiKey: string;
customOpenaiApiUrl: string;
customOpenaiModelName: string;
searchEngineBackends: {
search: string;
image: string;
video: string;
news: string;
};
searxngEndpoint: string;
googleApiKey: string;
googleCseId: string;
bingSubscriptionKey: string;
braveApiKey: string;
yacyEndpoint: string;
}
interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
@ -57,38 +66,6 @@ const Input = ({ className, isSaving, onSave, ...restProps }: InputProps) => {
);
};
interface TextareaProps extends React.InputHTMLAttributes<HTMLTextAreaElement> {
isSaving?: boolean;
onSave?: (value: string) => void;
}
const Textarea = ({
className,
isSaving,
onSave,
...restProps
}: TextareaProps) => {
return (
<div className="relative">
<textarea
placeholder="Any special instructions for the LLM"
className="placeholder:text-sm text-sm w-full flex items-center justify-between p-3 bg-light-secondary dark:bg-dark-secondary rounded-lg hover:bg-light-200 dark:hover:bg-dark-200 transition-colors"
rows={4}
onBlur={(e) => onSave?.(e.target.value)}
{...restProps}
/>
{isSaving && (
<div className="absolute right-3 top-3">
<Loader2
size={16}
className="animate-spin text-black/70 dark:text-white/70"
/>
</div>
)}
</div>
);
};
const Select = ({
className,
options,
@ -146,13 +123,18 @@ const Page = () => {
const [isLoading, setIsLoading] = useState(false);
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [systemInstructions, setSystemInstructions] = useState<string>('');
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
const [searchEngineBackends, setSearchEngineBackends] = useState({
search: '',
image: '',
video: '',
news: '',
});
useEffect(() => {
const fetchConfig = async () => {
setIsLoading(true);
const res = await fetch(`/api/config`, {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
headers: {
'Content-Type': 'application/json',
},
@ -161,6 +143,16 @@ const Page = () => {
const data = (await res.json()) as SettingsType;
setConfig(data);
// Set search engine backends if they exist in the response
if (data.searchEngineBackends) {
setSearchEngineBackends({
search: data.searchEngineBackends.search || '',
image: data.searchEngineBackends.image || '',
video: data.searchEngineBackends.video || '',
news: data.searchEngineBackends.news || '',
});
}
const chatModelProvidersKeys = Object.keys(data.chatModelProviders || {});
const embeddingModelProvidersKeys = Object.keys(
data.embeddingModelProviders || {},
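
Hydrating state from the response uses a per-field || '' fallback. That keeps each <Select> a controlled component from the first render: if a field flipped from undefined to a string, React would warn about an uncontrolled input becoming controlled. Condensed into a helper (a sketch, reusing the hypothetical SearchEngineBackends type above):

// Normalize a possibly-partial server payload into fully-populated state.
const normalizeBackends = (
  b?: Partial<SearchEngineBackends>,
): SearchEngineBackends => ({
  search: b?.search || '',
  image: b?.image || '',
  video: b?.video || '',
  news: b?.news || '',
});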
@@ -208,8 +200,6 @@ const Page = () => {
localStorage.getItem('autoVideoSearch') === 'true',
);
setSystemInstructions(localStorage.getItem('systemInstructions')!);
setIsLoading(false);
};
@@ -225,13 +215,16 @@ const Page = () => {
[key]: value,
} as SettingsType;
const response = await fetch(`/api/config`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
const response = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/config`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(updatedConfig),
},
body: JSON.stringify(updatedConfig),
});
);
if (!response.ok) {
throw new Error('Failed to update config');
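
saveConfig now posts the merged config object to the external backend instead of the local Next.js route. With the frontend and backend on different origins, this becomes a cross-origin request, so the backend presumably needs to send the appropriate CORS headers. The round-trip itself stays a plain JSON POST, roughly:

// The update round-trip, condensed (helper name hypothetical).
const postConfig = async (updated: SettingsType): Promise<void> => {
  const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(updated),
  });
  if (!res.ok) throw new Error('Failed to update config');
};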
@@ -243,7 +236,7 @@ const Page = () => {
key.toLowerCase().includes('api') ||
key.toLowerCase().includes('url')
) {
const res = await fetch(`/api/config`, {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
headers: {
'Content-Type': 'application/json',
},
@@ -366,8 +359,8 @@ const Page = () => {
localStorage.setItem('embeddingModelProvider', value);
} else if (key === 'embeddingModel') {
localStorage.setItem('embeddingModel', value);
} else if (key === 'systemInstructions') {
localStorage.setItem('systemInstructions', value);
} else if (key === 'searchEngineBackends') {
localStorage.setItem('searchEngineBackends', value);
}
} catch (err) {
console.error('Failed to save:', err);
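
One caveat in the new branch: the search-engine handlers further down call saveConfig('searchEngineBackends', { ... }) with an object, and localStorage.setItem coerces that to the string "[object Object]". If the selection is meant to survive a reload, the value likely needs an explicit JSON round-trip, e.g.:

// Hypothetical helpers: persist the backends object as JSON so it can
// be restored on reload instead of becoming "[object Object]".
const saveBackends = (b: SearchEngineBackends) =>
  localStorage.setItem('searchEngineBackends', JSON.stringify(b));

const loadBackends = (): SearchEngineBackends => {
  const stored = localStorage.getItem('searchEngineBackends');
  return stored ? (JSON.parse(stored) as SearchEngineBackends) : emptyBackends;
};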
@@ -513,19 +506,6 @@ const Page = () => {
</div>
</SettingsSection>
<SettingsSection title="System Instructions">
<div className="flex flex-col space-y-4">
<Textarea
value={systemInstructions}
isSaving={savingStates['systemInstructions']}
onChange={(e) => {
setSystemInstructions(e.target.value);
}}
onSave={(value) => saveConfig('systemInstructions', value)}
/>
</div>
</SettingsSection>
<SettingsSection title="Model Settings">
{config.chatModelProviders && (
<div className="flex flex-col space-y-4">
@@ -550,9 +530,8 @@ const Page = () => {
(provider) => ({
value: provider,
label:
(PROVIDER_METADATA as any)[provider]?.displayName ||
provider.charAt(0).toUpperCase() +
provider.slice(1),
provider.slice(1),
}),
)}
/>
@@ -693,9 +672,8 @@ const Page = () => {
(provider) => ({
value: provider,
label:
(PROVIDER_METADATA as any)[provider]?.displayName ||
provider.charAt(0).toUpperCase() +
provider.slice(1),
provider.slice(1),
}),
)}
/>
@@ -843,42 +821,232 @@ const Page = () => {
onSave={(value) => saveConfig('geminiApiKey', value)}
/>
</div>
</div>
</SettingsSection>
<SettingsSection title="Search Engine Settings">
<div className="flex flex-col space-y-4">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Deepseek API Key
Default Search Engine
</p>
<Input
type="text"
placeholder="Deepseek API Key"
value={config.deepseekApiKey}
isSaving={savingStates['deepseekApiKey']}
<Select
value={searchEngineBackends.search}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
deepseekApiKey: e.target.value,
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
search: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
search: value,
});
}}
onSave={(value) => saveConfig('deepseekApiKey', value)}
options={[
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
{ value: 'yacy', label: 'YaCy' },
]}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
LM Studio API URL
Image Search Engine
</p>
<Select
value={searchEngineBackends.image}
onChange={(e) => {
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
image: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
image: value,
});
}}
options={[
{ value: '', label: 'Use Default Search Engine' },
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
]}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Video Search Engine
</p>
<Select
value={searchEngineBackends.video}
onChange={(e) => {
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
video: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
video: value,
});
}}
options={[
{ value: '', label: 'Use Default Search Engine' },
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
]}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
News Search Engine
</p>
<Select
value={searchEngineBackends.news}
onChange={(e) => {
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
news: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
news: value,
});
}}
options={[
{ value: '', label: 'Use Default Search Engine' },
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
]}
/>
</div>
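
All four selects share one handler shape: write the new value into local state, then persist the whole object with the changed field overridden. A curried helper would express that once (a refactor sketch, not code from this PR):

// Hypothetical factory: one change handler for all four categories.
const updateBackend =
  (field: SearchCategory) => (e: React.ChangeEvent<HTMLSelectElement>) => {
    const value = e.target.value;
    setSearchEngineBackends((prev) => ({ ...prev, [field]: value }));
    // Spread the state captured at render and override the changed field,
    // exactly as each inline handler above does.
    saveConfig('searchEngineBackends', {
      ...searchEngineBackends,
      [field]: value,
    });
  };

// Usage: <Select value={searchEngineBackends.image} onChange={updateBackend('image')} options={...} />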
<div className="pt-4 border-t border-light-200 dark:border-dark-200">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
SearXNG Endpoint
</p>
<Input
type="text"
placeholder="SearXNG API Endpoint"
value={config.searxngEndpoint || ''}
isSaving={savingStates['searxngEndpoint']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
searxngEndpoint: e.target.value,
}));
}}
onSave={(value) => saveConfig('searxngEndpoint', value)}
/>
</div>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Google API Key
</p>
<Input
type="text"
placeholder="LM Studio API URL"
value={config.lmStudioApiUrl}
isSaving={savingStates['lmStudioApiUrl']}
placeholder="Google API Key"
value={config.googleApiKey || ''}
isSaving={savingStates['googleApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
lmStudioApiUrl: e.target.value,
googleApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('lmStudioApiUrl', value)}
onSave={(value) => saveConfig('googleApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Google CSE ID
</p>
<Input
type="text"
placeholder="Google Custom Search Engine ID"
value={config.googleCseId || ''}
isSaving={savingStates['googleCseId']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
googleCseId: e.target.value,
}));
}}
onSave={(value) => saveConfig('googleCseId', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Bing Subscription Key
</p>
<Input
type="text"
placeholder="Bing Subscription Key"
value={config.bingSubscriptionKey || ''}
isSaving={savingStates['bingSubscriptionKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
bingSubscriptionKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('bingSubscriptionKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Brave API Key
</p>
<Input
type="text"
placeholder="Brave API Key"
value={config.braveApiKey || ''}
isSaving={savingStates['braveApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
braveApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('braveApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
YaCy Endpoint
</p>
<Input
type="text"
placeholder="YaCy API Endpoint"
value={config.yacyEndpoint || ''}
isSaving={savingStates['yacyEndpoint']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
yacyEndpoint: e.target.value,
}));
}}
onSave={(value) => saveConfig('yacyEndpoint', value)}
/>
</div>
</div>
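
The new credential inputs split along hosting lines: SearXNG and YaCy are self-hosted and take endpoints, while Google (API key plus CSE ID), Bing, and Brave take hosted-API keys. A selected backend only works once its matching field is filled in; a guard along these lines could surface that in the UI (entirely hypothetical, not part of this PR):

// Does the chosen engine still lack its endpoint or credential?
const missingCredential = (c: SettingsType, engine: string): boolean => {
  switch (engine) {
    case 'searxng':
      return !c.searxngEndpoint;
    case 'google':
      return !c.googleApiKey || !c.googleCseId;
    case 'bing':
      return !c.bingSubscriptionKey;
    case 'brave':
      return !c.braveApiKey;
    case 'yacy':
      return !c.yacyEndpoint;
    default:
      return false;
  }
};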

View File

@@ -48,17 +48,11 @@ const Chat = ({
});
useEffect(() => {
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
};
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
if (messages.length === 1) {
document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
}
if (messages[messages.length - 1]?.role == 'user') {
scroll();
}
}, [messages]);
return (
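
The scroll effect is simplified here: previously the smooth scroll ran only when the newest message came from the user; now every change to messages scrolls to the end, which also keeps the view pinned to the bottom while a response streams in. Condensed, the new behavior is just:

// New behavior: scroll to the end on every messages update.
useEffect(() => {
  messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
  if (messages.length === 1) {
    document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
  }
}, [messages]);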

Some files were not shown because too many files have changed in this diff.