Compare commits

...

39 Commits

Author SHA1 Message Date
74f7eaed6e feat(workflow): fix build errors 2025-03-20 13:43:29 +05:30
dddd944a18 feat(workflow): update docker build 2025-03-20 13:22:43 +05:30
7eccd4d75b Merge pull request #679 from ItzCrazyKns/feat/remove-backend (feat(app): fix build errors) 2025-03-20 12:48:27 +05:30
62e6c24840 feat(app): fix build errors 2025-03-20 12:47:54 +05:30
04a0342b52 Merge pull request #678 from ItzCrazyKns/feat/remove-backend (Feat/remove backend) 2025-03-20 12:42:18 +05:30
5c016127cb feat(package): bump version 2025-03-20 12:41:07 +05:30
8b552010f9 feat(docs): update docs 2025-03-20 12:33:15 +05:30
97804e7b4d feat(config): remove unused vars 2025-03-20 12:30:06 +05:30
33b895b75e feat(app): add search API 2025-03-20 12:29:52 +05:30
048de2cb74 feat(docs): update docs 2025-03-20 12:29:31 +05:30
274e6ca88c feat(sidebar): remove unused state 2025-03-20 11:49:00 +05:30
f628b6e416 feat(groq): remove deprecated model 2025-03-20 11:48:44 +05:30
cf7144db96 feat(providers): add HF transformers 2025-03-20 11:48:26 +05:30
ffa793056d feat(chains): remove think tags 2025-03-20 11:47:54 +05:30
584d02b92a feat(app): add thinking model support 2025-03-20 10:56:03 +05:30
008c7cbec0 feat(chat-window): remove debugging code, 2025-03-20 09:47:32 +05:30
4d1ee79b8d feat(package): migrate db when built 2025-03-20 09:47:12 +05:30
ea638279e5 feat(docker): use standalone build 2025-03-20 09:46:50 +05:30
403d13eb50 feat(package): update scripts 2025-03-19 16:34:55 +05:30
217736d05a feat(app): remove backend 2025-03-19 16:23:27 +05:30
8a24572cd2 feat(app): add upload functionality 2025-03-19 15:32:32 +05:30
649c68f292 feat(ui): fix type errors 2025-03-19 13:42:28 +05:30
bab5dba6e1 feat(app): port history saving features 2025-03-19 13:42:15 +05:30
c24edac16d feat(app): add chat functionality 2025-03-19 13:41:52 +05:30
3150c21f17 feat(icons): fix type errors 2025-03-19 13:41:01 +05:30
c46fd7a9c8 feat(utils): add files utils, remove logger, fix API url 2025-03-19 13:40:35 +05:30
bab32e8d70 feat(app): add suggestions route 2025-03-19 13:40:10 +05:30
1130746f5d feat(app): add image & video search functionality 2025-03-19 13:38:40 +05:30
d1e9361665 feat(routes): add discover route 2025-03-19 13:37:54 +05:30
3bf2337697 feat(app): add db & schema 2025-03-19 13:37:01 +05:30
ee6e197ec0 feat(app): lint & beautify 2025-03-18 11:29:04 +05:30
32f26bb4e8 feat(app): add groq, gemini & anthropic provider 2025-03-18 11:28:47 +05:30
4cb20542a5 feat(config): update file path, add post endpoint 2025-03-18 10:33:32 +05:30
97f6196d9b feat(app): add GET config route 2025-03-18 10:25:09 +05:30
6c227cab6f feat(providers): move providers to UI 2025-03-18 10:24:51 +05:30
e9e34ddff9 feat(ui): add meta search agent 2025-03-18 10:24:33 +05:30
e29a08dc46 feat(ui): add necessary utils 2025-03-18 10:24:16 +05:30
5c313e9bed feat(ui): update packages, add config, add searxng 2025-03-18 10:23:59 +05:30
6b5bd9d79b feat(prompts): move to UI 2025-03-18 10:23:21 +05:30
123 changed files with 5060 additions and 6887 deletions


@@ -8,18 +8,12 @@ on:
     types: [published]
 
 jobs:
-  build-and-push:
+  build-amd64:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        service: [backend, app]
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
 
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
         with:
@@ -36,38 +30,104 @@ jobs:
         id: version
         run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
 
-      - name: Build and push Docker image for ${{ matrix.service }}
+      - name: Build and push AMD64 Docker image
         if: github.ref == 'refs/heads/master' && github.event_name == 'push'
         run: |
-          docker buildx create --use
-          if [[ "${{ matrix.service }}" == "backend" ]]; then \
-            DOCKERFILE=backend.dockerfile; \
-            IMAGE_NAME=perplexica-backend; \
-          else \
-            DOCKERFILE=app.dockerfile; \
-            IMAGE_NAME=perplexica-frontend; \
-          fi
-          docker buildx build --platform linux/amd64,linux/arm64 \
-            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
+          DOCKERFILE=app.dockerfile
+          IMAGE_NAME=perplexica
+          docker buildx build --platform linux/amd64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
             --cache-to=type=inline \
+            --provenance false \
             -f $DOCKERFILE \
-            -t itzcrazykns1337/${IMAGE_NAME}:main \
+            -t itzcrazykns1337/${IMAGE_NAME}:amd64 \
             --push .
 
-      - name: Build and push release Docker image for ${{ matrix.service }}
+      - name: Build and push AMD64 release Docker image
         if: github.event_name == 'release'
         run: |
-          docker buildx create --use
-          if [[ "${{ matrix.service }}" == "backend" ]]; then \
-            DOCKERFILE=backend.dockerfile; \
-            IMAGE_NAME=perplexica-backend; \
-          else \
-            DOCKERFILE=app.dockerfile; \
-            IMAGE_NAME=perplexica-frontend; \
-          fi
-          docker buildx build --platform linux/amd64,linux/arm64 \
-            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
+          DOCKERFILE=app.dockerfile
+          IMAGE_NAME=perplexica
+          docker buildx build --platform linux/amd64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
             --cache-to=type=inline \
+            --provenance false \
             -f $DOCKERFILE \
-            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
+            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
             --push .
+
+  build-arm64:
+    runs-on: ubuntu-24.04-arm
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          install: true
+
+      - name: Log in to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Extract version from release tag
+        if: github.event_name == 'release'
+        id: version
+        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
+
+      - name: Build and push ARM64 Docker image
+        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
+        run: |
+          DOCKERFILE=app.dockerfile
+          IMAGE_NAME=perplexica
+          docker buildx build --platform linux/arm64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
+            --cache-to=type=inline \
+            --provenance false \
+            -f $DOCKERFILE \
+            -t itzcrazykns1337/${IMAGE_NAME}:arm64 \
+            --push .
+
+      - name: Build and push ARM64 release Docker image
+        if: github.event_name == 'release'
+        run: |
+          DOCKERFILE=app.dockerfile
+          IMAGE_NAME=perplexica
+          docker buildx build --platform linux/arm64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
+            --cache-to=type=inline \
+            --provenance false \
+            -f $DOCKERFILE \
+            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
+            --push .
+
+  manifest:
+    needs: [build-amd64, build-arm64]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Log in to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Create and push multi-arch manifest for main
+        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
+        run: |
+          IMAGE_NAME=perplexica
+          docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
+            --amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
+            --amend itzcrazykns1337/${IMAGE_NAME}:arm64
+          docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
+
+      - name: Create and push multi-arch manifest for releases
+        if: github.event_name == 'release'
+        run: |
+          IMAGE_NAME=perplexica
+          docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
+            --amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
+            --amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
+          docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}

.gitignore

@@ -4,9 +4,9 @@ npm-debug.log
 yarn-error.log
 
 # Build output
-/.next/
-/out/
-/dist/
+.next/
+out/
+dist/
 
 # IDE/Editor specific
 .vscode/


@@ -6,7 +6,6 @@ const config = {
   endOfLine: 'auto',
   singleQuote: true,
   tabWidth: 2,
-  semi: true,
 };
 
 module.exports = config;


@@ -1,32 +1,43 @@
 # How to Contribute to Perplexica
 
-Hey there, thanks for deciding to contribute to Perplexica. Anything you help with will support the development of Perplexica and will make it better. Let's walk you through the key aspects to ensure your contributions are effective and in harmony with the project's setup.
+Thanks for your interest in contributing to Perplexica! Your help makes this project better. This guide explains how to contribute effectively.
+
+Perplexica is a modern AI chat application with advanced search capabilities.
 
 ## Project Structure
 
-Perplexica's design consists of two main domains:
+Perplexica's codebase is organized as follows:
 
-- **Frontend (`ui` directory)**: This is a Next.js application holding all user interface components. It's a self-contained environment that manages everything the user interacts with.
-- **Backend (root and `src` directory)**: The backend logic is situated in the `src` folder, but the root directory holds the main `package.json` for backend dependency management.
-  - All of the focus modes are created using the Meta Search Agent class present in `src/search/metaSearchAgent.ts`. The main logic behind Perplexica lies there.
+- **UI Components and Pages**:
+  - **Components (`src/components`)**: Reusable UI components.
+  - **Pages and Routes (`src/app`)**: Next.js app directory structure with page components.
+    - Main app routes include: home (`/`), chat (`/c`), discover (`/discover`), library (`/library`), and settings (`/settings`).
+  - **API Routes (`src/app/api`)**: API endpoints implemented with Next.js API routes.
+    - `/api/chat`: Handles chat interactions.
+    - `/api/search`: Provides direct access to Perplexica's search capabilities.
+    - Other endpoints for models, files, and suggestions.
+- **Backend Logic (`src/lib`)**: Contains all the backend functionality including search, database, and API logic.
+  - The search functionality is present inside `src/lib/search` directory.
+  - All of the focus modes are implemented using the Meta Search Agent class in `src/lib/search/metaSearchAgent.ts`.
+  - Database functionality is in `src/lib/db`.
+  - Chat model and embedding model providers are managed in `src/lib/providers`.
+  - Prompt templates and LLM chain definitions are in `src/lib/prompts` and `src/lib/chains` respectively.
+
+## API Documentation
+
+Perplexica exposes several API endpoints for programmatic access, including:
+
+- **Search API**: Access Perplexica's advanced search capabilities directly via the `/api/search` endpoint. For detailed documentation, see `docs/api/search.md`.
 
 ## Setting Up Your Environment
 
 Before diving into coding, setting up your local environment is key. Here's what you need to do:
 
-### Backend
-
 1. In the root directory, locate the `sample.config.toml` file.
-2. Rename it to `config.toml` and fill in the necessary configuration fields specific to the backend.
-3. Run `npm install` to install dependencies.
-4. Run `npm run db:push` to set up the local sqlite.
-5. Use `npm run dev` to start the backend in development mode.
-
-### Frontend
-
-1. Navigate to the `ui` folder and repeat the process of renaming `.env.example` to `.env`, making sure to provide the frontend-specific variables.
-2. Execute `npm install` within the `ui` directory to get the frontend dependencies ready.
-3. Launch the frontend development server with `npm run dev`.
+2. Rename it to `config.toml` and fill in the necessary configuration fields.
+3. Run `npm install` to install all dependencies.
+4. Run `npm run db:push` to set up the local sqlite database.
+5. Use `npm run dev` to start the application in development mode.
 
 **Please note**: Docker configurations are present for setting up production environments, whereas `npm run dev` is used for development purposes.


@@ -109,14 +109,13 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 1. Install SearXNG and allow `JSON` format in the SearXNG settings.
 2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
-3. Rename the `.env.example` file to `.env` in the `ui` folder and fill in all necessary fields.
-4. After populating the configuration and environment files, run `npm i` in both the `ui` folder and the root directory.
-5. Install the dependencies and then execute `npm run build` in both the `ui` folder and the root directory.
-6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
+3. After populating the configuration run `npm i`.
+4. Install the dependencies and then execute `npm run build`.
+5. Finally, start the app by running `npm rum start`
 
 **Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
 
-See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like exposing it your network, etc.
+See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
 
 ### Ollama Connection Errors


@@ -1,15 +1,27 @@
-FROM node:20.18.0-alpine
+FROM node:20.18.0-alpine AS builder
 
-ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
-ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
-ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
-ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
-
 WORKDIR /home/perplexica
 
-COPY ui /home/perplexica/
+COPY package.json yarn.lock ./
+RUN yarn install --frozen-lockfile --network-timeout 600000
 
-RUN yarn install --frozen-lockfile
+COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
+COPY src ./src
+COPY public ./public
+
+RUN mkdir -p /home/perplexica/data
 RUN yarn build
 
-CMD ["yarn", "start"]
+FROM node:20.18.0-alpine
+
+WORKDIR /home/perplexica
+
+COPY --from=builder /home/perplexica/public ./public
+COPY --from=builder /home/perplexica/.next/static ./public/_next/static
+COPY --from=builder /home/perplexica/.next/standalone ./
+COPY --from=builder /home/perplexica/data ./data
+
+RUN mkdir /home/perplexica/uploads
+
+CMD ["node", "server.js"]


@ -1,17 +0,0 @@
FROM node:18-slim
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build
CMD ["yarn", "start"]


@@ -9,41 +9,21 @@ services:
       - perplexica-network
     restart: unless-stopped
 
-  perplexica-backend:
-    build:
-      context: .
-      dockerfile: backend.dockerfile
-    image: itzcrazykns1337/perplexica-backend:main
-    environment:
-      - SEARXNG_API_URL=http://searxng:8080
-    depends_on:
-      - searxng
-    ports:
-      - 3001:3001
-    volumes:
-      - backend-dbstore:/home/perplexica/data
-      - uploads:/home/perplexica/uploads
-      - ./config.toml:/home/perplexica/config.toml
-    extra_hosts:
-      - 'host.docker.internal:host-gateway'
-    networks:
-      - perplexica-network
-    restart: unless-stopped
-
-  perplexica-frontend:
+  app:
+    image: itzcrazykns1337/perplexica:main
     build:
       context: .
       dockerfile: app.dockerfile
-      args:
-        - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
-        - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
-    image: itzcrazykns1337/perplexica-frontend:main
-    depends_on:
-      - perplexica-backend
+    environment:
+      - SEARXNG_API_URL=http://searxng:8080
     ports:
       - 3000:3000
     networks:
       - perplexica-network
+    volumes:
+      - backend-dbstore:/home/perplexica/data
+      - uploads:/home/perplexica/uploads
+      - ./config.toml:/home/perplexica/config.toml
     restart: unless-stopped
 
 networks:


@@ -6,9 +6,9 @@ Perplexicas Search API makes it easy to use our AI-powered search engine. You
 ## Endpoint
 
-### **POST** `http://localhost:3001/api/search`
+### **POST** `http://localhost:3000/api/search`
 
-**Note**: Replace `3001` with any other port if you've changed the default PORT
+**Note**: Replace `3000` with any other port if you've changed the default PORT
 
 ### Request
@@ -20,11 +20,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
 {
   "chatModel": {
     "provider": "openai",
-    "model": "gpt-4o-mini"
+    "name": "gpt-4o-mini"
   },
   "embeddingModel": {
     "provider": "openai",
-    "model": "text-embedding-3-large"
+    "name": "text-embedding-3-large"
   },
   "optimizationMode": "speed",
   "focusMode": "webSearch",
@@ -38,18 +38,18 @@ The API accepts a JSON object in the request body, where you define the focus mo
 ### Request Parameters
 
-- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
   - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
-  - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
+  - `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
   - Optional fields for custom OpenAI configuration:
     - `customOpenAIBaseURL`: If you’re using a custom OpenAI instance, provide the base URL.
     - `customOpenAIKey`: The API key for a custom OpenAI instance.
-- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
   - `provider`: The provider for the embedding model (e.g., `openai`).
-  - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
+  - `name`: The specific embedding model (e.g., `text-embedding-3-large`).
- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
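
Editor's note: as a usage sketch only (not part of the diff), a TypeScript call against the updated endpoint. The port, the `name` field (replacing `model`) and the `{ message, sources }` response shape come from the documentation and route changes in this compare; the helper itself is illustrative.

```ts
// Minimal sketch: query the updated /api/search endpoint (note `name`, not `model`).
// Assumes Perplexica is running locally on its default port 3000.
type SearchResponse = { message: string; sources: unknown[] };

async function searchPerplexica(query: string): Promise<SearchResponse> {
  const res = await fetch('http://localhost:3000/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatModel: { provider: 'openai', name: 'gpt-4o-mini' },
      embeddingModel: { provider: 'openai', name: 'text-embedding-3-large' },
      optimizationMode: 'speed',
      focusMode: 'webSearch',
      query,
      history: [],
    }),
  });
  if (!res.ok) throw new Error(`Search failed with status ${res.status}`);
  return (await res.json()) as SearchResponse;
}
```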


@@ -4,7 +4,7 @@ Curious about how Perplexica works? Don't worry, we'll cover it here. Before we
 We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:
 
-1. The message is sent via WS to the backend server where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
+1. The message is sent to the `/api/chat` route where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
 2. The chain is now invoked; first, the message is passed to another chain where it first predicts (using the chat history and the question) whether there is a need for sources and searching the web. If there is, it will generate a query (in accordance with the chat history) for searching the web that we'll take up later. If not, the chain will end there, and then the answer generator chain, also known as the response generator, will be started.
 3. The query returned by the first chain is passed to SearXNG to search the web for information.
 4. After the information is retrieved, it is based on keyword-based search. We then convert the information into embeddings and the query as well, then we perform a similarity search to find the most relevant sources to answer the query.


@ -1,109 +0,0 @@
# Expose Perplexica to a network
This guide will show you how to make Perplexica available over a network. Follow these steps to allow computers on the same network to interact with Perplexica. Choose the instructions that match the operating system you are using.
## Windows
1. Open PowerShell as Administrator
2. Navigate to the directory containing the `docker-compose.yaml` file
3. Stop and remove the existing Perplexica containers and images:
```bash
docker compose down --rmi all
```
4. Open the `docker-compose.yaml` file in a text editor like Notepad++
5. Replace `127.0.0.1` with the IP address of the server Perplexica is running on in these two lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and close the `docker-compose.yaml` file
7. Rebuild and restart the Perplexica container:
```bash
docker compose up -d --build
```
## macOS
1. Open the Terminal application
2. Navigate to the directory with the `docker-compose.yaml` file:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove existing containers and images:
```bash
docker compose down --rmi all
```
4. Open `docker-compose.yaml` in a text editor like Sublime Text:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP in these lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```
## Linux
1. Open the terminal
2. Navigate to the `docker-compose.yaml` directory:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove containers and images:
```bash
docker compose down --rmi all
```
4. Edit `docker-compose.yaml`:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```


@@ -39,11 +39,8 @@ To update Perplexica to the latest version, follow these steps:
 2. Navigate to the project directory.
 3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
-4. Execute `npm i` in both the `ui` folder and the root directory.
-5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
-6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
+4. After populating the configuration run `npm i`.
+5. Install the dependencies and then execute `npm run build`.
+6. Finally, start the app by running `npm rum start`
 
 ---


@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
 export default defineConfig({
   dialect: 'sqlite',
-  schema: './src/db/schema.ts',
+  schema: './src/lib/db/schema.ts',
   out: './drizzle',
   dbCredentials: {
     url: './data/db.sqlite',

next-env.d.ts (new file)

@ -0,0 +1,5 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.


@@ -1,5 +1,6 @@
 /** @type {import('next').NextConfig} */
 const nextConfig = {
+  output: 'standalone',
   images: {
     remotePatterns: [
       {
@@ -7,6 +8,7 @@ const nextConfig = {
       },
     ],
   },
+  serverExternalPackages: ['pdf-parse'],
 };
 
 export default nextConfig;


@@ -1,53 +1,63 @@
 {
-  "name": "perplexica-backend",
-  "version": "1.10.0-rc3",
+  "name": "perplexica-frontend",
+  "version": "1.10.0",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
-    "start": "npm run db:push && node dist/app.js",
-    "build": "tsc",
-    "dev": "nodemon --ignore uploads/ src/app.ts ",
-    "db:push": "drizzle-kit push sqlite",
-    "format": "prettier . --check",
-    "format:write": "prettier . --write"
-  },
-  "devDependencies": {
-    "@types/better-sqlite3": "^7.6.10",
-    "@types/cors": "^2.8.17",
-    "@types/express": "^4.17.21",
-    "@types/html-to-text": "^9.0.4",
-    "@types/multer": "^1.4.12",
-    "@types/pdf-parse": "^1.1.4",
-    "@types/readable-stream": "^4.0.11",
-    "@types/ws": "^8.5.12",
-    "drizzle-kit": "^0.22.7",
-    "nodemon": "^3.1.0",
-    "prettier": "^3.2.5",
-    "ts-node": "^10.9.2",
-    "typescript": "^5.4.3"
+    "dev": "next dev",
+    "build": "npm run db:push && next build",
+    "start": "next start",
+    "lint": "next lint",
+    "format:write": "prettier . --write",
+    "db:push": "drizzle-kit push"
   },
   "dependencies": {
+    "@headlessui/react": "^2.2.0",
     "@iarna/toml": "^2.2.5",
-    "@langchain/anthropic": "^0.2.3",
-    "@langchain/community": "^0.2.16",
+    "@icons-pack/react-simple-icons": "^12.3.0",
+    "@langchain/community": "^0.3.36",
+    "@langchain/core": "^0.3.42",
     "@langchain/openai": "^0.0.25",
-    "@langchain/google-genai": "^0.0.23",
-    "@xenova/transformers": "^2.17.1",
-    "axios": "^1.6.8",
-    "better-sqlite3": "^11.0.0",
+    "@langchain/textsplitters": "^0.1.0",
+    "@tailwindcss/typography": "^0.5.12",
+    "@xenova/transformers": "^2.17.2",
+    "axios": "^1.8.3",
+    "better-sqlite3": "^11.9.1",
+    "clsx": "^2.1.0",
     "compute-cosine-similarity": "^1.1.0",
     "compute-dot": "^1.1.0",
-    "cors": "^2.8.5",
-    "dotenv": "^16.4.5",
-    "drizzle-orm": "^0.31.2",
-    "express": "^4.19.2",
+    "drizzle-orm": "^0.40.1",
     "html-to-text": "^9.0.5",
     "langchain": "^0.1.30",
-    "mammoth": "^1.8.0",
-    "multer": "^1.4.5-lts.1",
+    "lucide-react": "^0.363.0",
+    "markdown-to-jsx": "^7.7.2",
+    "next": "^15.2.2",
+    "next-themes": "^0.3.0",
     "pdf-parse": "^1.1.1",
-    "winston": "^3.13.0",
-    "ws": "^8.17.1",
+    "react": "^18",
+    "react-dom": "^18",
+    "react-text-to-speech": "^0.14.5",
+    "react-textarea-autosize": "^8.5.3",
+    "sonner": "^1.4.41",
+    "tailwind-merge": "^2.2.2",
+    "winston": "^3.17.0",
+    "yet-another-react-lightbox": "^3.17.2",
     "zod": "^3.22.4"
+  },
+  "devDependencies": {
+    "@types/better-sqlite3": "^7.6.12",
+    "@types/html-to-text": "^9.0.4",
+    "@types/node": "^20",
+    "@types/pdf-parse": "^1.1.4",
+    "@types/react": "^18",
+    "@types/react-dom": "^18",
+    "autoprefixer": "^10.0.1",
+    "drizzle-kit": "^0.30.5",
+    "eslint": "^8",
+    "eslint-config-next": "14.1.4",
+    "postcss": "^8",
+    "prettier": "^3.2.5",
+    "tailwindcss": "^3.3.0",
+    "typescript": "^5"
   }
 }

Two binary image assets changed (1.3 KiB and 629 B); sizes and dimensions unchanged, content not shown.


@@ -1,5 +1,4 @@
 [GENERAL]
-PORT = 3001 # Port to run the server on
 SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
 KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
@@ -24,4 +23,4 @@ MODEL_NAME = ""
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 
 [API_ENDPOINTS]
-SEARXNG = "http://localhost:32768" # SearxNG API URL
+SEARXNG = "" # SearxNG API URL - http://localhost:32768


@ -1,38 +0,0 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});

src/app/api/chat/route.ts (new file)

@ -0,0 +1,304 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
type Message = {
messageId: string;
chatId: string;
content: string;
};
type ChatModel = {
provider: string;
name: string;
};
type EmbeddingModel = {
provider: string;
name: string;
};
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
};
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
) => {
let recievedMessage = '';
let sources: any[] = [];
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
recievedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
sources = parsedData.data;
}
});
stream.on('end', () => {
writer.write(
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
}) + '\n',
),
);
writer.close();
db.insert(messagesSchema)
.values({
content: recievedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
});
stream.on('error', (data) => {
const parsedData = JSON.parse(data);
writer.write(
encoder.encode(
JSON.stringify({
type: 'error',
data: parsedData.data,
}),
),
);
writer.close();
});
};
const handleHistorySave = async (
message: Message,
humanMessageId: string,
focusMode: string,
files: string[],
) => {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, message.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: message.chatId,
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: message.content,
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, message.chatId),
),
)
.execute();
}
};
export const POST = async (req: Request) => {
try {
const body = (await req.json()) as Body;
const { message } = body;
if (message.content === '') {
return Response.json(
{
message: 'Please provide a message to process',
},
{ status: 400 },
);
}
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.name || Object.keys(chatModelProvider)[0]
];
const embeddingProvider =
embeddingModelProviders[
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
];
const embeddingModel =
embeddingProvider[
body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
];
let llm: BaseChatModel | undefined;
let embedding = embeddingModel.model;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
if (!embedding) {
return Response.json(
{ error: 'Invalid embedding model' },
{ status: 400 },
);
}
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const handler = searchHandlers[body.focusMode];
if (!handler) {
return Response.json(
{
message: 'Invalid focus mode',
},
{ status: 400 },
);
}
const stream = await handler.searchAndAnswer(
message.content,
history,
llm,
embedding,
body.optimizationMode,
body.files,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
headers: {
'Content-Type': 'text/event-stream',
Connection: 'keep-alive',
'Cache-Control': 'no-cache, no-transform',
},
});
} catch (err) {
console.error('An error ocurred while processing chat request:', err);
return Response.json(
{ message: 'An error ocurred while processing chat request' },
{ status: 500 },
);
}
};
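
Editor's note: the handler above streams newline-delimited JSON events (`message`, `sources`, `messageEnd`, `error`) through a `TransformStream`. A hedged client-side sketch of consuming that stream follows; it is illustrative and not part of the repository.

```ts
// Illustrative only: read the newline-delimited JSON stream emitted by /api/chat.
// Event field names (type, data, messageId) mirror handleEmitterEvents above.
async function streamChat(body: unknown): Promise<string> {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  if (!res.body) throw new Error('No response body');

  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let answer = '';

  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    // Each event is a JSON object terminated by a newline.
    let newlineIndex: number;
    while ((newlineIndex = buffer.indexOf('\n')) !== -1) {
      const line = buffer.slice(0, newlineIndex).trim();
      buffer = buffer.slice(newlineIndex + 1);
      if (!line) continue;
      const event = JSON.parse(line);
      if (event.type === 'message') answer += event.data;
      if (event.type === 'messageEnd') return answer;
    }
  }
  return answer;
}
```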


@ -0,0 +1,69 @@
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
export const GET = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, id),
});
return Response.json(
{
chat: chatExists,
messages: chatMessages,
},
{ status: 200 },
);
} catch (err) {
console.error('Error in getting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
export const DELETE = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
await db.delete(chats).where(eq(chats.id, id)).execute();
await db.delete(messages).where(eq(messages.chatId, id)).execute();
return Response.json(
{ message: 'Chat deleted successfully' },
{ status: 200 },
);
} catch (err) {
console.error('Error in deleting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
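
Editor's note: a small illustrative helper (assuming same-origin requests) against the dynamic `/api/chats/[id]` route defined above.

```ts
// Fetch a chat together with its messages; returns null when the chat does not exist.
async function getChat(id: string) {
  const res = await fetch(`/api/chats/${id}`);
  if (res.status === 404) return null;
  const { chat, messages } = await res.json();
  return { chat, messages };
}

// Delete a chat and its messages; resolves to true on success.
async function deleteChat(id: string): Promise<boolean> {
  const res = await fetch(`/api/chats/${id}`, { method: 'DELETE' });
  return res.ok;
}
```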


@ -0,0 +1,15 @@
import db from '@/lib/db';
export const GET = async (req: Request) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return Response.json({ chats: chats }, { status: 200 });
} catch (err) {
console.error('Error in getting chats: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};


@@ -1,26 +1,22 @@
-import express from 'express';
+import {
+  getAnthropicApiKey,
+  getCustomOpenaiApiKey,
+  getCustomOpenaiApiUrl,
+  getCustomOpenaiModelName,
+  getGeminiApiKey,
+  getGroqApiKey,
+  getOllamaApiEndpoint,
+  getOpenaiApiKey,
+  updateConfig,
+} from '@/lib/config';
 import {
   getAvailableChatModelProviders,
   getAvailableEmbeddingModelProviders,
-} from '../lib/providers';
-import {
-  getGroqApiKey,
-  getOllamaApiEndpoint,
-  getAnthropicApiKey,
-  getGeminiApiKey,
-  getOpenaiApiKey,
-  updateConfig,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiApiKey,
-  getCustomOpenaiModelName,
-} from '../config';
-import logger from '../utils/logger';
+} from '@/lib/providers';
 
-const router = express.Router();
-
-router.get('/', async (_, res) => {
+export const GET = async (req: Request) => {
   try {
-    const config = {};
+    const config: Record<string, any> = {};
 
     const [chatModelProviders, embeddingModelProviders] = await Promise.all([
       getAvailableChatModelProviders(),
@@ -61,15 +57,19 @@ router.get('/', async (_, res) => {
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
 
-    res.status(200).json(config);
-  } catch (err: any) {
-    res.status(500).json({ message: 'An error has occurred.' });
-    logger.error(`Error getting config: ${err.message}`);
+    return Response.json({ ...config }, { status: 200 });
+  } catch (err) {
+    console.error('An error ocurred while getting config:', err);
+    return Response.json(
+      { message: 'An error ocurred while getting config' },
+      { status: 500 },
+    );
   }
-});
+};
 
-router.post('/', async (req, res) => {
-  const config = req.body;
+export const POST = async (req: Request) => {
+  try {
+    const config = await req.json();
 
     const updatedConfig = {
       MODELS: {
@@ -98,7 +98,12 @@ router.post('/', async (req, res) => {
     updateConfig(updatedConfig);
 
-    res.status(200).json({ message: 'Config updated' });
-});
-
-export default router;
+    return Response.json({ message: 'Config updated' }, { status: 200 });
+  } catch (err) {
+    console.error('An error ocurred while updating config:', err);
+    return Response.json(
+      { message: 'An error ocurred while updating config' },
+      { status: 500 },
+    );
+  }
+};


@ -0,0 +1,61 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
export const GET = async (req: Request) => {
try {
const data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
).results;
}),
])
)
.map((result) => result)
.flat()
.sort(() => Math.random() - 0.5);
return Response.json(
{
blogs: data,
},
{
status: 200,
},
);
} catch (err) {
console.error(`An error ocurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',
},
{
status: 500,
},
);
}
};


@ -0,0 +1,83 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while searching images: ${err}`);
return Response.json(
{ message: 'An error ocurred while searching images' },
{ status: 500 },
);
}
};
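
Editor's note: an illustrative call to the new `/api/images` route. The `chatHistory` shape (role/content objects) follows the mapping at the top of the handler above; the helper itself is a sketch, not repository code.

```ts
// Sketch: ask Perplexica for images related to a query.
async function searchImages(query: string) {
  const res = await fetch('/api/images', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query,
      chatHistory: [{ role: 'user', content: query }],
    }),
  });
  if (!res.ok) throw new Error(`Image search failed: ${res.status}`);
  const { images } = await res.json();
  return images;
}
```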


@ -0,0 +1,47 @@
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete (chatModelProviders[provider][model] as { model?: unknown })
.model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete (embeddingModelProviders[provider][model] as { model?: unknown })
.model;
});
});
return Response.json(
{
chatModelProviders,
embeddingModelProviders,
},
{
status: 200,
},
);
} catch (err) {
console.error('An error ocurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',
},
{
status: 500,
},
);
}
};
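
Editor's note: a small illustrative helper for `GET /api/models`. As the route above strips the live `model` instances, only provider keys and model metadata come back.

```ts
// Sketch: list the chat and embedding model providers exposed by the new route.
async function listModels() {
  const res = await fetch('/api/models');
  const { chatModelProviders, embeddingModelProviders } = await res.json();
  for (const provider of Object.keys(chatModelProviders)) {
    console.log(provider, Object.keys(chatModelProviders[provider]));
  }
  return { chatModelProviders, embeddingModelProviders };
}
```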


@@ -1,33 +1,29 @@
-import express from 'express';
-import logger from '../utils/logger';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { Embeddings } from '@langchain/core/embeddings';
 import { ChatOpenAI } from '@langchain/openai';
 import {
   getAvailableChatModelProviders,
   getAvailableEmbeddingModelProviders,
-} from '../lib/providers';
-import { searchHandlers } from '../websocket/messageHandler';
+} from '@/lib/providers';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
-import { MetaSearchAgentType } from '../search/metaSearchAgent';
+import { MetaSearchAgentType } from '@/lib/search/metaSearchAgent';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
-} from '../config';
-
-const router = express.Router();
+} from '@/lib/config';
+import { searchHandlers } from '@/lib/search';
 
 interface chatModel {
   provider: string;
-  model: string;
+  name: string;
   customOpenAIKey?: string;
   customOpenAIBaseURL?: string;
 }
 
 interface embeddingModel {
   provider: string;
-  model: string;
+  name: string;
 }
 
 interface ChatRequestBody {
@@ -39,27 +35,24 @@ interface ChatRequestBody {
   history: Array<[string, string]>;
 }
 
-router.post('/', async (req, res) => {
+export const POST = async (req: Request) => {
   try {
-    const body: ChatRequestBody = req.body;
+    const body: ChatRequestBody = await req.json();
 
     if (!body.focusMode || !body.query) {
-      return res.status(400).json({ message: 'Missing focus mode or query' });
+      return Response.json(
+        { message: 'Missing focus mode or query' },
+        { status: 400 },
+      );
     }
 
     body.history = body.history || [];
     body.optimizationMode = body.optimizationMode || 'balanced';
 
     const history: BaseMessage[] = body.history.map((msg) => {
-      if (msg[0] === 'human') {
-        return new HumanMessage({
-          content: msg[1],
-        });
-      } else {
-        return new AIMessage({
-          content: msg[1],
-        });
-      }
+      return msg[0] === 'human'
+        ? new HumanMessage({ content: msg[1] })
+        : new AIMessage({ content: msg[1] });
     });
 
     const [chatModelProviders, embeddingModelProviders] = await Promise.all([
@@ -70,13 +63,13 @@ router.post('/', async (req, res) => {
     const chatModelProvider =
       body.chatModel?.provider || Object.keys(chatModelProviders)[0];
     const chatModel =
-      body.chatModel?.model ||
+      body.chatModel?.name ||
       Object.keys(chatModelProviders[chatModelProvider])[0];
 
     const embeddingModelProvider =
       body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
     const embeddingModel =
-      body.embeddingModel?.model ||
+      body.embeddingModel?.name ||
       Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
 
     let llm: BaseChatModel | undefined;
@@ -84,7 +77,7 @@ router.post('/', async (req, res) => {
     if (body.chatModel?.provider === 'custom_openai') {
       llm = new ChatOpenAI({
-        modelName: body.chatModel?.model || getCustomOpenaiModelName(),
+        modelName: body.chatModel?.name || getCustomOpenaiModelName(),
         openAIApiKey:
           body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
         temperature: 0.7,
@@ -111,13 +104,16 @@ router.post('/', async (req, res) => {
     }
 
     if (!llm || !embeddings) {
-      return res.status(400).json({ message: 'Invalid model selected' });
+      return Response.json(
+        { message: 'Invalid model selected' },
+        { status: 400 },
+      );
     }
 
     const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
 
     if (!searchHandler) {
-      return res.status(400).json({ message: 'Invalid focus mode' });
+      return Response.json({ message: 'Invalid focus mode' }, { status: 400 });
     }
 
     const emitter = await searchHandler.searchAndAnswer(
@@ -129,30 +125,45 @@ router.post('/', async (req, res) => {
       [],
     );
 
-    let message = '';
-    let sources = [];
+    return new Promise(
+      (
+        resolve: (value: Response) => void,
+        reject: (value: Response) => void,
+      ) => {
+        let message = '';
+        let sources: any[] = [];
 
-    emitter.on('data', (data) => {
-      const parsedData = JSON.parse(data);
-      if (parsedData.type === 'response') {
-        message += parsedData.data;
-      } else if (parsedData.type === 'sources') {
-        sources = parsedData.data;
-      }
-    });
+        emitter.on('data', (data) => {
+          try {
+            const parsedData = JSON.parse(data);
+            if (parsedData.type === 'response') {
+              message += parsedData.data;
+            } else if (parsedData.type === 'sources') {
+              sources = parsedData.data;
+            }
+          } catch (error) {
+            reject(
+              Response.json({ message: 'Error parsing data' }, { status: 500 }),
+            );
+          }
+        });
 
-    emitter.on('end', () => {
-      res.status(200).json({ message, sources });
-    });
+        emitter.on('end', () => {
+          resolve(Response.json({ message, sources }, { status: 200 }));
+        });
 
-    emitter.on('error', (data) => {
-      const parsedData = JSON.parse(data);
-      res.status(500).json({ message: parsedData.data });
-    });
+        emitter.on('error', (error) => {
+          reject(
+            Response.json({ message: 'Search error', error }, { status: 500 }),
+          );
+        });
+      },
+    );
   } catch (err: any) {
-    logger.error(`Error in getting search results: ${err.message}`);
-    res.status(500).json({ message: 'An error has occurred.' });
+    console.error(`Error in getting search results: ${err.message}`);
+    return Response.json(
+      { message: 'An error has occurred.' },
+      { status: 500 },
+    );
   }
-});
-
-export default router;
+};


@ -0,0 +1,81 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
);
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error ocurred while generating suggestions' },
{ status: 500 },
);
}
};


@ -0,0 +1,134 @@
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
interface FileRes {
fileName: string;
fileExtension: string;
fileId: string;
}
const uploadDir = path.join(process.cwd(), 'uploads');
if (!fs.existsSync(uploadDir)) {
fs.mkdirSync(uploadDir, { recursive: true });
}
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
export async function POST(req: Request) {
try {
const formData = await req.formData();
const files = formData.getAll('files') as File[];
const embedding_model = formData.get('embedding_model');
const embedding_model_provider = formData.get('embedding_model_provider');
if (!embedding_model || !embedding_model_provider) {
return NextResponse.json(
{ message: 'Missing embedding model or provider' },
{ status: 400 },
);
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel =
embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
let embeddingsModel =
embeddingModels[provider as string]?.[embeddingModel as string]?.model;
if (!embeddingsModel) {
return NextResponse.json(
{ message: 'Invalid embedding model selected' },
{ status: 400 },
);
}
const processedFiles: FileRes[] = [];
await Promise.all(
files.map(async (file: any) => {
const fileExtension = file.name.split('.').pop();
if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
return NextResponse.json(
{ message: 'File type not supported' },
{ status: 400 },
);
}
const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
const filePath = path.join(uploadDir, uniqueFileName);
const buffer = Buffer.from(await file.arrayBuffer());
fs.writeFileSync(filePath, new Uint8Array(buffer));
let docs: any[] = [];
if (fileExtension === 'pdf') {
const loader = new PDFLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'docx') {
const loader = new DocxLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'txt') {
const text = fs.readFileSync(filePath, 'utf-8');
docs = [
new Document({ pageContent: text, metadata: { title: file.name } }),
];
}
const splitted = await splitter.splitDocuments(docs);
const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(
extractedDataPath,
JSON.stringify({
title: file.name,
contents: splitted.map((doc) => doc.pageContent),
}),
);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsDataPath = filePath.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(
embeddingsDataPath,
JSON.stringify({
title: file.name,
embeddings,
}),
);
processedFiles.push({
fileName: file.name,
fileExtension: fileExtension,
fileId: uniqueFileName.replace(/\.\w+$/, ''),
});
}),
);
return NextResponse.json({
files: processedFiles,
});
} catch (error) {
console.error('Error uploading file:', error);
return NextResponse.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
}
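
For reference, a hedged sketch of a matching client call. The field names (files, embedding_model, embedding_model_provider) come from the formData reads above, and the response shape from the FileRes interface; the embedding model values shown are placeholders.

// Hypothetical upload helper; assumes the route is served at /api/uploads.
const uploadFiles = async (files: File[]) => {
  const data = new FormData();
  files.forEach((file) => data.append('files', file));
  data.append('embedding_model_provider', 'openai');
  data.append('embedding_model', 'text-embedding-3-small');

  const res = await fetch('/api/uploads', {
    method: 'POST',
    body: data, // no Content-Type header so the browser sets the multipart boundary
  });

  // processed: { fileName, fileExtension, fileId }[]
  const { files: processed } = await res.json();
  return processed;
};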


@@ -0,0 +1,83 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ videos }, { status: 200 });
} catch (err) {
    console.error(`An error occurred while searching videos: ${err}`);
return Response.json(
      { message: 'An error occurred while searching videos' },
{ status: 500 },
);
}
};
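
As with the image route, a client posts the query plus the prior turns. A rough sketch, with the endpoint and field names taken from the VideoSearchBody interface above; the query text is illustrative.

// Hypothetical call; res.videos comes from handleVideoSearch and is assumed
// to carry img_src/url/title/iframe_src entries as in the video search chain.
const res = await fetch('/api/videos', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    query: 'ollama setup walkthrough',
    chatHistory: [{ role: 'user', content: 'How do I run models locally?' }],
  }),
});
const { videos } = await res.json();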


@@ -0,0 +1,9 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
};
export default Page;
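
A note on the params type: in newer Next.js versions the params object passed to an app-router page is a Promise, which is why it is unwrapped with React.use above. A hypothetical link into this page; the /c/ prefix is an assumption about where this file lives in the app directory.

import Link from 'next/link';

// Assumes this page is app/c/[chatId]/page.tsx.
const ChatLink = ({ chatId }: { chatId: string }) => (
  <Link href={`/c/${chatId}`}>Open chat</Link>
);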


@@ -19,7 +19,7 @@ const Page = () => {
   useEffect(() => {
     const fetchData = async () => {
       try {
-        const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
+        const res = await fetch(`/api/discover`, {
          method: 'GET',
          headers: {
            'Content-Type': 'application/json',


(binary image asset changed; 25 KiB before and after)


@@ -21,7 +21,7 @@ const Page = () => {
   const fetchChats = async () => {
     setLoading(true);
-    const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
+    const res = await fetch(`/api/chats`, {
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',


@@ -116,7 +116,7 @@ const Page = () => {
   useEffect(() => {
     const fetchConfig = async () => {
       setIsLoading(true);
-      const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
+      const res = await fetch(`/api/config`, {
        headers: {
          'Content-Type': 'application/json',
        },
@@ -187,16 +187,13 @@ const Page = () => {
        [key]: value,
      } as SettingsType;

-      const response = await fetch(
-        `${process.env.NEXT_PUBLIC_API_URL}/config`,
-        {
+      const response = await fetch(`/api/config`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(updatedConfig),
-        },
-      );
+      });

      if (!response.ok) {
        throw new Error('Failed to update config');
@@ -208,7 +205,7 @@ const Page = () => {
        key.toLowerCase().includes('api') ||
        key.toLowerCase().includes('url')
      ) {
-        const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
+        const res = await fetch(`/api/config`, {
        headers: {
          'Content-Type': 'application/json',
        },


@ -29,36 +29,27 @@ export interface File {
fileId: string; fileId: string;
} }
const useSocket = ( interface ChatModelProvider {
url: string, name: string;
setIsWSReady: (ready: boolean) => void, provider: string;
setError: (error: boolean) => void, }
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => { ) => {
const wsRef = useRef<WebSocket | null>(null);
const reconnectTimeoutRef = useRef<NodeJS.Timeout>();
const retryCountRef = useRef(0);
const isCleaningUpRef = useRef(false);
const MAX_RETRIES = 3;
const INITIAL_BACKOFF = 1000; // 1 second
const isConnectionErrorRef = useRef(false);
const getBackoffDelay = (retryCount: number) => {
return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
};
useEffect(() => {
const connectWs = async () => {
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
}
try { try {
let chatModel = localStorage.getItem('chatModel'); let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider'); let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel'); let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem( let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
'embeddingModelProvider',
);
const autoImageSearch = localStorage.getItem('autoImageSearch'); const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch'); const autoVideoSearch = localStorage.getItem('autoVideoSearch');
@ -71,14 +62,11 @@ const useSocket = (
localStorage.setItem('autoVideoSearch', 'false'); localStorage.setItem('autoVideoSearch', 'false');
} }
const providers = await fetch( const providers = await fetch(`/api/models`, {
`${process.env.NEXT_PUBLIC_API_URL}/models`,
{
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}, },
}, }).then(async (res) => {
).then(async (res) => {
if (!res.ok) if (!res.ok)
throw new Error( throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`, `Failed to fetch models: ${res.status} ${res.statusText}`,
@ -100,10 +88,7 @@ const useSocket = (
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0]; chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if ( if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return toast.error('No chat models available'); return toast.error('No chat models available');
} }
@ -125,10 +110,7 @@ const useSocket = (
localStorage.setItem('chatModel', chatModel!); localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider); localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!); localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem( localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
'embeddingModelProvider',
embeddingModelProvider,
);
} else { } else {
const chatModelProviders = providers.chatModelProviders; const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders; const embeddingModelProviders = providers.embeddingModelProviders;
@ -165,10 +147,7 @@ const useSocket = (
!embeddingModelProviders[embeddingModelProvider] !embeddingModelProviders[embeddingModelProvider]
) { ) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0]; embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem( localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
'embeddingModelProvider',
embeddingModelProvider,
);
} }
if ( if (
@ -182,127 +161,22 @@ const useSocket = (
} }
} }
const wsURL = new URL(url); setChatModelProvider({
const searchParams = new URLSearchParams({}); name: chatModel!,
provider: chatModelProvider,
searchParams.append('chatModel', chatModel!);
searchParams.append('chatModelProvider', chatModelProvider);
if (chatModelProvider === 'custom_openai') {
searchParams.append(
'openAIApiKey',
localStorage.getItem('openAIApiKey')!,
);
searchParams.append(
'openAIBaseURL',
localStorage.getItem('openAIBaseURL')!,
);
}
searchParams.append('embeddingModel', embeddingModel!);
searchParams.append('embeddingModelProvider', embeddingModelProvider);
wsURL.search = searchParams.toString();
const ws = new WebSocket(wsURL.toString());
wsRef.current = ws;
const timeoutId = setTimeout(() => {
if (ws.readyState !== 1) {
toast.error(
'Failed to connect to the server. Please try again later.',
);
}
}, 10000);
ws.addEventListener('message', (e) => {
const data = JSON.parse(e.data);
if (data.type === 'signal' && data.data === 'open') {
const interval = setInterval(() => {
if (ws.readyState === 1) {
setIsWSReady(true);
setError(false);
if (retryCountRef.current > 0) {
toast.success('Connection restored.');
}
retryCountRef.current = 0;
clearInterval(interval);
}
}, 5);
clearTimeout(timeoutId);
console.debug(new Date(), 'ws:connected');
}
if (data.type === 'error') {
isConnectionErrorRef.current = true;
setError(true);
toast.error(data.data);
}
}); });
ws.onerror = () => { setEmbeddingModelProvider({
clearTimeout(timeoutId); name: embeddingModel!,
setIsWSReady(false); provider: embeddingModelProvider,
toast.error('WebSocket connection error.'); });
};
ws.onclose = () => { setIsConfigReady(true);
clearTimeout(timeoutId); } catch (err) {
setIsWSReady(false); console.error('An error occurred while checking the configuration:', err);
console.debug(new Date(), 'ws:disconnected'); setIsConfigReady(false);
if (!isCleaningUpRef.current && !isConnectionErrorRef.current) { setHasError(true);
toast.error('Connection lost. Attempting to reconnect...');
attemptReconnect();
} }
};
} catch (error) {
console.debug(new Date(), 'ws:error', error);
setIsWSReady(false);
attemptReconnect();
}
};
const attemptReconnect = () => {
retryCountRef.current += 1;
if (retryCountRef.current > MAX_RETRIES) {
console.debug(new Date(), 'ws:max_retries');
setError(true);
toast.error(
'Unable to connect to server after multiple attempts. Please refresh the page to try again.',
);
return;
}
const backoffDelay = getBackoffDelay(retryCountRef.current);
console.debug(
new Date(),
`ws:retry attempt=${retryCountRef.current}/${MAX_RETRIES} delay=${backoffDelay}ms`,
);
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
}
reconnectTimeoutRef.current = setTimeout(() => {
connectWs();
}, backoffDelay);
};
connectWs();
return () => {
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
}
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
isCleaningUpRef.current = true;
console.debug(new Date(), 'ws:cleanup');
}
};
}, [url, setIsWSReady, setError]);
return wsRef.current;
}; };
const loadMessages = async ( const loadMessages = async (
@ -315,15 +189,12 @@ const loadMessages = async (
setFiles: (files: File[]) => void, setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void, setFileIds: (fileIds: string[]) => void,
) => { ) => {
const res = await fetch( const res = await fetch(`/api/chats/${chatId}`, {
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'GET', method: 'GET',
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}, },
}, });
);
if (res.status === 404) { if (res.status === 404) {
setNotFound(true); setNotFound(true);
@ -373,15 +244,32 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [chatId, setChatId] = useState<string | undefined>(id); const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false); const [newChatCreated, setNewChatCreated] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false); const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false); const [isReady, setIsReady] = useState(false);
const [isWSReady, setIsWSReady] = useState(false); useEffect(() => {
const ws = useSocket( checkConfig(
process.env.NEXT_PUBLIC_WS_URL!, setChatModelProvider,
setIsWSReady, setEmbeddingModelProvider,
setIsConfigReady,
setHasError, setHasError,
); );
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const [loading, setLoading] = useState(false); const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false); const [messageAppeared, setMessageAppeared] = useState(false);
@ -399,8 +287,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [notFound, setNotFound] = useState(false); const [notFound, setNotFound] = useState(false);
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
useEffect(() => { useEffect(() => {
if ( if (
chatId && chatId &&
@ -426,16 +312,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, []); }, []);
useEffect(() => {
return () => {
if (ws?.readyState === 1) {
ws.close();
console.debug(new Date(), 'ws:cleanup');
}
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const messagesRef = useRef<Message[]>([]); const messagesRef = useRef<Message[]>([]);
useEffect(() => { useEffect(() => {
@ -443,18 +319,18 @@ const ChatWindow = ({ id }: { id?: string }) => {
}, [messages]); }, [messages]);
useEffect(() => { useEffect(() => {
if (isMessagesLoaded && isWSReady) { if (isMessagesLoaded && isConfigReady) {
setIsReady(true); setIsReady(true);
console.debug(new Date(), 'app:ready'); console.debug(new Date(), 'app:ready');
} else { } else {
setIsReady(false); setIsReady(false);
} }
}, [isMessagesLoaded, isWSReady]); }, [isMessagesLoaded, isConfigReady]);
const sendMessage = async (message: string, messageId?: string) => { const sendMessage = async (message: string, messageId?: string) => {
if (loading) return; if (loading) return;
if (!ws || ws.readyState !== WebSocket.OPEN) { if (!isConfigReady) {
toast.error('Cannot send message while disconnected'); toast.error('Cannot send message before the configuration is ready');
return; return;
} }
@ -467,21 +343,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
messageId = messageId ?? crypto.randomBytes(7).toString('hex'); messageId = messageId ?? crypto.randomBytes(7).toString('hex');
ws.send(
JSON.stringify({
type: 'message',
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: [...chatHistory, ['human', message]],
}),
);
setMessages((prevMessages) => [ setMessages((prevMessages) => [
...prevMessages, ...prevMessages,
{ {
@ -493,9 +354,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
}, },
]); ]);
const messageHandler = async (e: MessageEvent) => { const messageHandler = async (data: any) => {
const data = JSON.parse(e.data);
if (data.type === 'error') { if (data.type === 'error') {
toast.error(data.data); toast.error(data.data);
setLoading(false); setLoading(false);
@ -558,11 +417,25 @@ const ChatWindow = ({ id }: { id?: string }) => {
['assistant', recievedMessage], ['assistant', recievedMessage],
]); ]);
ws?.removeEventListener('message', messageHandler);
setLoading(false); setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1]; const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
if ( if (
lastMsg.role === 'assistant' && lastMsg.role === 'assistant' &&
lastMsg.sources && lastMsg.sources &&
@ -579,21 +452,62 @@ const ChatWindow = ({ id }: { id?: string }) => {
}), }),
); );
} }
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document.getElementById('search-images')?.click();
}
if (autoVideoSearch === 'true') {
document.getElementById('search-videos')?.click();
}
} }
}; };
ws?.addEventListener('message', messageHandler); const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
}; };
const rewrite = (messageId: string) => { const rewrite = (messageId: string) => {
@ -614,11 +528,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
}; };
useEffect(() => { useEffect(() => {
if (isReady && initialMessage && ws?.readyState === 1) { if (isReady && initialMessage && isConfigReady) {
sendMessage(initialMessage); sendMessage(initialMessage);
} }
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [ws?.readyState, isReady, initialMessage, isWSReady]); }, [isConfigReady, isReady, initialMessage]);
if (hasError) { if (hasError) {
return ( return (


@@ -29,15 +29,12 @@ const DeleteChat = ({
   const handleDelete = async () => {
     setLoading(true);
     try {
-      const res = await fetch(
-        `${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
-        {
+      const res = await fetch(`/api/chats/${chatId}`, {
        method: 'DELETE',
        headers: {
          'Content-Type': 'application/json',
        },
-        },
-      );
+      });

      if (res.status != 200) {
        throw new Error('Failed to delete chat');


@@ -12,13 +12,18 @@ import {
   Layers3,
   Plus,
 } from 'lucide-react';
-import Markdown from 'markdown-to-jsx';
+import Markdown, { MarkdownToJSX } from 'markdown-to-jsx';
 import Copy from './MessageActions/Copy';
 import Rewrite from './MessageActions/Rewrite';
 import MessageSources from './MessageSources';
 import SearchImages from './SearchImages';
 import SearchVideos from './SearchVideos';
 import { useSpeech } from 'react-text-to-speech';
+import ThinkBox from './ThinkBox';
+
+const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
+  return <ThinkBox content={children as string} />;
+};
 
 const MessageBox = ({
   message,
@@ -44,27 +49,48 @@ const MessageBox = ({
   useEffect(() => {
     const regex = /\[(\d+)\]/g;
+    let processedMessage = message.content;
+
+    if (message.role === 'assistant' && message.content.includes('<think>')) {
+      const openThinkTag = processedMessage.match(/<think>/g)?.length || 0;
+      const closeThinkTag = processedMessage.match(/<\/think>/g)?.length || 0;
+
+      if (openThinkTag > closeThinkTag) {
+        processedMessage += '</think> <a> </a>'; // The extra <a> </a> is to prevent the think component from looking bad
+      }
+    }
+
     if (
       message.role === 'assistant' &&
       message?.sources &&
       message.sources.length > 0
     ) {
-      return setParsedMessage(
-        message.content.replace(
+      setParsedMessage(
+        processedMessage.replace(
           regex,
           (_, number) =>
-            `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+            `<a href="${
+              message.sources?.[number - 1]?.metadata?.url
+            }" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
         ),
       );
+      return;
     }
 
     setSpeechMessage(message.content.replace(regex, ''));
-    setParsedMessage(message.content);
+    setParsedMessage(processedMessage);
   }, [message.content, message.sources, message.role]);
 
   const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
 
+  const markdownOverrides: MarkdownToJSX.Options = {
+    overrides: {
+      think: {
+        component: ThinkTagProcessor,
+      },
+    },
+  };
+
   return (
     <div>
       {message.role === 'user' && (
@@ -111,11 +137,13 @@ const MessageBox = ({
                 Answer
               </h3>
             </div>
+
             <Markdown
               className={cn(
                 'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
                 'max-w-none break-words text-black dark:text-white',
               )}
+              options={markdownOverrides}
             >
               {parsedMessage}
             </Markdown>
@@ -193,10 +221,12 @@ const MessageBox = ({
             <SearchImages
               query={history[messageIndex - 1].content}
               chatHistory={history.slice(0, messageIndex - 1)}
+              messageId={message.messageId}
             />
             <SearchVideos
               chatHistory={history.slice(0, messageIndex - 1)}
               query={history[messageIndex - 1].content}
+              messageId={message.messageId}
             />
           </div>
         </div>


@@ -41,7 +41,7 @@ const Attach = ({
     data.append('embedding_model_provider', embeddingModelProvider!);
     data.append('embedding_model', embeddingModel!);
 
-    const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
+    const res = await fetch(`/api/uploads`, {
      method: 'POST',
      body: data,
    });


@@ -39,7 +39,7 @@ const AttachSmall = ({
     data.append('embedding_model_provider', embeddingModelProvider!);
     data.append('embedding_model', embeddingModel!);
 
-    const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
+    const res = await fetch(`/api/uploads`, {
      method: 'POST',
      body: data,
    });


@@ -45,25 +45,13 @@ const focusModes = [
     key: 'youtubeSearch',
     title: 'Youtube',
     description: 'Search and watch videos',
-    icon: (
-      <SiYoutube
-        className="h-5 w-auto mr-0.5"
-        onPointerEnterCapture={undefined}
-        onPointerLeaveCapture={undefined}
-      />
-    ),
+    icon: <SiYoutube className="h-5 w-auto mr-0.5" />,
   },
   {
     key: 'redditSearch',
     title: 'Reddit',
     description: 'Search for discussions and opinions',
-    icon: (
-      <SiReddit
-        className="h-5 w-auto mr-0.5"
-        onPointerEnterCapture={undefined}
-        onPointerLeaveCapture={undefined}
-      />
-    ),
+    icon: <SiReddit className="h-5 w-auto mr-0.5" />,
   },
 ];


@@ -69,11 +69,15 @@ const MessageSources = ({ sources }: { sources: Document[] }) => {
           <div className="flex flex-row items-center space-x-1">
             {sources.slice(3, 6).map((source, i) => {
               return source.metadata.url === 'File' ? (
-                <div className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full">
+                <div
+                  key={i}
+                  className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full"
+                >
                   <File size={12} className="text-white/70" />
                 </div>
               ) : (
                 <img
+                  key={i}
                   src={`https://s2.googleusercontent.com/s2/favicons?domain_url=${source.metadata.url}`}
                   width={16}
                   height={16}


@@ -14,9 +14,11 @@ type Image = {
 const SearchImages = ({
   query,
   chatHistory,
+  messageId,
 }: {
   query: string;
   chatHistory: Message[];
+  messageId: string;
 }) => {
   const [images, setImages] = useState<Image[] | null>(null);
   const [loading, setLoading] = useState(false);
@@ -27,7 +29,7 @@ const SearchImages = ({
       <>
         {!loading && images === null && (
           <button
-            id="search-images"
+            id={`search-images-${messageId}`}
             onClick={async () => {
               setLoading(true);
@@ -37,9 +39,7 @@ const SearchImages = ({
               const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
               const customOpenAIKey = localStorage.getItem('openAIApiKey');
 
-              const res = await fetch(
-                `${process.env.NEXT_PUBLIC_API_URL}/images`,
-                {
+              const res = await fetch(`/api/images`, {
                   method: 'POST',
                   headers: {
                     'Content-Type': 'application/json',
@@ -56,8 +56,7 @@ const SearchImages = ({
                   }),
                 },
               }),
-                },
-              );
+              });
 
               const data = await res.json();


@@ -27,9 +27,11 @@ declare module 'yet-another-react-lightbox' {
 const Searchvideos = ({
   query,
   chatHistory,
+  messageId,
 }: {
   query: string;
   chatHistory: Message[];
+  messageId: string;
 }) => {
   const [videos, setVideos] = useState<Video[] | null>(null);
   const [loading, setLoading] = useState(false);
@@ -42,7 +44,7 @@ const Searchvideos = ({
       <>
         {!loading && videos === null && (
           <button
-            id="search-videos"
+            id={`search-videos-${messageId}`}
             onClick={async () => {
               setLoading(true);
@@ -52,9 +54,7 @@ const Searchvideos = ({
               const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
               const customOpenAIKey = localStorage.getItem('openAIApiKey');
 
-              const res = await fetch(
-                `${process.env.NEXT_PUBLIC_API_URL}/videos`,
-                {
+              const res = await fetch(`/api/videos`, {
                   method: 'POST',
                   headers: {
                     'Content-Type': 'application/json',
@@ -71,8 +71,7 @@ const Searchvideos = ({
                   }),
                 },
               }),
-                },
-              );
+              });
 
               const data = await res.json();


@@ -16,8 +16,6 @@ const VerticalIconContainer = ({ children }: { children: ReactNode }) => {
 const Sidebar = ({ children }: { children: React.ReactNode }) => {
   const segments = useSelectedLayoutSegments();
 
-  const [isSettingsOpen, setIsSettingsOpen] = useState(false);
-
   const navLinks = [
     {
       icon: Home,


@@ -0,0 +1,43 @@
'use client';
import { useState } from 'react';
import { cn } from '@/lib/utils';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
content: string;
}
const ThinkBox = ({ content }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(false);
return (
<div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
<button
onClick={() => setIsExpanded(!isExpanded)}
className="w-full flex items-center justify-between px-4 py-1 text-black/90 dark:text-white/90 hover:bg-light-200 dark:hover:bg-dark-200 transition duration-200"
>
<div className="flex items-center space-x-2">
<BrainCircuit
size={20}
className="text-[#9C27B0] dark:text-[#CE93D8]"
/>
<p className="font-medium text-sm">Thinking Process</p>
</div>
{isExpanded ? (
<ChevronUp size={18} className="text-black/70 dark:text-white/70" />
) : (
<ChevronDown size={18} className="text-black/70 dark:text-white/70" />
)}
</button>
{isExpanded && (
<div className="px-4 py-3 text-black/80 dark:text-white/80 text-sm border-t border-light-200 dark:border-dark-200 bg-light-100/50 dark:bg-dark-100/50 whitespace-pre-wrap">
{content}
</div>
)}
</div>
);
};
export default ThinkBox;
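
ThinkBox is wired up through the markdown override shown in the MessageBox diff above; a condensed sketch of that wiring, assuming markdown-to-jsx maps the raw <think> tag emitted by reasoning models onto the component.

// Assumed usage: any <think>...</think> block in an assistant message is
// rendered as a collapsible ThinkBox instead of raw text.
const options: MarkdownToJSX.Options = {
  overrides: {
    think: {
      component: ({ children }: { children: React.ReactNode }) => (
        <ThinkBox content={children as string} />
      ),
    },
  },
};

<Markdown options={options}>{assistantMessage}</Markdown>;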


@@ -7,7 +7,7 @@ export const getSuggestions = async (chatHisory: Message[]) => {
   const customOpenAIKey = localStorage.getItem('openAIApiKey');
   const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
 
-  const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/suggestions`, {
+  const res = await fetch(`/api/suggestions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',


@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
-import { searchSearxng } from '../lib/searxng';
+import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const imageSearchChainPrompt = `
@@ -36,6 +36,12 @@ type ImageSearchChainInput = {
   query: string;
 };
 
+interface ImageSearchResult {
+  img_src: string;
+  url: string;
+  title: string;
+}
+
 const strParser = new StringOutputParser();
 
 const createImageSearchChain = (llm: BaseChatModel) => {
@@ -52,11 +58,13 @@ const createImageSearchChain = (llm: BaseChatModel) => {
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
+      input = input.replace(/<think>.*?<\/think>/g, '');
+
       const res = await searchSearxng(input, {
         engines: ['bing images', 'google images'],
       });
 
-      const images = [];
+      const images: ImageSearchResult[] = [];
 
       res.results.forEach((result) => {
         if (result.img_src && result.url && result.title) {


@@ -1,5 +1,5 @@
 import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
-import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
+import ListLineOutputParser from '../outputParsers/listLineOutputParser';
 import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';


@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
-import { searchSearxng } from '../lib/searxng';
+import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const VideoSearchChainPrompt = `
@@ -36,6 +36,13 @@ type VideoSearchChainInput = {
   query: string;
 };
 
+interface VideoSearchResult {
+  img_src: string;
+  url: string;
+  title: string;
+  iframe_src: string;
+}
+
 const strParser = new StringOutputParser();
 
 const createVideoSearchChain = (llm: BaseChatModel) => {
@@ -52,11 +59,13 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
+      input = input.replace(/<think>.*?<\/think>/g, '');
+
       const res = await searchSearxng(input, {
         engines: ['youtube'],
       });
 
-      const videos = [];
+      const videos: VideoSearchResult[] = [];
 
       res.results.forEach((result) => {
         if (


@@ -6,7 +6,6 @@ const configFileName = 'config.toml';
 interface Config {
   GENERAL: {
-    PORT: number;
     SIMILARITY_MEASURE: string;
     KEEP_ALIVE: string;
   };
@@ -43,11 +42,9 @@ type RecursivePartial<T> = {
 const loadConfig = () =>
   toml.parse(
-    fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
+    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
   ) as any as Config;
 
-export const getPort = () => loadConfig().GENERAL.PORT;
-
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -109,9 +106,8 @@ const mergeConfigs = (current: any, update: any): any => {
 export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();
   const mergedConfig = mergeConfigs(currentConfig, config);
 
   fs.writeFileSync(
-    path.join(__dirname, `../${configFileName}`),
+    path.join(path.join(process.cwd(), `${configFileName}`)),
     toml.stringify(mergedConfig),
   );
 };


@@ -1,8 +1,9 @@
 import { drizzle } from 'drizzle-orm/better-sqlite3';
 import Database from 'better-sqlite3';
 import * as schema from './schema';
+import path from 'path';
 
-const sqlite = new Database('data/db.sqlite');
+const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
 
 const db = drizzle(sqlite, {
   schema: schema,
 });


@@ -28,7 +28,7 @@ export class HuggingFaceTransformersEmbeddings
   timeout?: number;
 
-  private pipelinePromise: Promise<any>;
+  private pipelinePromise: Promise<any> | undefined;
 
   constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
     super(fields ?? {});


@@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {
   constructor(args?: LineOutputParserArgs) {
     super();
-    this.key = args.key ?? this.key;
+    this.key = args?.key ?? this.key;
   }
 
   static lc_name() {


@@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
   constructor(args?: LineListOutputParserArgs) {
     super();
-    this.key = args.key ?? this.key;
+    this.key = args?.key ?? this.key;
   }
 
   static lc_name() {


@@ -1,6 +1,38 @@
-import { ChatAnthropic } from '@langchain/anthropic';
-import { getAnthropicApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { ChatOpenAI } from '@langchain/openai';
+import { ChatModel } from '.';
+import { getAnthropicApiKey } from '../config';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+const anthropicChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Claude 3.7 Sonnet',
+    key: 'claude-3-7-sonnet-20250219',
+  },
+  {
+    displayName: 'Claude 3.5 Haiku',
+    key: 'claude-3-5-haiku-20241022',
+  },
+  {
+    displayName: 'Claude 3.5 Sonnet v2',
+    key: 'claude-3-5-sonnet-20241022',
+  },
+  {
+    displayName: 'Claude 3.5 Sonnet',
+    key: 'claude-3-5-sonnet-20240620',
+  },
+  {
+    displayName: 'Claude 3 Opus',
+    key: 'claude-3-opus-20240229',
+  },
+  {
+    displayName: 'Claude 3 Sonnet',
+    key: 'claude-3-sonnet-20240229',
+  },
+  {
+    displayName: 'Claude 3 Haiku',
+    key: 'claude-3-haiku-20240307',
+  },
+];
 
 export const loadAnthropicChatModels = async () => {
   const anthropicApiKey = getAnthropicApiKey();
@@ -8,52 +40,25 @@ export const loadAnthropicChatModels = async () => {
   if (!anthropicApiKey) return {};
 
   try {
-    const chatModels = {
-      'claude-3-5-sonnet-20241022': {
-        displayName: 'Claude 3.5 Sonnet',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-5-sonnet-20241022',
-        }),
-      },
-      'claude-3-5-haiku-20241022': {
-        displayName: 'Claude 3.5 Haiku',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-5-haiku-20241022',
-        }),
-      },
-      'claude-3-opus-20240229': {
-        displayName: 'Claude 3 Opus',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-opus-20240229',
-        }),
-      },
-      'claude-3-sonnet-20240229': {
-        displayName: 'Claude 3 Sonnet',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-sonnet-20240229',
-        }),
-      },
-      'claude-3-haiku-20240307': {
-        displayName: 'Claude 3 Haiku',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-haiku-20240307',
-        }),
-      },
-    };
+    const chatModels: Record<string, ChatModel> = {};
+
+    anthropicChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: anthropicApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.anthropic.com/v1/',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
 
     return chatModels;
   } catch (err) {
-    logger.error(`Error loading Anthropic models: ${err}`);
+    console.error(`Error loading Anthropic models: ${err}`);
     return {};
   }
 };


@ -1,9 +1,42 @@
import { import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
ChatGoogleGenerativeAI, import { getGeminiApiKey } from '../config';
GoogleGenerativeAIEmbeddings, import { ChatModel, EmbeddingModel } from '.';
} from '@langchain/google-genai'; import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getGeminiApiKey } from '../../config'; import { Embeddings } from '@langchain/core/embeddings';
import logger from '../../utils/logger';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Pro Experimental',
key: 'gemini-2.0-pro-exp-02-05',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Gemini Embedding',
key: 'gemini-embedding-exp',
},
];
export const loadGeminiChatModels = async () => { export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey(); const geminiApiKey = getGeminiApiKey();
@ -11,75 +44,53 @@ export const loadGeminiChatModels = async () => {
if (!geminiApiKey) return {}; if (!geminiApiKey) return {};
try { try {
const chatModels = { const chatModels: Record<string, ChatModel> = {};
'gemini-1.5-flash': {
displayName: 'Gemini 1.5 Flash', geminiChatModels.forEach((model) => {
model: new ChatGoogleGenerativeAI({ chatModels[model.key] = {
modelName: 'gemini-1.5-flash', displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: geminiApiKey,
modelName: model.key,
temperature: 0.7, temperature: 0.7,
apiKey: geminiApiKey, configuration: {
}), baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
},
'gemini-1.5-flash-8b': {
displayName: 'Gemini 1.5 Flash 8B',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash-8b',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-pro': {
displayName: 'Gemini 1.5 Pro',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-pro',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-exp': {
displayName: 'Gemini 2.0 Flash Exp',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-exp',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-thinking-exp-01-21': {
displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-thinking-exp-01-21',
temperature: 0.7,
apiKey: geminiApiKey,
}),
}, },
}) as unknown as BaseChatModel,
}; };
});
return chatModels; return chatModels;
} catch (err) { } catch (err) {
logger.error(`Error loading Gemini models: ${err}`); console.error(`Error loading Gemini models: ${err}`);
return {}; return {};
} }
}; };
export const loadGeminiEmbeddingsModels = async () => { export const loadGeminiEmbeddingModels = async () => {
const geminiApiKey = getGeminiApiKey(); const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {}; if (!geminiApiKey) return {};
try { try {
const embeddingModels = { const embeddingModels: Record<string, EmbeddingModel> = {};
'text-embedding-004': {
displayName: 'Text Embedding', geminiEmbeddingModels.forEach((model) => {
model: new GoogleGenerativeAIEmbeddings({ embeddingModels[model.key] = {
apiKey: geminiApiKey, displayName: model.displayName,
modelName: 'text-embedding-004', model: new OpenAIEmbeddings({
}), openAIApiKey: geminiApiKey,
modelName: model.key,
configuration: {
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
}, },
}) as unknown as Embeddings,
}; };
});
return embeddingModels; return embeddingModels;
} catch (err) { } catch (err) {
logger.error(`Error loading Gemini embeddings model: ${err}`); console.error(`Error loading OpenAI embeddings models: ${err}`);
return {}; return {};
} }
}; };


@ -1,6 +1,78 @@
import { ChatOpenAI } from '@langchain/openai'; import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../../config'; import { getGroqApiKey } from '../config';
import logger from '../../utils/logger'; import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
];
export const loadGroqChatModels = async () => { export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey(); const groqApiKey = getGroqApiKey();
@ -8,129 +80,25 @@ export const loadGroqChatModels = async () => {
if (!groqApiKey) return {}; if (!groqApiKey) return {};
try { try {
const chatModels = { const chatModels: Record<string, ChatModel> = {};
'llama-3.3-70b-versatile': {
displayName: 'Llama 3.3 70B', groqChatModels.forEach((model) => {
model: new ChatOpenAI( chatModels[model.key] = {
{ displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: groqApiKey, openAIApiKey: groqApiKey,
modelName: 'llama-3.3-70b-versatile', modelName: model.key,
temperature: 0.7, temperature: 0.7,
}, configuration: {
{
baseURL: 'https://api.groq.com/openai/v1', baseURL: 'https://api.groq.com/openai/v1',
}, },
), }) as unknown as BaseChatModel,
},
'llama-3.2-3b-preview': {
displayName: 'Llama 3.2 3B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-3b-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-11b-vision-preview': {
displayName: 'Llama 3.2 11B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-11b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-90b-vision-preview': {
displayName: 'Llama 3.2 90B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-90b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.1-8b-instant': {
displayName: 'Llama 3.1 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.1-8b-instant',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-8b-8192': {
displayName: 'LLaMA3 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-70b-8192': {
displayName: 'LLaMA3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'mixtral-8x7b-32768': {
displayName: 'Mixtral 8x7B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'gemma2-9b-it': {
displayName: 'Gemma2 9B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma2-9b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
}; };
});
return chatModels; return chatModels;
} catch (err) { } catch (err) {
logger.error(`Error loading Groq models: ${err}`); console.error(`Error loading Groq models: ${err}`);
return {}; return {};
} }
}; };


@@ -1,33 +1,51 @@
-import { loadGroqChatModels } from './groq';
-import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { Embeddings } from '@langchain/core/embeddings';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
-} from '../../config';
+} from '../config';
 import { ChatOpenAI } from '@langchain/openai';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
+import { loadGroqChatModels } from './groq';
+import { loadAnthropicChatModels } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
+import { loadTransformersEmbeddingsModels } from './transformers';
 
-const chatModelProviders = {
+export interface ChatModel {
+  displayName: string;
+  model: BaseChatModel;
+}
+
+export interface EmbeddingModel {
+  displayName: string;
+  model: Embeddings;
+}
+
+export const chatModelProviders: Record<
+  string,
+  () => Promise<Record<string, ChatModel>>
+> = {
   openai: loadOpenAIChatModels,
-  groq: loadGroqChatModels,
   ollama: loadOllamaChatModels,
+  groq: loadGroqChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
 };
 
-const embeddingModelProviders = {
-  openai: loadOpenAIEmbeddingsModels,
-  local: loadTransformersEmbeddingsModels,
-  ollama: loadOllamaEmbeddingsModels,
-  gemini: loadGeminiEmbeddingsModels,
+export const embeddingModelProviders: Record<
+  string,
+  () => Promise<Record<string, EmbeddingModel>>
+> = {
+  openai: loadOpenAIEmbeddingModels,
+  ollama: loadOllamaEmbeddingModels,
+  gemini: loadGeminiEmbeddingModels,
+  transformers: loadTransformersEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
-  const models = {};
+  const models: Record<string, Record<string, ChatModel>> = {};
 
   for (const provider in chatModelProviders) {
     const providerModels = await chatModelProviders[provider]();
@@ -52,7 +70,7 @@ export const getAvailableChatModelProviders = async () => {
             configuration: {
               baseURL: customOpenAiApiUrl,
             },
-          }),
+          }) as unknown as BaseChatModel,
         },
       }
       : {}),
@@ -62,7 +80,7 @@ export const getAvailableChatModelProviders = async () => {
 };
 
 export const getAvailableEmbeddingModelProviders = async () => {
-  const models = {};
+  const models: Record<string, Record<string, EmbeddingModel>> = {};
 
   for (const provider in embeddingModelProviders) {
     const providerModels = await embeddingModelProviders[provider]();
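
The API routes earlier in this changeset consume these registries in the same way; a minimal sketch of resolving a provider/model pair on the server. The requested object is a hypothetical stand-in for the chatModel field sent by a client, and the fallback-to-first-key behaviour mirrors the route handlers above.

// Pick the requested provider/model, falling back to the first available pair.
const providers = await getAvailableChatModelProviders();
const providerKey = requested?.provider || Object.keys(providers)[0];
const modelKey = requested?.model || Object.keys(providers[providerKey])[0];
const llm = providers[providerKey]?.[modelKey]?.model; // BaseChatModel | undefined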


@ -1,74 +1,73 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios'; import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
export const loadOllamaChatModels = async () => { export const loadOllamaChatModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint(); const ollamaApiEndpoint = getOllamaApiEndpoint();
const keepAlive = getKeepAlive();
if (!ollamaEndpoint) return {}; if (!ollamaApiEndpoint) return {};
try { try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, { const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}, },
}); });
const { models: ollamaModels } = response.data; const { models } = res.data;
const chatModels = ollamaModels.reduce((acc, model) => { const chatModels: Record<string, ChatModel> = {};
acc[model.model] = {
models.forEach((model: any) => {
chatModels[model.model] = {
displayName: model.name, displayName: model.name,
model: new ChatOllama({ model: new ChatOllama({
baseUrl: ollamaEndpoint, baseUrl: ollamaApiEndpoint,
model: model.model, model: model.model,
temperature: 0.7, temperature: 0.7,
keepAlive: keepAlive, keepAlive: getKeepAlive(),
}), }),
}; };
});
return acc;
}, {});
return chatModels; return chatModels;
} catch (err) { } catch (err) {
logger.error(`Error loading Ollama models: ${err}`); console.error(`Error loading Ollama models: ${err}`);
return {}; return {};
} }
}; };
export const loadOllamaEmbeddingsModels = async () => { export const loadOllamaEmbeddingModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint(); const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaEndpoint) return {}; if (!ollamaApiEndpoint) return {};
try { try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, { const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}, },
}); });
const { models: ollamaModels } = response.data; const { models } = res.data;
const embeddingsModels = ollamaModels.reduce((acc, model) => { const embeddingModels: Record<string, EmbeddingModel> = {};
acc[model.model] = {
models.forEach((model: any) => {
embeddingModels[model.model] = {
displayName: model.name, displayName: model.name,
model: new OllamaEmbeddings({ model: new OllamaEmbeddings({
baseUrl: ollamaEndpoint, baseUrl: ollamaApiEndpoint,
model: model.model, model: model.model,
}), }),
}; };
});
return acc; return embeddingModels;
}, {});
return embeddingsModels;
} catch (err) { } catch (err) {
logger.error(`Error loading Ollama embeddings model: ${err}`); console.error(`Error loading Ollama embeddings models: ${err}`);
return {}; return {};
} }
}; };
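For orientation, a hedged sketch of exercising the renamed loader; the import path is an assumption, and the model list depends entirely on what the configured Ollama instance reports:

// Hypothetical usage: list whatever chat models the Ollama endpoint exposes
// via /api/tags. The loader returns {} when no endpoint is configured.
import { loadOllamaChatModels } from '@/lib/providers/ollama';

const listOllamaChatModels = async () => {
  const models = await loadOllamaChatModels();
  Object.entries(models).forEach(([key, { displayName }]) => {
    console.log(`${key}: ${displayName}`);
  });
};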


@ -1,89 +1,90 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../../config'; import { getOpenaiApiKey } from '../config';
import logger from '../../utils/logger'; import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
export const loadOpenAIChatModels = async () => { export const loadOpenAIChatModels = async () => {
const openAIApiKey = getOpenaiApiKey(); const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {}; if (!openaiApiKey) return {};
try { try {
const chatModels = { const chatModels: Record<string, ChatModel> = {};
'gpt-3.5-turbo': {
displayName: 'GPT-3.5 Turbo', openaiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({ model: new ChatOpenAI({
openAIApiKey, openAIApiKey: openaiApiKey,
modelName: 'gpt-3.5-turbo', modelName: model.key,
temperature: 0.7, temperature: 0.7,
}), }) as unknown as BaseChatModel,
},
'gpt-4': {
displayName: 'GPT-4',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
},
'gpt-4-turbo': {
displayName: 'GPT-4 turbo',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
},
'gpt-4o': {
displayName: 'GPT-4 omni',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o',
temperature: 0.7,
}),
},
'gpt-4o-mini': {
displayName: 'GPT-4 omni mini',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o-mini',
temperature: 0.7,
}),
},
}; };
});
return chatModels; return chatModels;
} catch (err) { } catch (err) {
logger.error(`Error loading OpenAI models: ${err}`); console.error(`Error loading OpenAI models: ${err}`);
return {}; return {};
} }
}; };
export const loadOpenAIEmbeddingsModels = async () => { export const loadOpenAIEmbeddingModels = async () => {
const openAIApiKey = getOpenaiApiKey(); const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {}; if (!openaiApiKey) return {};
try { try {
const embeddingModels = { const embeddingModels: Record<string, EmbeddingModel> = {};
'text-embedding-3-small': {
displayName: 'Text Embedding 3 Small', openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({ model: new OpenAIEmbeddings({
openAIApiKey, openAIApiKey: openaiApiKey,
modelName: 'text-embedding-3-small', modelName: model.key,
}), }) as unknown as Embeddings,
},
'text-embedding-3-large': {
displayName: 'Text Embedding 3 Large',
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
},
}; };
});
return embeddingModels; return embeddingModels;
} catch (err) { } catch (err) {
logger.error(`Error loading OpenAI embeddings model: ${err}`); console.error(`Error loading OpenAI embeddings models: ${err}`);
return {}; return {};
} }
}; };
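The loader is now data-driven: supporting another OpenAI model is a one-entry change to the openaiChatModels array rather than a new hand-written ChatOpenAI block. Purely as an illustration (the extra key below is not part of this diff):

// Illustrative only: extending the declarative model list.
const moreOpenaiChatModels: Record<string, string>[] = [
  { displayName: 'GPT-4 omni (example entry)', key: 'gpt-4o-example' },
];
// openaiChatModels.push(...moreOpenaiChatModels) would make the loader pick it up.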


@ -1,4 +1,3 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer'; import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const loadTransformersEmbeddingsModels = async () => { export const loadTransformersEmbeddingsModels = async () => {
@ -26,7 +25,7 @@ export const loadTransformersEmbeddingsModels = async () => {
return embeddingModels; return embeddingModels;
} catch (err) { } catch (err) {
logger.error(`Error loading Transformers embeddings model: ${err}`); console.error(`Error loading Transformers embeddings model: ${err}`);
return {}; return {};
} }
}; };
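A brief sketch of how the local loader plugs into the registry above; the import path is an assumption, and no API key is involved since inference runs in-process:

// Hypothetical usage: local transformer embeddings, registered under the
// 'transformers' provider key in the embedding registry.
import { loadTransformersEmbeddingsModels } from '@/lib/providers/transformers';

const listLocalEmbeddingModels = async () => {
  const models = await loadTransformersEmbeddingsModels();
  return Object.keys(models); // whichever model keys the loader exposes
};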

src/lib/search/index.ts — new file, 59 lines

@ -0,0 +1,59 @@
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import prompts from '../prompts';
export const searchHandlers: Record<string, MetaSearchAgent> = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};
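A hedged sketch of how a route might pick one of these handlers from the client's focus mode; the variable names are assumptions, since the route code itself is not shown in this diff:

// Hypothetical route-level lookup: focus mode string -> preconfigured agent.
import { searchHandlers } from '@/lib/search';

const resolveHandler = (focusMode: string) => {
  const handler = searchHandlers[focusMode];
  if (!handler) {
    throw new Error(`Invalid focus mode: ${focusMode}`);
  }
  return handler; // a MetaSearchAgent; its searchAndAnswer() emits stream events
};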


@ -13,18 +13,17 @@ import {
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages'; import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser'; import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser'; import LineOutputParser from '../outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents'; import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document'; import { Document } from 'langchain/document';
import { searchSearxng } from '../lib/searxng'; import { searchSearxng } from '../searxng';
import path from 'path'; import path from 'node:path';
import fs from 'fs'; import fs from 'node:fs';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory'; import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream'; import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { IterableReadableStream } from '@langchain/core/utils/stream';
export interface MetaSearchAgentType { export interface MetaSearchAgentType {
searchAndAnswer: ( searchAndAnswer: (
@ -90,7 +89,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
question = 'summarize'; question = 'summarize';
} }
let docs = []; let docs: Document[] = [];
const linkDocs = await getDocumentsFromLinks({ links }); const linkDocs = await getDocumentsFromLinks({ links });
@ -203,6 +202,8 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: docs }; return { query: question, docs: docs };
} else { } else {
question = question.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(question, { const res = await searchSearxng(question, {
language: 'en', language: 'en',
engines: this.config.activeEngines, engines: this.config.activeEngines,
@ -311,7 +312,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8')); const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));
const fileSimilaritySearchObject = content.contents.map( const fileSimilaritySearchObject = content.contents.map(
(c: string, i) => { (c: string, i: number) => {
return { return {
fileName: content.title, fileName: content.title,
content: c, content: c,
@ -414,6 +415,8 @@ class MetaSearchAgent implements MetaSearchAgentType {
return sortedDocs; return sortedDocs;
} }
return [];
} }
private processDocs(docs: Document[]) { private processDocs(docs: Document[]) {
@ -426,7 +429,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
} }
private async handleStream( private async handleStream(
stream: IterableReadableStream<StreamEvent>, stream: AsyncGenerator<StreamEvent, any, any>,
emitter: eventEmitter, emitter: eventEmitter,
) { ) {
for await (const event of stream) { for await (const event of stream) {
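One behavioural change buried in this hunk is that the rephrased question is scrubbed of reasoning-model <think> blocks before it reaches SearxNG. A standalone sketch of that regex (the sample string is made up):

// Strip <think>…</think> spans emitted by models that expose their scratchpad
// in think tags, before the query is sent to the search engine.
const stripThinkTags = (text: string) =>
  text.replace(/<think>.*?<\/think>/g, '');

stripThinkTags('<think>user wants the capital</think>capital of France');
// => 'capital of France'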


@ -1,5 +1,5 @@
import axios from 'axios'; import axios from 'axios';
import { getSearxngApiEndpoint } from '../config'; import { getSearxngApiEndpoint } from './config';
interface SearxngSearchOptions { interface SearxngSearchOptions {
categories?: string[]; categories?: string[];
@ -30,11 +30,12 @@ export const searchSearxng = async (
if (opts) { if (opts) {
Object.keys(opts).forEach((key) => { Object.keys(opts).forEach((key) => {
if (Array.isArray(opts[key])) { const value = opts[key as keyof SearxngSearchOptions];
url.searchParams.append(key, opts[key].join(',')); if (Array.isArray(value)) {
url.searchParams.append(key, value.join(','));
return; return;
} }
url.searchParams.append(key, opts[key]); url.searchParams.append(key, value as string);
}); });
} }
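A sketch of what the stricter option handling means for a caller; the query and engine names are illustrative:

// Array options are comma-joined, scalar options appended verbatim:
// engines becomes engines=google,bing while language stays language=en.
import { searchSearxng } from '@/lib/searxng';

const demoSearch = async () => {
  return searchSearxng('perplexica', {
    engines: ['google', 'bing'],
    language: 'en',
  });
};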

src/lib/types/compute-dot.d.ts — new vendored file, 5 lines

@ -0,0 +1,5 @@
declare function computeDot(vectorA: number[], vectorB: number[]): number;
declare module 'compute-dot' {
export default computeDot;
}
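This ambient declaration gives the otherwise untyped compute-dot package a signature, so computeSimilarity can import it with proper typing. A usage sketch:

// With the declaration above, the default export is typed
// (vectorA: number[], vectorB: number[]) => number.
import dot from 'compute-dot';

const d: number = dot([1, 0, 0], [0.5, 0.5, 0]); // 1*0.5 + 0*0.5 + 0*0 = 0.5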


@ -6,7 +6,7 @@ const computeSimilarity = (x: number[], y: number[]): number => {
const similarityMeasure = getSimilarityMeasure(); const similarityMeasure = getSimilarityMeasure();
if (similarityMeasure === 'cosine') { if (similarityMeasure === 'cosine') {
return cosineSimilarity(x, y); return cosineSimilarity(x, y) as number;
} else if (similarityMeasure === 'dot') { } else if (similarityMeasure === 'dot') {
return dot(x, y); return dot(x, y);
} }
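For reference, the two measures the helper switches between, with a small worked example (vectors chosen purely for illustration):

// dot(x, y)    = Σ xᵢ·yᵢ
// cosine(x, y) = dot(x, y) / (‖x‖·‖y‖)
const x = [1, 2, 2]; // ‖x‖ = 3
const y = [2, 0, 1]; // ‖y‖ = √5 ≈ 2.236
// dot    = 1·2 + 2·0 + 2·1 = 4
// cosine = 4 / (3 · 2.236) ≈ 0.596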


@ -3,7 +3,6 @@ import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'; import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse'; import pdfParse from 'pdf-parse';
import logger from './logger';
export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => { export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splitter = new RecursiveCharacterTextSplitter(); const splitter = new RecursiveCharacterTextSplitter();
@ -79,12 +78,13 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
docs.push(...linkDocs); docs.push(...linkDocs);
} catch (err) { } catch (err) {
logger.error( console.error(
`Error at generating documents from links: ${err.message}`, 'An error occurred while getting documents from links: ',
err,
); );
docs.push( docs.push(
new Document({ new Document({
pageContent: `Failed to retrieve content from the link: ${err.message}`, pageContent: `Failed to retrieve content from the link: ${err}`,
metadata: { metadata: {
title: 'Failed to retrieve content', title: 'Failed to retrieve content',
url: link, url: link,
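The error path here is deliberate: a link that cannot be fetched yields a placeholder Document instead of failing the whole batch. A hedged usage sketch; the import path and URLs are illustrative assumptions:

// Hypothetical call: one unreachable link does not abort the others; it
// comes back as a 'Failed to retrieve content' placeholder Document.
import { getDocumentsFromLinks } from '@/lib/utils/documents';

const demoDocs = async () => {
  return getDocumentsFromLinks({
    links: ['https://example.com/article', 'https://example.com/missing.pdf'],
  });
};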

Some files were not shown because too many files have changed in this diff.