Mirror of https://github.com/ItzCrazyKns/Perplexica.git — synced 2025-06-19 08:18:48 +00:00

# Compare commits: v1.10.0...562a6e890d

3 commits:

- 562a6e890d
- 47c7bb688f
- dd2f4effca
**.github/workflows/docker-build.yaml** (vendored, 122 lines changed)

```diff
@@ -8,12 +8,18 @@ on:
     types: [published]
 
 jobs:
-  build-amd64:
+  build-and-push:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        service: [backend, app]
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
 
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
         with:
@@ -30,104 +36,38 @@ jobs:
         id: version
         run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
 
-      - name: Build and push AMD64 Docker image
+      - name: Build and push Docker image for ${{ matrix.service }}
         if: github.ref == 'refs/heads/master' && github.event_name == 'push'
         run: |
-          DOCKERFILE=app.dockerfile
-          IMAGE_NAME=perplexica
-          docker buildx build --platform linux/amd64 \
-            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
+          docker buildx create --use
+          if [[ "${{ matrix.service }}" == "backend" ]]; then \
+            DOCKERFILE=backend.dockerfile; \
+            IMAGE_NAME=perplexica-backend; \
+          else \
+            DOCKERFILE=app.dockerfile; \
+            IMAGE_NAME=perplexica-frontend; \
+          fi
+          docker buildx build --platform linux/amd64,linux/arm64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
             --cache-to=type=inline \
             --provenance false \
             -f $DOCKERFILE \
-            -t itzcrazykns1337/${IMAGE_NAME}:amd64 \
             -t itzcrazykns1337/${IMAGE_NAME}:main \
             --push .
 
-      - name: Build and push AMD64 release Docker image
+      - name: Build and push release Docker image for ${{ matrix.service }}
         if: github.event_name == 'release'
         run: |
-          DOCKERFILE=app.dockerfile
-          IMAGE_NAME=perplexica
-          docker buildx build --platform linux/amd64 \
-            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
+          docker buildx create --use
+          if [[ "${{ matrix.service }}" == "backend" ]]; then \
+            DOCKERFILE=backend.dockerfile; \
+            IMAGE_NAME=perplexica-backend; \
+          else \
+            DOCKERFILE=app.dockerfile; \
+            IMAGE_NAME=perplexica-frontend; \
+          fi
+          docker buildx build --platform linux/amd64,linux/arm64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
             --cache-to=type=inline \
             --provenance false \
             -f $DOCKERFILE \
-            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
             -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
             --push .
-
-  build-arm64:
-    runs-on: ubuntu-24.04-arm
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-        with:
-          install: true
-
-      - name: Log in to DockerHub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-
-      - name: Extract version from release tag
-        if: github.event_name == 'release'
-        id: version
-        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
-
-      - name: Build and push ARM64 Docker image
-        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
-        run: |
-          DOCKERFILE=app.dockerfile
-          IMAGE_NAME=perplexica
-          docker buildx build --platform linux/arm64 \
-            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
-            --cache-to=type=inline \
-            --provenance false \
-            -f $DOCKERFILE \
-            -t itzcrazykns1337/${IMAGE_NAME}:arm64 \
-            --push .
-
-      - name: Build and push ARM64 release Docker image
-        if: github.event_name == 'release'
-        run: |
-          DOCKERFILE=app.dockerfile
-          IMAGE_NAME=perplexica
-          docker buildx build --platform linux/arm64 \
-            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
-            --cache-to=type=inline \
-            --provenance false \
-            -f $DOCKERFILE \
-            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
-            --push .
-
-  manifest:
-    needs: [build-amd64, build-arm64]
-    runs-on: ubuntu-latest
-    steps:
-      - name: Log in to DockerHub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-
-      - name: Create and push multi-arch manifest for main
-        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
-        run: |
-          IMAGE_NAME=perplexica
-          docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
-            --amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
-            --amend itzcrazykns1337/${IMAGE_NAME}:arm64
-          docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
-
-      - name: Create and push multi-arch manifest for releases
-        if: github.event_name == 'release'
-        run: |
-          IMAGE_NAME=perplexica
-          docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
-            --amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
-            --amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
-          docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}
```
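Since both sides of this change publish multi-platform tags, you can sanity-check what actually landed on Docker Hub. A hedged sketch using the standard Docker CLI (the command is not part of the repo itself):

```bash
# List the per-architecture digests behind the multi-arch tag
docker buildx imagetools inspect itzcrazykns1337/perplexica-backend:main

# The frontend image can be checked the same way
docker buildx imagetools inspect itzcrazykns1337/perplexica-frontend:main
```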
**.gitignore** (vendored, 6 lines changed)

```diff
@@ -4,9 +4,9 @@ npm-debug.log
 yarn-error.log
 
 # Build output
-.next/
-out/
-dist/
+/.next/
+/out/
+/dist/
 
 # IDE/Editor specific
 .vscode/
```
Prettier configuration:

```diff
@@ -6,6 +6,7 @@ const config = {
   endOfLine: 'auto',
   singleQuote: true,
   tabWidth: 2,
   semi: true,
 };
 
 module.exports = config;
```
**CONTRIBUTING.md**

```diff
@@ -1,43 +1,32 @@
 # How to Contribute to Perplexica
 
-Thanks for your interest in contributing to Perplexica! Your help makes this project better. This guide explains how to contribute effectively.
-
-Perplexica is a modern AI chat application with advanced search capabilities.
+Hey there, thanks for deciding to contribute to Perplexica. Anything you help with will support the development of Perplexica and will make it better. Let's walk you through the key aspects to ensure your contributions are effective and in harmony with the project's setup.
 
 ## Project Structure
 
-Perplexica's codebase is organized as follows:
+Perplexica's design consists of two main domains:
 
-- **UI Components and Pages**:
-  - **Components (`src/components`)**: Reusable UI components.
-  - **Pages and Routes (`src/app`)**: Next.js app directory structure with page components.
-    - Main app routes include: home (`/`), chat (`/c`), discover (`/discover`), library (`/library`), and settings (`/settings`).
-  - **API Routes (`src/app/api`)**: API endpoints implemented with Next.js API routes.
-    - `/api/chat`: Handles chat interactions.
-    - `/api/search`: Provides direct access to Perplexica's search capabilities.
-    - Other endpoints for models, files, and suggestions.
-- **Backend Logic (`src/lib`)**: Contains all the backend functionality including search, database, and API logic.
-  - The search functionality is present inside `src/lib/search` directory.
-  - All of the focus modes are implemented using the Meta Search Agent class in `src/lib/search/metaSearchAgent.ts`.
-  - Database functionality is in `src/lib/db`.
-  - Chat model and embedding model providers are managed in `src/lib/providers`.
-  - Prompt templates and LLM chain definitions are in `src/lib/prompts` and `src/lib/chains` respectively.
-
-## API Documentation
-
-Perplexica exposes several API endpoints for programmatic access, including:
-
-- **Search API**: Access Perplexica's advanced search capabilities directly via the `/api/search` endpoint. For detailed documentation, see `docs/api/search.md`.
+- **Frontend (`ui` directory)**: This is a Next.js application holding all user interface components. It's a self-contained environment that manages everything the user interacts with.
+- **Backend (root and `src` directory)**: The backend logic is situated in the `src` folder, but the root directory holds the main `package.json` for backend dependency management.
+- All of the focus modes are created using the Meta Search Agent class present in `src/search/metaSearchAgent.ts`. The main logic behind Perplexica lies there.
 
 ## Setting Up Your Environment
 
 Before diving into coding, setting up your local environment is key. Here's what you need to do:
 
 ### Backend
 
 1. In the root directory, locate the `sample.config.toml` file.
-2. Rename it to `config.toml` and fill in the necessary configuration fields.
-3. Run `npm install` to install all dependencies.
-4. Run `npm run db:push` to set up the local sqlite database.
-5. Use `npm run dev` to start the application in development mode.
+2. Rename it to `config.toml` and fill in the necessary configuration fields specific to the backend.
+3. Run `npm install` to install dependencies.
+4. Run `npm run db:push` to set up the local sqlite.
+5. Use `npm run dev` to start the backend in development mode.
+
+### Frontend
+
+1. Navigate to the `ui` folder and repeat the process of renaming `.env.example` to `.env`, making sure to provide the frontend-specific variables.
+2. Execute `npm install` within the `ui` directory to get the frontend dependencies ready.
+3. Launch the frontend development server with `npm run dev`.
 
 **Please note**: Docker configurations are present for setting up production environments, whereas `npm run dev` is used for development purposes.
```
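Taken together, the split-repo setup steps amount to the following shell session. A sketch only — it assumes you are in the repository root and that `sample.config.toml` and `ui/.env.example` exist as described above:

```bash
# Backend (repository root)
cp sample.config.toml config.toml   # then fill in the required fields
npm install
npm run db:push                     # sets up the local SQLite database
npm run dev                         # starts the backend (default port 3001)

# Frontend (in a second terminal)
cd ui
cp .env.example .env                # provide the frontend-specific variables
npm install
npm run dev                         # starts the Next.js dev server
```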
**README.md**

```diff
@@ -109,13 +109,14 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 
 1. Install SearXNG and allow `JSON` format in the SearXNG settings.
 2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
-3. After populating the configuration run `npm i`.
-4. Install the dependencies and then execute `npm run build`.
-5. Finally, start the app by running `npm rum start`
+3. Rename the `.env.example` file to `.env` in the `ui` folder and fill in all necessary fields.
+4. After populating the configuration and environment files, run `npm i` in both the `ui` folder and the root directory.
+5. Install the dependencies and then execute `npm run build` in both the `ui` folder and the root directory.
+6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
 
 **Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
 
-See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
+See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like exposing it to your network, etc.
 
 ### Ollama Connection Errors
```
**app.dockerfile**

```diff
@@ -1,27 +1,15 @@
-FROM node:20.18.0-alpine AS builder
-
-WORKDIR /home/perplexica
-
-COPY package.json yarn.lock ./
-RUN yarn install --frozen-lockfile --network-timeout 600000
-
-COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
-COPY src ./src
-COPY public ./public
-
-RUN mkdir -p /home/perplexica/data
-RUN yarn build
-
 FROM node:20.18.0-alpine
 
+ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
+ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
+ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
+
 WORKDIR /home/perplexica
 
-COPY --from=builder /home/perplexica/public ./public
-COPY --from=builder /home/perplexica/.next/static ./public/_next/static
+COPY ui /home/perplexica/
 
-COPY --from=builder /home/perplexica/.next/standalone ./
-COPY --from=builder /home/perplexica/data ./data
+RUN yarn install --frozen-lockfile
+RUN yarn build
 
-RUN mkdir /home/perplexica/uploads
-
-CMD ["node", "server.js"]
+CMD ["yarn", "start"]
```
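Because the `NEXT_PUBLIC_*` values are baked in at build time, pointing the frontend at a different backend means rebuilding the image. A hedged sketch of a local build (the tag name is illustrative):

```bash
docker build -f app.dockerfile \
  --build-arg NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api \
  --build-arg NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001 \
  -t perplexica-frontend:local .
```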
**backend.dockerfile** (new file, 17 lines)

```dockerfile
FROM node:18-slim

WORKDIR /home/perplexica

COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/

RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads

RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build

CMD ["yarn", "start"]
```
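A hedged sketch of building and running the backend image on its own; the `config.toml` bind mount mirrors what `docker-compose.yaml` does below, and port 3001 comes from the `PORT` default in `sample.config.toml`:

```bash
docker build -f backend.dockerfile -t perplexica-backend:local .

docker run --rm -p 3001:3001 \
  -v "$(pwd)/config.toml:/home/perplexica/config.toml" \
  perplexica-backend:local
```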
**docker-compose.yaml**

```diff
@@ -9,21 +9,41 @@ services:
       - perplexica-network
     restart: unless-stopped
 
-  app:
-    image: itzcrazykns1337/perplexica:main
+  perplexica-backend:
     build:
       context: .
-      dockerfile: app.dockerfile
+      dockerfile: backend.dockerfile
+    image: itzcrazykns1337/perplexica-backend:main
     environment:
       - SEARXNG_API_URL=http://searxng:8080
     depends_on:
       - searxng
     ports:
-      - 3000:3000
-    networks:
-      - perplexica-network
+      - 3001:3001
+    volumes:
+      - backend-dbstore:/home/perplexica/data
+      - uploads:/home/perplexica/uploads
+      - ./config.toml:/home/perplexica/config.toml
+    extra_hosts:
+      - 'host.docker.internal:host-gateway'
+    networks:
+      - perplexica-network
     restart: unless-stopped
 
+  perplexica-frontend:
+    build:
+      context: .
+      dockerfile: app.dockerfile
+      args:
+        - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
+        - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+    image: itzcrazykns1337/perplexica-frontend:main
+    depends_on:
+      - perplexica-backend
+    ports:
+      - 3000:3000
+    networks:
+      - perplexica-network
+    restart: unless-stopped
+
 networks:
```
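With the split compose file, a quick smoke test is to bring the stack up and hit the backend's health route (the `GET /api` handler in `src/app.ts`, shown further down, returns `{"status":"ok"}`). A sketch, assuming default ports:

```bash
docker compose up -d --build

# Backend health check — should print {"status":"ok"}
curl http://localhost:3001/api

# Frontend is served on port 3000
curl -I http://localhost:3000
```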
**docs/api/search.md**

```diff
@@ -6,9 +6,9 @@ Perplexica’s Search API makes it easy to use our AI-powered search engine. You
 
 ## Endpoint
 
-### **POST** `http://localhost:3000/api/search`
+### **POST** `http://localhost:3001/api/search`
 
-**Note**: Replace `3000` with any other port if you've changed the default PORT
+**Note**: Replace `3001` with any other port if you've changed the default PORT
 
 ### Request
 
@@ -20,11 +20,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
 {
   "chatModel": {
     "provider": "openai",
-    "name": "gpt-4o-mini"
+    "model": "gpt-4o-mini"
   },
   "embeddingModel": {
     "provider": "openai",
-    "name": "text-embedding-3-large"
+    "model": "text-embedding-3-large"
   },
   "optimizationMode": "speed",
   "focusMode": "webSearch",
@@ -38,18 +38,18 @@ The API accepts a JSON object in the request body, where you define the focus mo
 
 ### Request Parameters
 
-- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
 
   - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
-  - `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
+  - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
   - Optional fields for custom OpenAI configuration:
     - `customOpenAIBaseURL`: If you’re using a custom OpenAI instance, provide the base URL.
     - `customOpenAIKey`: The API key for a custom OpenAI instance.
 
-- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
 
   - `provider`: The provider for the embedding model (e.g., `openai`).
-  - `name`: The specific embedding model (e.g., `text-embedding-3-large`).
+  - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
 
 - **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
```
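Given the renamed `model` field and port, a request against the split backend looks like this. A hedged example — it assumes the remaining body fields (`query`, and optionally `history`) follow the sample truncated above:

```bash
curl -X POST http://localhost:3001/api/search \
  -H 'Content-Type: application/json' \
  -d '{
    "chatModel": { "provider": "openai", "model": "gpt-4o-mini" },
    "embeddingModel": { "provider": "openai", "model": "text-embedding-3-large" },
    "optimizationMode": "speed",
    "focusMode": "webSearch",
    "query": "How does an A.C. work?"
  }'
```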
**docs/architecture/WORKING.md**

```diff
@@ -4,7 +4,7 @@ Curious about how Perplexica works? Don't worry, we'll cover it here. Before we
 
 We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:
 
-1. The message is sent to the `/api/chat` route where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
+1. The message is sent via WS to the backend server where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
 2. The chain is now invoked; first, the message is passed to another chain where it first predicts (using the chat history and the question) whether there is a need for sources and searching the web. If there is, it will generate a query (in accordance with the chat history) for searching the web that we'll take up later. If not, the chain will end there, and then the answer generator chain, also known as the response generator, will be started.
 3. The query returned by the first chain is passed to SearXNG to search the web for information.
 4. After the information is retrieved, it is based on keyword-based search. We then convert the information into embeddings and the query as well, then we perform a similarity search to find the most relevant sources to answer the query.
```
**docs/installation/NETWORKING.md** (new file, 109 lines)

````markdown
# Expose Perplexica to a network

This guide will show you how to make Perplexica available over a network. Follow these steps to allow computers on the same network to interact with Perplexica. Choose the instructions that match the operating system you are using.

## Windows

1. Open PowerShell as Administrator

2. Navigate to the directory containing the `docker-compose.yaml` file

3. Stop and remove the existing Perplexica containers and images:

```bash
docker compose down --rmi all
```

4. Open the `docker-compose.yaml` file in a text editor like Notepad++

5. Replace `127.0.0.1` with the IP address of the server Perplexica is running on in these two lines:

```bash
args:
  - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
  - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```

6. Save and close the `docker-compose.yaml` file

7. Rebuild and restart the Perplexica container:

```bash
docker compose up -d --build
```

## macOS

1. Open the Terminal application

2. Navigate to the directory with the `docker-compose.yaml` file:

```bash
cd /path/to/docker-compose.yaml
```

3. Stop and remove existing containers and images:

```bash
docker compose down --rmi all
```

4. Open `docker-compose.yaml` in a text editor like Sublime Text:

```bash
nano docker-compose.yaml
```

5. Replace `127.0.0.1` with the server IP in these lines:

```bash
args:
  - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
  - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```

6. Save and exit the editor

7. Rebuild and restart Perplexica:

```bash
docker compose up -d --build
```

## Linux

1. Open the terminal

2. Navigate to the `docker-compose.yaml` directory:

```bash
cd /path/to/docker-compose.yaml
```

3. Stop and remove containers and images:

```bash
docker compose down --rmi all
```

4. Edit `docker-compose.yaml`:

```bash
nano docker-compose.yaml
```

5. Replace `127.0.0.1` with the server IP:

```bash
args:
  - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
  - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```

6. Save and exit the editor

7. Rebuild and restart Perplexica:

```bash
docker compose up -d --build
```
````
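Each platform section assumes you already know the server's LAN IP. If you need to look it up first, these standard OS commands work (they are not part of the guide itself):

```bash
# Linux
ip addr show                 # look for the inet address on your LAN interface

# macOS
ipconfig getifaddr en0

# Windows (PowerShell)
ipconfig                     # look for the IPv4 Address of the active adapter
```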
**docs/installation/UPDATING.md**

```diff
@@ -39,8 +39,11 @@ To update Perplexica to the latest version, follow these steps:
 2. Navigate to the project directory.
 
 3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
-4. After populating the configuration run `npm i`.
-5. Install the dependencies and then execute `npm run build`.
-6. Finally, start the app by running `npm rum start`
+
+4. Execute `npm i` in both the `ui` folder and the root directory.
+
+5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
+
+6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
 
 ---
```
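The update steps condense into a short session. A sketch under the assumption that the repo was cloned with git and `config.toml` needs no new fields:

```bash
git pull          # steps 1–2: fetch the latest code

# Backend (repository root)
npm i && npm run build
npm run start

# Frontend (in a second terminal)
cd ui
npm i && npm run build
npm run start
```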
**drizzle.config.ts**

```diff
@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
 
 export default defineConfig({
   dialect: 'sqlite',
-  schema: './src/lib/db/schema.ts',
+  schema: './src/db/schema.ts',
   out: './drizzle',
   dbCredentials: {
     url: './data/db.sqlite',
```
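With this config, pushing the schema applies it to the SQLite database at the `dbCredentials.url` path. A sketch of the two equivalent invocations (the npm script comes from the backend `package.json` below):

```bash
# Via the package script
npm run db:push

# Or invoking drizzle-kit directly, matching the script's syntax
npx drizzle-kit push sqlite
```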
**next-env.d.ts** (vendored, deleted — 5 lines)

```typescript
/// <reference types="next" />
/// <reference types="next/image-types/global" />

// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
```
**package.json** (94 lines changed)

```diff
@@ -1,63 +1,53 @@
 {
-  "name": "perplexica-frontend",
-  "version": "1.10.0",
+  "name": "perplexica-backend",
+  "version": "1.10.0-rc3",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
-    "dev": "next dev",
-    "build": "npm run db:push && next build",
-    "start": "next start",
-    "lint": "next lint",
-    "format:write": "prettier . --write",
-    "db:push": "drizzle-kit push"
-  },
-  "dependencies": {
-    "@headlessui/react": "^2.2.0",
-    "@iarna/toml": "^2.2.5",
-    "@icons-pack/react-simple-icons": "^12.3.0",
-    "@langchain/community": "^0.3.36",
-    "@langchain/core": "^0.3.42",
-    "@langchain/openai": "^0.0.25",
-    "@langchain/textsplitters": "^0.1.0",
-    "@tailwindcss/typography": "^0.5.12",
-    "@xenova/transformers": "^2.17.2",
-    "axios": "^1.8.3",
-    "better-sqlite3": "^11.9.1",
-    "clsx": "^2.1.0",
-    "compute-cosine-similarity": "^1.1.0",
-    "compute-dot": "^1.1.0",
-    "drizzle-orm": "^0.40.1",
-    "html-to-text": "^9.0.5",
-    "langchain": "^0.1.30",
-    "lucide-react": "^0.363.0",
-    "markdown-to-jsx": "^7.7.2",
-    "next": "^15.2.2",
-    "next-themes": "^0.3.0",
-    "pdf-parse": "^1.1.1",
-    "react": "^18",
-    "react-dom": "^18",
-    "react-text-to-speech": "^0.14.5",
-    "react-textarea-autosize": "^8.5.3",
-    "sonner": "^1.4.41",
-    "tailwind-merge": "^2.2.2",
-    "winston": "^3.17.0",
-    "yet-another-react-lightbox": "^3.17.2",
-    "zod": "^3.22.4"
+    "start": "npm run db:push && node dist/app.js",
+    "build": "tsc",
+    "dev": "nodemon --ignore uploads/ src/app.ts ",
+    "db:push": "drizzle-kit push sqlite",
+    "format": "prettier . --check",
+    "format:write": "prettier . --write"
   },
   "devDependencies": {
-    "@types/better-sqlite3": "^7.6.12",
+    "@types/better-sqlite3": "^7.6.10",
+    "@types/cors": "^2.8.17",
+    "@types/express": "^4.17.21",
     "@types/html-to-text": "^9.0.4",
-    "@types/node": "^20",
+    "@types/multer": "^1.4.12",
     "@types/pdf-parse": "^1.1.4",
-    "@types/react": "^18",
-    "@types/react-dom": "^18",
-    "autoprefixer": "^10.0.1",
-    "drizzle-kit": "^0.30.5",
-    "eslint": "^8",
-    "eslint-config-next": "14.1.4",
-    "postcss": "^8",
+    "@types/readable-stream": "^4.0.11",
+    "@types/ws": "^8.5.12",
+    "drizzle-kit": "^0.22.7",
+    "nodemon": "^3.1.0",
     "prettier": "^3.2.5",
-    "tailwindcss": "^3.3.0",
-    "typescript": "^5"
+    "ts-node": "^10.9.2",
+    "typescript": "^5.4.3"
   },
+  "dependencies": {
+    "@iarna/toml": "^2.2.5",
+    "@langchain/anthropic": "^0.2.3",
+    "@langchain/community": "^0.2.16",
+    "@langchain/openai": "^0.0.25",
+    "@langchain/google-genai": "^0.0.23",
+    "@xenova/transformers": "^2.17.1",
+    "axios": "^1.6.8",
+    "better-sqlite3": "^11.0.0",
+    "compute-cosine-similarity": "^1.1.0",
+    "compute-dot": "^1.1.0",
+    "cors": "^2.8.5",
+    "dotenv": "^16.4.5",
+    "drizzle-orm": "^0.31.2",
+    "express": "^4.19.2",
+    "html-to-text": "^9.0.5",
+    "langchain": "^0.1.30",
+    "mammoth": "^1.8.0",
+    "multer": "^1.4.5-lts.1",
+    "pdf-parse": "^1.1.1",
+    "winston": "^3.13.0",
+    "ws": "^8.17.1",
+    "zod": "^3.22.4"
+  }
 }
```
**sample.config.toml**

```diff
@@ -1,4 +1,5 @@
 [GENERAL]
+PORT = 3001 # Port to run the server on
 SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
 KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
 
@@ -23,4 +24,7 @@ MODEL_NAME = ""
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 
 [API_ENDPOINTS]
-SEARXNG = "" # SearxNG API URL - http://localhost:32768
+SEARXNG = "http://localhost:32768" # SearxNG API URL
+
+[API_KEYS]
+JINA = ""
```
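A minimal way to get a working backend config from the sample, assuming a local SearXNG on port 32768 as the default suggests:

```bash
cp sample.config.toml config.toml
# then fill in at least the provider API keys you intend to use;
# PORT and SEARXNG already have usable defaults in this revision.
```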
**src/app.ts** (new file, 38 lines)

```typescript
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';

const port = getPort();

const app = express();
const server = http.createServer(app);

const corsOptions = {
  origin: '*',
};

app.use(cors(corsOptions));
app.use(express.json());

app.use('/api', routes);
app.get('/api', (_, res) => {
  res.status(200).json({ status: 'ok' });
});

server.listen(port, () => {
  logger.info(`Server is running on port ${port}`);
});

startWebSocketServer(server);

process.on('uncaughtException', (err, origin) => {
  logger.error(`Uncaught Exception at ${origin}: ${err}`);
});

process.on('unhandledRejection', (reason, promise) => {
  logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});
```
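Since the HTTP API and the WebSocket server share one `http.Server`, both can be probed on the same port. A hedged check — `wscat` is a third-party tool, not a project dependency:

```bash
# HTTP health check — prints {"status":"ok"}
curl http://127.0.0.1:3001/api

# WebSocket handshake on the same port
npx wscat --connect ws://127.0.0.1:3001
```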
**src/app/api/chat/route.ts** (deleted, 304 lines)

```typescript
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
  chatModelProviders,
  embeddingModelProviders,
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';

export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';

type Message = {
  messageId: string;
  chatId: string;
  content: string;
};

type ChatModel = {
  provider: string;
  name: string;
};

type EmbeddingModel = {
  provider: string;
  name: string;
};

type Body = {
  message: Message;
  optimizationMode: 'speed' | 'balanced' | 'quality';
  focusMode: string;
  history: Array<[string, string]>;
  files: Array<string>;
  chatModel: ChatModel;
  embeddingModel: EmbeddingModel;
};

const handleEmitterEvents = async (
  stream: EventEmitter,
  writer: WritableStreamDefaultWriter,
  encoder: TextEncoder,
  aiMessageId: string,
  chatId: string,
) => {
  let recievedMessage = '';
  let sources: any[] = [];

  stream.on('data', (data) => {
    const parsedData = JSON.parse(data);
    if (parsedData.type === 'response') {
      writer.write(
        encoder.encode(
          JSON.stringify({
            type: 'message',
            data: parsedData.data,
            messageId: aiMessageId,
          }) + '\n',
        ),
      );

      recievedMessage += parsedData.data;
    } else if (parsedData.type === 'sources') {
      writer.write(
        encoder.encode(
          JSON.stringify({
            type: 'sources',
            data: parsedData.data,
            messageId: aiMessageId,
          }) + '\n',
        ),
      );

      sources = parsedData.data;
    }
  });
  stream.on('end', () => {
    writer.write(
      encoder.encode(
        JSON.stringify({
          type: 'messageEnd',
          messageId: aiMessageId,
        }) + '\n',
      ),
    );
    writer.close();

    db.insert(messagesSchema)
      .values({
        content: recievedMessage,
        chatId: chatId,
        messageId: aiMessageId,
        role: 'assistant',
        metadata: JSON.stringify({
          createdAt: new Date(),
          ...(sources && sources.length > 0 && { sources }),
        }),
      })
      .execute();
  });
  stream.on('error', (data) => {
    const parsedData = JSON.parse(data);
    writer.write(
      encoder.encode(
        JSON.stringify({
          type: 'error',
          data: parsedData.data,
        }),
      ),
    );
    writer.close();
  });
};

const handleHistorySave = async (
  message: Message,
  humanMessageId: string,
  focusMode: string,
  files: string[],
) => {
  const chat = await db.query.chats.findFirst({
    where: eq(chats.id, message.chatId),
  });

  if (!chat) {
    await db
      .insert(chats)
      .values({
        id: message.chatId,
        title: message.content,
        createdAt: new Date().toString(),
        focusMode: focusMode,
        files: files.map(getFileDetails),
      })
      .execute();
  }

  const messageExists = await db.query.messages.findFirst({
    where: eq(messagesSchema.messageId, humanMessageId),
  });

  if (!messageExists) {
    await db
      .insert(messagesSchema)
      .values({
        content: message.content,
        chatId: message.chatId,
        messageId: humanMessageId,
        role: 'user',
        metadata: JSON.stringify({
          createdAt: new Date(),
        }),
      })
      .execute();
  } else {
    await db
      .delete(messagesSchema)
      .where(
        and(
          gt(messagesSchema.id, messageExists.id),
          eq(messagesSchema.chatId, message.chatId),
        ),
      )
      .execute();
  }
};

export const POST = async (req: Request) => {
  try {
    const body = (await req.json()) as Body;
    const { message } = body;

    if (message.content === '') {
      return Response.json(
        {
          message: 'Please provide a message to process',
        },
        { status: 400 },
      );
    }

    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
      getAvailableChatModelProviders(),
      getAvailableEmbeddingModelProviders(),
    ]);

    const chatModelProvider =
      chatModelProviders[
        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
      ];
    const chatModel =
      chatModelProvider[
        body.chatModel?.name || Object.keys(chatModelProvider)[0]
      ];

    const embeddingProvider =
      embeddingModelProviders[
        body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
      ];
    const embeddingModel =
      embeddingProvider[
        body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
      ];

    let llm: BaseChatModel | undefined;
    let embedding = embeddingModel.model;

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        openAIApiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
      }) as unknown as BaseChatModel;
    } else if (chatModelProvider && chatModel) {
      llm = chatModel.model;
    }

    if (!llm) {
      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
    }

    if (!embedding) {
      return Response.json(
        { error: 'Invalid embedding model' },
        { status: 400 },
      );
    }

    const humanMessageId =
      message.messageId ?? crypto.randomBytes(7).toString('hex');
    const aiMessageId = crypto.randomBytes(7).toString('hex');

    const history: BaseMessage[] = body.history.map((msg) => {
      if (msg[0] === 'human') {
        return new HumanMessage({
          content: msg[1],
        });
      } else {
        return new AIMessage({
          content: msg[1],
        });
      }
    });

    const handler = searchHandlers[body.focusMode];

    if (!handler) {
      return Response.json(
        {
          message: 'Invalid focus mode',
        },
        { status: 400 },
      );
    }

    const stream = await handler.searchAndAnswer(
      message.content,
      history,
      llm,
      embedding,
      body.optimizationMode,
      body.files,
    );

    const responseStream = new TransformStream();
    const writer = responseStream.writable.getWriter();
    const encoder = new TextEncoder();

    handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
    handleHistorySave(message, humanMessageId, body.focusMode, body.files);

    return new Response(responseStream.readable, {
      headers: {
        'Content-Type': 'text/event-stream',
        Connection: 'keep-alive',
        'Cache-Control': 'no-cache, no-transform',
      },
    });
  } catch (err) {
    console.error('An error ocurred while processing chat request:', err);
    return Response.json(
      { message: 'An error ocurred while processing chat request' },
      { status: 500 },
    );
  }
};
```
**src/app/api/chats/[id]/route.ts** (deleted, 69 lines)

```typescript
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';

export const GET = async (
  req: Request,
  { params }: { params: Promise<{ id: string }> },
) => {
  try {
    const { id } = await params;

    const chatExists = await db.query.chats.findFirst({
      where: eq(chats.id, id),
    });

    if (!chatExists) {
      return Response.json({ message: 'Chat not found' }, { status: 404 });
    }

    const chatMessages = await db.query.messages.findMany({
      where: eq(messages.chatId, id),
    });

    return Response.json(
      {
        chat: chatExists,
        messages: chatMessages,
      },
      { status: 200 },
    );
  } catch (err) {
    console.error('Error in getting chat by id: ', err);
    return Response.json(
      { message: 'An error has occurred.' },
      { status: 500 },
    );
  }
};

export const DELETE = async (
  req: Request,
  { params }: { params: Promise<{ id: string }> },
) => {
  try {
    const { id } = await params;

    const chatExists = await db.query.chats.findFirst({
      where: eq(chats.id, id),
    });

    if (!chatExists) {
      return Response.json({ message: 'Chat not found' }, { status: 404 });
    }

    await db.delete(chats).where(eq(chats.id, id)).execute();
    await db.delete(messages).where(eq(messages.chatId, id)).execute();

    return Response.json(
      { message: 'Chat deleted successfully' },
      { status: 200 },
    );
  } catch (err) {
    console.error('Error in deleting chat by id: ', err);
    return Response.json(
      { message: 'An error has occurred.' },
      { status: 500 },
    );
  }
};
```
**src/app/api/chats/route.ts** (deleted, 15 lines)

```typescript
import db from '@/lib/db';

export const GET = async (req: Request) => {
  try {
    let chats = await db.query.chats.findMany();
    chats = chats.reverse();
    return Response.json({ chats: chats }, { status: 200 });
  } catch (err) {
    console.error('Error in getting chats: ', err);
    return Response.json(
      { message: 'An error has occurred.' },
      { status: 500 },
    );
  }
};
```
**src/app/api/discover/route.ts** (deleted, 61 lines)

```typescript
import { searchSearxng } from '@/lib/searxng';

const articleWebsites = [
  'yahoo.com',
  'www.exchangewire.com',
  'businessinsider.com',
  /* 'wired.com',
  'mashable.com',
  'theverge.com',
  'gizmodo.com',
  'cnet.com',
  'venturebeat.com', */
];

const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */

export const GET = async (req: Request) => {
  try {
    const data = (
      await Promise.all([
        ...new Array(articleWebsites.length * topics.length)
          .fill(0)
          .map(async (_, i) => {
            return (
              await searchSearxng(
                `site:${articleWebsites[i % articleWebsites.length]} ${
                  topics[i % topics.length]
                }`,
                {
                  engines: ['bing news'],
                  pageno: 1,
                },
              )
            ).results;
          }),
      ])
    )
      .map((result) => result)
      .flat()
      .sort(() => Math.random() - 0.5);

    return Response.json(
      {
        blogs: data,
      },
      {
        status: 200,
      },
    );
  } catch (err) {
    console.error(`An error ocurred in discover route: ${err}`);
    return Response.json(
      {
        message: 'An error has occurred',
      },
      {
        status: 500,
      },
    );
  }
};
```
**src/app/api/images/route.ts** (deleted, 83 lines)

```typescript
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';

interface ChatModel {
  provider: string;
  model: string;
}

interface ImageSearchBody {
  query: string;
  chatHistory: any[];
  chatModel?: ChatModel;
}

export const POST = async (req: Request) => {
  try {
    const body: ImageSearchBody = await req.json();

    const chatHistory = body.chatHistory
      .map((msg: any) => {
        if (msg.role === 'user') {
          return new HumanMessage(msg.content);
        } else if (msg.role === 'assistant') {
          return new AIMessage(msg.content);
        }
      })
      .filter((msg) => msg !== undefined) as BaseMessage[];

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      chatModelProviders[
        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
      ];
    const chatModel =
      chatModelProvider[
        body.chatModel?.model || Object.keys(chatModelProvider)[0]
      ];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        openAIApiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
      }) as unknown as BaseChatModel;
    } else if (chatModelProvider && chatModel) {
      llm = chatModel.model;
    }

    if (!llm) {
      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
    }

    const images = await handleImageSearch(
      {
        chat_history: chatHistory,
        query: body.query,
      },
      llm,
    );

    return Response.json({ images }, { status: 200 });
  } catch (err) {
    console.error(`An error ocurred while searching images: ${err}`);
    return Response.json(
      { message: 'An error ocurred while searching images' },
      { status: 500 },
    );
  }
};
```
**src/app/api/models/route.ts** (deleted, 47 lines)

```typescript
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '@/lib/providers';

export const GET = async (req: Request) => {
  try {
    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
      getAvailableChatModelProviders(),
      getAvailableEmbeddingModelProviders(),
    ]);

    Object.keys(chatModelProviders).forEach((provider) => {
      Object.keys(chatModelProviders[provider]).forEach((model) => {
        delete (chatModelProviders[provider][model] as { model?: unknown })
          .model;
      });
    });

    Object.keys(embeddingModelProviders).forEach((provider) => {
      Object.keys(embeddingModelProviders[provider]).forEach((model) => {
        delete (embeddingModelProviders[provider][model] as { model?: unknown })
          .model;
      });
    });

    return Response.json(
      {
        chatModelProviders,
        embeddingModelProviders,
      },
      {
        status: 200,
      },
    );
  } catch (err) {
    console.error('An error ocurred while fetching models', err);
    return Response.json(
      {
        message: 'An error has occurred.',
      },
      {
        status: 500,
      },
    );
  }
};
```
**src/app/api/suggestions/route.ts** (deleted, 81 lines)

```typescript
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';

interface ChatModel {
  provider: string;
  model: string;
}

interface SuggestionsGenerationBody {
  chatHistory: any[];
  chatModel?: ChatModel;
}

export const POST = async (req: Request) => {
  try {
    const body: SuggestionsGenerationBody = await req.json();

    const chatHistory = body.chatHistory
      .map((msg: any) => {
        if (msg.role === 'user') {
          return new HumanMessage(msg.content);
        } else if (msg.role === 'assistant') {
          return new AIMessage(msg.content);
        }
      })
      .filter((msg) => msg !== undefined) as BaseMessage[];

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      chatModelProviders[
        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
      ];
    const chatModel =
      chatModelProvider[
        body.chatModel?.model || Object.keys(chatModelProvider)[0]
      ];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        openAIApiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
      }) as unknown as BaseChatModel;
    } else if (chatModelProvider && chatModel) {
      llm = chatModel.model;
    }

    if (!llm) {
      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
    }

    const suggestions = await generateSuggestions(
      {
        chat_history: chatHistory,
      },
      llm,
    );

    return Response.json({ suggestions }, { status: 200 });
  } catch (err) {
    console.error(`An error ocurred while generating suggestions: ${err}`);
    return Response.json(
      { message: 'An error ocurred while generating suggestions' },
      { status: 500 },
    );
  }
};
```
**src/app/api/uploads/route.ts** (deleted, 134 lines)

```typescript
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';

interface FileRes {
  fileName: string;
  fileExtension: string;
  fileId: string;
}

const uploadDir = path.join(process.cwd(), 'uploads');

if (!fs.existsSync(uploadDir)) {
  fs.mkdirSync(uploadDir, { recursive: true });
}

const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 500,
  chunkOverlap: 100,
});

export async function POST(req: Request) {
  try {
    const formData = await req.formData();

    const files = formData.getAll('files') as File[];
    const embedding_model = formData.get('embedding_model');
    const embedding_model_provider = formData.get('embedding_model_provider');

    if (!embedding_model || !embedding_model_provider) {
      return NextResponse.json(
        { message: 'Missing embedding model or provider' },
        { status: 400 },
      );
    }

    const embeddingModels = await getAvailableEmbeddingModelProviders();
    const provider =
      embedding_model_provider ?? Object.keys(embeddingModels)[0];
    const embeddingModel =
      embedding_model ?? Object.keys(embeddingModels[provider as string])[0];

    let embeddingsModel =
      embeddingModels[provider as string]?.[embeddingModel as string]?.model;
    if (!embeddingsModel) {
      return NextResponse.json(
        { message: 'Invalid embedding model selected' },
        { status: 400 },
      );
    }

    const processedFiles: FileRes[] = [];

    await Promise.all(
      files.map(async (file: any) => {
        const fileExtension = file.name.split('.').pop();
        if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
          return NextResponse.json(
            { message: 'File type not supported' },
            { status: 400 },
          );
        }

        const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
        const filePath = path.join(uploadDir, uniqueFileName);

        const buffer = Buffer.from(await file.arrayBuffer());
        fs.writeFileSync(filePath, new Uint8Array(buffer));

        let docs: any[] = [];
        if (fileExtension === 'pdf') {
          const loader = new PDFLoader(filePath);
          docs = await loader.load();
        } else if (fileExtension === 'docx') {
          const loader = new DocxLoader(filePath);
          docs = await loader.load();
        } else if (fileExtension === 'txt') {
          const text = fs.readFileSync(filePath, 'utf-8');
          docs = [
            new Document({ pageContent: text, metadata: { title: file.name } }),
          ];
        }

        const splitted = await splitter.splitDocuments(docs);

        const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
        fs.writeFileSync(
          extractedDataPath,
          JSON.stringify({
            title: file.name,
            contents: splitted.map((doc) => doc.pageContent),
          }),
        );

        const embeddings = await embeddingsModel.embedDocuments(
          splitted.map((doc) => doc.pageContent),
        );
        const embeddingsDataPath = filePath.replace(
          /\.\w+$/,
          '-embeddings.json',
        );
        fs.writeFileSync(
          embeddingsDataPath,
          JSON.stringify({
            title: file.name,
            embeddings,
          }),
        );

        processedFiles.push({
          fileName: file.name,
          fileExtension: fileExtension,
          fileId: uniqueFileName.replace(/\.\w+$/, ''),
        });
      }),
    );

    return NextResponse.json({
      files: processedFiles,
    });
  } catch (error) {
    console.error('Error uploading file:', error);
    return NextResponse.json(
      { message: 'An error has occurred.' },
      { status: 500 },
    );
  }
}
```
**src/app/api/videos/route.ts** (deleted, 83 lines)

```typescript
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';

interface ChatModel {
  provider: string;
  model: string;
}

interface VideoSearchBody {
  query: string;
  chatHistory: any[];
  chatModel?: ChatModel;
}

export const POST = async (req: Request) => {
  try {
    const body: VideoSearchBody = await req.json();

    const chatHistory = body.chatHistory
      .map((msg: any) => {
        if (msg.role === 'user') {
          return new HumanMessage(msg.content);
        } else if (msg.role === 'assistant') {
          return new AIMessage(msg.content);
        }
      })
      .filter((msg) => msg !== undefined) as BaseMessage[];

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      chatModelProviders[
        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
      ];
    const chatModel =
      chatModelProvider[
        body.chatModel?.model || Object.keys(chatModelProvider)[0]
      ];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        openAIApiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
      }) as unknown as BaseChatModel;
    } else if (chatModelProvider && chatModel) {
      llm = chatModel.model;
    }

    if (!llm) {
      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
    }

    const videos = await handleVideoSearch(
      {
        chat_history: chatHistory,
        query: body.query,
      },
      llm,
    );

    return Response.json({ videos }, { status: 200 });
  } catch (err) {
    console.error(`An error ocurred while searching videos: ${err}`);
    return Response.json(
      { message: 'An error ocurred while searching videos' },
      { status: 500 },
    );
  }
};
```
**src/app/c/[chatId]/page.tsx** (deleted, 9 lines)

```tsx
import ChatWindow from '@/components/ChatWindow';
import React from 'react';

const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
  const { chatId } = React.use(params);
  return <ChatWindow id={chatId} />;
};

export default Page;
```
**src/chains/imageSearchAgent.ts**

```diff
@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
-import { searchSearxng } from '../searxng';
+import { searchSearxng } from '../lib/searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const imageSearchChainPrompt = `
@@ -36,12 +36,6 @@ type ImageSearchChainInput = {
   query: string;
 };
 
-interface ImageSearchResult {
-  img_src: string;
-  url: string;
-  title: string;
-}
-
 const strParser = new StringOutputParser();
 
 const createImageSearchChain = (llm: BaseChatModel) => {
@@ -58,13 +52,11 @@ const createImageSearchChain = (llm: BaseChatModel) => {
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
-      input = input.replace(/<think>.*?<\/think>/g, '');
-
       const res = await searchSearxng(input, {
         engines: ['bing images', 'google images'],
       });
 
-      const images: ImageSearchResult[] = [];
+      const images = [];
 
       res.results.forEach((result) => {
         if (result.img_src && result.url && result.title) {
```
@@ -1,5 +1,5 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

const VideoSearchChainPrompt = `
@@ -36,13 +36,6 @@ type VideoSearchChainInput = {
  query: string;
};

interface VideoSearchResult {
  img_src: string;
  url: string;
  title: string;
  iframe_src: string;
}

const strParser = new StringOutputParser();

const createVideoSearchChain = (llm: BaseChatModel) => {
@@ -59,13 +52,11 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
    llm,
    strParser,
    RunnableLambda.from(async (input: string) => {
      input = input.replace(/<think>.*?<\/think>/g, '');

      const res = await searchSearxng(input, {
        engines: ['youtube'],
      });

      const videos: VideoSearchResult[] = [];
      const videos = [];

      res.results.forEach((result) => {
        if (
@@ -1,43 +0,0 @@
'use client';

import { useState } from 'react';
import { cn } from '@/lib/utils';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';

interface ThinkBoxProps {
  content: string;
}

const ThinkBox = ({ content }: ThinkBoxProps) => {
  const [isExpanded, setIsExpanded] = useState(false);

  return (
    <div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
      <button
        onClick={() => setIsExpanded(!isExpanded)}
        className="w-full flex items-center justify-between px-4 py-1 text-black/90 dark:text-white/90 hover:bg-light-200 dark:hover:bg-dark-200 transition duration-200"
      >
        <div className="flex items-center space-x-2">
          <BrainCircuit
            size={20}
            className="text-[#9C27B0] dark:text-[#CE93D8]"
          />
          <p className="font-medium text-sm">Thinking Process</p>
        </div>
        {isExpanded ? (
          <ChevronUp size={18} className="text-black/70 dark:text-white/70" />
        ) : (
          <ChevronDown size={18} className="text-black/70 dark:text-white/70" />
        )}
      </button>

      {isExpanded && (
        <div className="px-4 py-3 text-black/80 dark:text-white/80 text-sm border-t border-light-200 dark:border-dark-200 bg-light-100/50 dark:bg-dark-100/50 whitespace-pre-wrap">
          {content}
        </div>
      )}
    </div>
  );
};

export default ThinkBox;
@@ -6,6 +6,7 @@ const configFileName = 'config.toml';

interface Config {
  GENERAL: {
    PORT: number;
    SIMILARITY_MEASURE: string;
    KEEP_ALIVE: string;
  };
@@ -34,6 +35,9 @@ interface Config {
  API_ENDPOINTS: {
    SEARXNG: string;
  };
  API_KEYS: {
    JINA: string;
  }
}

type RecursivePartial<T> = {
@@ -42,9 +46,11 @@ type RecursivePartial<T> = {

const loadConfig = () =>
  toml.parse(
    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
    fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
  ) as any as Config;

export const getPort = () => loadConfig().GENERAL.PORT;

export const getSimilarityMeasure = () =>
  loadConfig().GENERAL.SIMILARITY_MEASURE;

@@ -72,6 +78,9 @@ export const getCustomOpenaiApiUrl =
export const getCustomOpenaiModelName = () =>
  loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;

export const getJinaApiKey = () =>
  loadConfig().API_KEYS.JINA;

const mergeConfigs = (current: any, update: any): any => {
  if (update === null || update === undefined) {
    return current;
@@ -106,8 +115,9 @@ const mergeConfigs = (current: any, update: any): any => {
export const updateConfig = (config: RecursivePartial<Config>) => {
  const currentConfig = loadConfig();
  const mergedConfig = mergeConfigs(currentConfig, config);

  fs.writeFileSync(
    path.join(path.join(process.cwd(), `${configFileName}`)),
    path.join(__dirname, `../${configFileName}`),
    toml.stringify(mergedConfig),
  );
};
@@ -1,9 +1,8 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';
import path from 'path';

const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
const sqlite = new Database('data/db.sqlite');
const db = drizzle(sqlite, {
  schema: schema,
});
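As context, this exported db instance is consumed through drizzle's relational query API. A minimal sketch mirroring the usage that appears in src/routes/chats.ts later in this diff:

// Sketch only; mirrors src/routes/chats.ts below (runs inside an async handler).
import db from './db/index';

async function listChats() {
  const allChats = await db.query.chats.findMany(); // relational query over the schema
  return allChats.reverse(); // newest first, as the chats route does
}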
@@ -28,7 +28,7 @@ export class HuggingFaceTransformersEmbeddings

  timeout?: number;

  private pipelinePromise: Promise<any> | undefined;
  private pipelinePromise: Promise<any>;

  constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
    super(fields ?? {});

@@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {

  constructor(args?: LineOutputParserArgs) {
    super();
    this.key = args?.key ?? this.key;
    this.key = args.key ?? this.key;
  }

  static lc_name() {

@@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {

  constructor(args?: LineListOutputParserArgs) {
    super();
    this.key = args?.key ?? this.key;
    this.key = args.key ?? this.key;
  }

  static lc_name() {

@@ -1,38 +1,6 @@
import { ChatOpenAI } from '@langchain/openai';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';

const anthropicChatModels: Record<string, string>[] = [
  {
    displayName: 'Claude 3.7 Sonnet',
    key: 'claude-3-7-sonnet-20250219',
  },
  {
    displayName: 'Claude 3.5 Haiku',
    key: 'claude-3-5-haiku-20241022',
  },
  {
    displayName: 'Claude 3.5 Sonnet v2',
    key: 'claude-3-5-sonnet-20241022',
  },
  {
    displayName: 'Claude 3.5 Sonnet',
    key: 'claude-3-5-sonnet-20240620',
  },
  {
    displayName: 'Claude 3 Opus',
    key: 'claude-3-opus-20240229',
  },
  {
    displayName: 'Claude 3 Sonnet',
    key: 'claude-3-sonnet-20240229',
  },
  {
    displayName: 'Claude 3 Haiku',
    key: 'claude-3-haiku-20240307',
  },
];
import { ChatAnthropic } from '@langchain/anthropic';
import { getAnthropicApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadAnthropicChatModels = async () => {
  const anthropicApiKey = getAnthropicApiKey();
@@ -40,25 +8,52 @@ export const loadAnthropicChatModels = async () => {
  if (!anthropicApiKey) return {};

  try {
    const chatModels: Record<string, ChatModel> = {};

    anthropicChatModels.forEach((model) => {
      chatModels[model.key] = {
        displayName: model.displayName,
        model: new ChatOpenAI({
          openAIApiKey: anthropicApiKey,
          modelName: model.key,
    const chatModels = {
      'claude-3-5-sonnet-20241022': {
        displayName: 'Claude 3.5 Sonnet',
        model: new ChatAnthropic({
          temperature: 0.7,
          configuration: {
            baseURL: 'https://api.anthropic.com/v1/',
          },
        }) as unknown as BaseChatModel,
      };
    });
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-5-sonnet-20241022',
        }),
      },
      'claude-3-5-haiku-20241022': {
        displayName: 'Claude 3.5 Haiku',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-5-haiku-20241022',
        }),
      },
      'claude-3-opus-20240229': {
        displayName: 'Claude 3 Opus',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-opus-20240229',
        }),
      },
      'claude-3-sonnet-20240229': {
        displayName: 'Claude 3 Sonnet',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-sonnet-20240229',
        }),
      },
      'claude-3-haiku-20240307': {
        displayName: 'Claude 3 Haiku',
        model: new ChatAnthropic({
          temperature: 0.7,
          anthropicApiKey: anthropicApiKey,
          model: 'claude-3-haiku-20240307',
        }),
      },
    };

    return chatModels;
  } catch (err) {
    console.error(`Error loading Anthropic models: ${err}`);
    logger.error(`Error loading Anthropic models: ${err}`);
    return {};
  }
};
@@ -1,42 +1,9 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';

const geminiChatModels: Record<string, string>[] = [
  {
    displayName: 'Gemini 2.0 Flash',
    key: 'gemini-2.0-flash',
  },
  {
    displayName: 'Gemini 2.0 Flash-Lite',
    key: 'gemini-2.0-flash-lite',
  },
  {
    displayName: 'Gemini 2.0 Pro Experimental',
    key: 'gemini-2.0-pro-exp-02-05',
  },
  {
    displayName: 'Gemini 1.5 Flash',
    key: 'gemini-1.5-flash',
  },
  {
    displayName: 'Gemini 1.5 Flash-8B',
    key: 'gemini-1.5-flash-8b',
  },
  {
    displayName: 'Gemini 1.5 Pro',
    key: 'gemini-1.5-pro',
  },
];

const geminiEmbeddingModels: Record<string, string>[] = [
  {
    displayName: 'Gemini Embedding',
    key: 'gemini-embedding-exp',
  },
];
import {
  ChatGoogleGenerativeAI,
  GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadGeminiChatModels = async () => {
  const geminiApiKey = getGeminiApiKey();
@@ -44,53 +11,75 @@ export const loadGeminiChatModels = async () => {
  if (!geminiApiKey) return {};

  try {
    const chatModels: Record<string, ChatModel> = {};

    geminiChatModels.forEach((model) => {
      chatModels[model.key] = {
        displayName: model.displayName,
        model: new ChatOpenAI({
          openAIApiKey: geminiApiKey,
          modelName: model.key,
    const chatModels = {
      'gemini-1.5-flash': {
        displayName: 'Gemini 1.5 Flash',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-1.5-flash',
          temperature: 0.7,
          configuration: {
            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
          },
        }) as unknown as BaseChatModel,
      };
    });
          apiKey: geminiApiKey,
        }),
      },
      'gemini-1.5-flash-8b': {
        displayName: 'Gemini 1.5 Flash 8B',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-1.5-flash-8b',
          temperature: 0.7,
          apiKey: geminiApiKey,
        }),
      },
      'gemini-1.5-pro': {
        displayName: 'Gemini 1.5 Pro',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-1.5-pro',
          temperature: 0.7,
          apiKey: geminiApiKey,
        }),
      },
      'gemini-2.0-flash-exp': {
        displayName: 'Gemini 2.0 Flash Exp',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-2.0-flash-exp',
          temperature: 0.7,
          apiKey: geminiApiKey,
        }),
      },
      'gemini-2.0-flash-thinking-exp-01-21': {
        displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-2.0-flash-thinking-exp-01-21',
          temperature: 0.7,
          apiKey: geminiApiKey,
        }),
      },
    };

    return chatModels;
  } catch (err) {
    console.error(`Error loading Gemini models: ${err}`);
    logger.error(`Error loading Gemini models: ${err}`);
    return {};
  }
};

export const loadGeminiEmbeddingModels = async () => {
export const loadGeminiEmbeddingsModels = async () => {
  const geminiApiKey = getGeminiApiKey();

  if (!geminiApiKey) return {};

  try {
    const embeddingModels: Record<string, EmbeddingModel> = {};

    geminiEmbeddingModels.forEach((model) => {
      embeddingModels[model.key] = {
        displayName: model.displayName,
        model: new OpenAIEmbeddings({
          openAIApiKey: geminiApiKey,
          modelName: model.key,
          configuration: {
            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
          },
        }) as unknown as Embeddings,
      };
    });
    const embeddingModels = {
      'text-embedding-004': {
        displayName: 'Text Embedding',
        model: new GoogleGenerativeAIEmbeddings({
          apiKey: geminiApiKey,
          modelName: 'text-embedding-004',
        }),
      },
    };

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading OpenAI embeddings models: ${err}`);
    logger.error(`Error loading Gemini embeddings model: ${err}`);
    return {};
  }
};
@@ -1,78 +1,6 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';

const groqChatModels: Record<string, string>[] = [
  {
    displayName: 'Gemma2 9B IT',
    key: 'gemma2-9b-it',
  },
  {
    displayName: 'Llama 3.3 70B Versatile',
    key: 'llama-3.3-70b-versatile',
  },
  {
    displayName: 'Llama 3.1 8B Instant',
    key: 'llama-3.1-8b-instant',
  },
  {
    displayName: 'Llama3 70B 8192',
    key: 'llama3-70b-8192',
  },
  {
    displayName: 'Llama3 8B 8192',
    key: 'llama3-8b-8192',
  },
  {
    displayName: 'Mixtral 8x7B 32768',
    key: 'mixtral-8x7b-32768',
  },
  {
    displayName: 'Qwen QWQ 32B (Preview)',
    key: 'qwen-qwq-32b',
  },
  {
    displayName: 'Mistral Saba 24B (Preview)',
    key: 'mistral-saba-24b',
  },
  {
    displayName: 'Qwen 2.5 Coder 32B (Preview)',
    key: 'qwen-2.5-coder-32b',
  },
  {
    displayName: 'Qwen 2.5 32B (Preview)',
    key: 'qwen-2.5-32b',
  },
  {
    displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
    key: 'deepseek-r1-distill-qwen-32b',
  },
  {
    displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
    key: 'deepseek-r1-distill-llama-70b',
  },
  {
    displayName: 'Llama 3.3 70B SpecDec (Preview)',
    key: 'llama-3.3-70b-specdec',
  },
  {
    displayName: 'Llama 3.2 1B Preview (Preview)',
    key: 'llama-3.2-1b-preview',
  },
  {
    displayName: 'Llama 3.2 3B Preview (Preview)',
    key: 'llama-3.2-3b-preview',
  },
  {
    displayName: 'Llama 3.2 11B Vision Preview (Preview)',
    key: 'llama-3.2-11b-vision-preview',
  },
  {
    displayName: 'Llama 3.2 90B Vision Preview (Preview)',
    key: 'llama-3.2-90b-vision-preview',
  },
];
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadGroqChatModels = async () => {
  const groqApiKey = getGroqApiKey();
@@ -80,25 +8,129 @@ export const loadGroqChatModels = async () => {
  if (!groqApiKey) return {};

  try {
    const chatModels: Record<string, ChatModel> = {};

    groqChatModels.forEach((model) => {
      chatModels[model.key] = {
        displayName: model.displayName,
        model: new ChatOpenAI({
          openAIApiKey: groqApiKey,
          modelName: model.key,
          temperature: 0.7,
          configuration: {
    const chatModels = {
      'llama-3.3-70b-versatile': {
        displayName: 'Llama 3.3 70B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.3-70b-versatile',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        }) as unknown as BaseChatModel,
      };
    });
        ),
      },
      'llama-3.2-3b-preview': {
        displayName: 'Llama 3.2 3B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.2-3b-preview',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama-3.2-11b-vision-preview': {
        displayName: 'Llama 3.2 11B Vision',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.2-11b-vision-preview',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama-3.2-90b-vision-preview': {
        displayName: 'Llama 3.2 90B Vision',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.2-90b-vision-preview',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama-3.1-8b-instant': {
        displayName: 'Llama 3.1 8B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama-3.1-8b-instant',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama3-8b-8192': {
        displayName: 'LLaMA3 8B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama3-8b-8192',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'llama3-70b-8192': {
        displayName: 'LLaMA3 70B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama3-70b-8192',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'mixtral-8x7b-32768': {
        displayName: 'Mixtral 8x7B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'mixtral-8x7b-32768',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
      'gemma2-9b-it': {
        displayName: 'Gemma2 9B',
        model: new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'gemma2-9b-it',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      },
    };

    return chatModels;
  } catch (err) {
    console.error(`Error loading Groq models: ${err}`);
    logger.error(`Error loading Groq models: ${err}`);
    return {};
  }
};
@@ -1,51 +1,33 @@
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '../config';
} from '../../config';
import { ChatOpenAI } from '@langchain/openai';
import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
import { loadGroqChatModels } from './groq';
import { loadAnthropicChatModels } from './anthropic';
import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
import { loadTransformersEmbeddingsModels } from './transformers';

export interface ChatModel {
  displayName: string;
  model: BaseChatModel;
}

export interface EmbeddingModel {
  displayName: string;
  model: Embeddings;
}

export const chatModelProviders: Record<
  string,
  () => Promise<Record<string, ChatModel>>
> = {
const chatModelProviders = {
  openai: loadOpenAIChatModels,
  ollama: loadOllamaChatModels,
  groq: loadGroqChatModels,
  ollama: loadOllamaChatModels,
  anthropic: loadAnthropicChatModels,
  gemini: loadGeminiChatModels,
};

export const embeddingModelProviders: Record<
  string,
  () => Promise<Record<string, EmbeddingModel>>
> = {
  openai: loadOpenAIEmbeddingModels,
  ollama: loadOllamaEmbeddingModels,
  gemini: loadGeminiEmbeddingModels,
  transformers: loadTransformersEmbeddingsModels,
const embeddingModelProviders = {
  openai: loadOpenAIEmbeddingsModels,
  local: loadTransformersEmbeddingsModels,
  ollama: loadOllamaEmbeddingsModels,
  gemini: loadGeminiEmbeddingsModels,
};

export const getAvailableChatModelProviders = async () => {
  const models: Record<string, Record<string, ChatModel>> = {};
  const models = {};

  for (const provider in chatModelProviders) {
    const providerModels = await chatModelProviders[provider]();
@@ -70,7 +52,7 @@ export const getAvailableChatModelProviders = async () => {
      configuration: {
        baseURL: customOpenAiApiUrl,
      },
    }) as unknown as BaseChatModel,
    }),
  },
}
  : {}),
@@ -80,7 +62,7 @@ export const getAvailableChatModelProviders = async () => {
};

export const getAvailableEmbeddingModelProviders = async () => {
  const models: Record<string, Record<string, EmbeddingModel>> = {};
  const models = {};

  for (const provider in embeddingModelProviders) {
    const providerModels = await embeddingModelProviders[provider]();
@@ -1,73 +1,74 @@
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios';

export const loadOllamaChatModels = async () => {
  const ollamaApiEndpoint = getOllamaApiEndpoint();
  const ollamaEndpoint = getOllamaApiEndpoint();
  const keepAlive = getKeepAlive();

  if (!ollamaApiEndpoint) return {};
  if (!ollamaEndpoint) return {};

  try {
    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const { models } = res.data;
    const { models: ollamaModels } = response.data;

    const chatModels: Record<string, ChatModel> = {};

    models.forEach((model: any) => {
      chatModels[model.model] = {
    const chatModels = ollamaModels.reduce((acc, model) => {
      acc[model.model] = {
        displayName: model.name,
        model: new ChatOllama({
          baseUrl: ollamaApiEndpoint,
          baseUrl: ollamaEndpoint,
          model: model.model,
          temperature: 0.7,
          keepAlive: getKeepAlive(),
          keepAlive: keepAlive,
        }),
      };
    });

      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    console.error(`Error loading Ollama models: ${err}`);
    logger.error(`Error loading Ollama models: ${err}`);
    return {};
  }
};

export const loadOllamaEmbeddingModels = async () => {
  const ollamaApiEndpoint = getOllamaApiEndpoint();
export const loadOllamaEmbeddingsModels = async () => {
  const ollamaEndpoint = getOllamaApiEndpoint();

  if (!ollamaApiEndpoint) return {};
  if (!ollamaEndpoint) return {};

  try {
    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const { models } = res.data;
    const { models: ollamaModels } = response.data;

    const embeddingModels: Record<string, EmbeddingModel> = {};

    models.forEach((model: any) => {
      embeddingModels[model.model] = {
    const embeddingsModels = ollamaModels.reduce((acc, model) => {
      acc[model.model] = {
        displayName: model.name,
        model: new OllamaEmbeddings({
          baseUrl: ollamaApiEndpoint,
          baseUrl: ollamaEndpoint,
          model: model.model,
        }),
      };
    });

    return embeddingModels;
      return acc;
    }, {});

    return embeddingsModels;
  } catch (err) {
    console.error(`Error loading Ollama embeddings models: ${err}`);
    logger.error(`Error loading Ollama embeddings model: ${err}`);
    return {};
  }
};
@@ -1,90 +1,89 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';

const openaiChatModels: Record<string, string>[] = [
  {
    displayName: 'GPT-3.5 Turbo',
    key: 'gpt-3.5-turbo',
  },
  {
    displayName: 'GPT-4',
    key: 'gpt-4',
  },
  {
    displayName: 'GPT-4 turbo',
    key: 'gpt-4-turbo',
  },
  {
    displayName: 'GPT-4 omni',
    key: 'gpt-4o',
  },
  {
    displayName: 'GPT-4 omni mini',
    key: 'gpt-4o-mini',
  },
];

const openaiEmbeddingModels: Record<string, string>[] = [
  {
    displayName: 'Text Embedding 3 Small',
    key: 'text-embedding-3-small',
  },
  {
    displayName: 'Text Embedding 3 Large',
    key: 'text-embedding-3-large',
  },
];
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadOpenAIChatModels = async () => {
  const openaiApiKey = getOpenaiApiKey();
  const openAIApiKey = getOpenaiApiKey();

  if (!openaiApiKey) return {};
  if (!openAIApiKey) return {};

  try {
    const chatModels: Record<string, ChatModel> = {};

    openaiChatModels.forEach((model) => {
      chatModels[model.key] = {
        displayName: model.displayName,
    const chatModels = {
      'gpt-3.5-turbo': {
        displayName: 'GPT-3.5 Turbo',
        model: new ChatOpenAI({
          openAIApiKey: openaiApiKey,
          modelName: model.key,
          openAIApiKey,
          modelName: 'gpt-3.5-turbo',
          temperature: 0.7,
        }) as unknown as BaseChatModel,
      };
    });
        }),
      },
      'gpt-4': {
        displayName: 'GPT-4',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4',
          temperature: 0.7,
        }),
      },
      'gpt-4-turbo': {
        displayName: 'GPT-4 turbo',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4-turbo',
          temperature: 0.7,
        }),
      },
      'gpt-4o': {
        displayName: 'GPT-4 omni',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4o',
          temperature: 0.7,
        }),
      },
      'gpt-4o-mini': {
        displayName: 'GPT-4 omni mini',
        model: new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4o-mini',
          temperature: 0.7,
        }),
      },
    };

    return chatModels;
  } catch (err) {
    console.error(`Error loading OpenAI models: ${err}`);
    logger.error(`Error loading OpenAI models: ${err}`);
    return {};
  }
};

export const loadOpenAIEmbeddingModels = async () => {
  const openaiApiKey = getOpenaiApiKey();
export const loadOpenAIEmbeddingsModels = async () => {
  const openAIApiKey = getOpenaiApiKey();

  if (!openaiApiKey) return {};
  if (!openAIApiKey) return {};

  try {
    const embeddingModels: Record<string, EmbeddingModel> = {};

    openaiEmbeddingModels.forEach((model) => {
      embeddingModels[model.key] = {
        displayName: model.displayName,
    const embeddingModels = {
      'text-embedding-3-small': {
        displayName: 'Text Embedding 3 Small',
        model: new OpenAIEmbeddings({
          openAIApiKey: openaiApiKey,
          modelName: model.key,
        }) as unknown as Embeddings,
      };
    });
          openAIApiKey,
          modelName: 'text-embedding-3-small',
        }),
      },
      'text-embedding-3-large': {
        displayName: 'Text Embedding 3 Large',
        model: new OpenAIEmbeddings({
          openAIApiKey,
          modelName: 'text-embedding-3-large',
        }),
      },
    };

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading OpenAI embeddings models: ${err}`);
    logger.error(`Error loading OpenAI embeddings model: ${err}`);
    return {};
  }
};
@@ -1,3 +1,4 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';

export const loadTransformersEmbeddingsModels = async () => {
@@ -25,7 +26,7 @@ export const loadTransformersEmbeddingsModels = async () => {

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading Transformers embeddings model: ${err}`);
    logger.error(`Error loading Transformers embeddings model: ${err}`);
    return {};
  }
};
@@ -1,59 +0,0 @@
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import prompts from '../prompts';

export const searchHandlers: Record<string, MetaSearchAgent> = {
  webSearch: new MetaSearchAgent({
    activeEngines: [],
    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
    responsePrompt: prompts.webSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0.3,
    searchWeb: true,
    summarizer: true,
  }),
  academicSearch: new MetaSearchAgent({
    activeEngines: ['arxiv', 'google scholar', 'pubmed'],
    queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
    responsePrompt: prompts.academicSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0,
    searchWeb: true,
    summarizer: false,
  }),
  writingAssistant: new MetaSearchAgent({
    activeEngines: [],
    queryGeneratorPrompt: '',
    responsePrompt: prompts.writingAssistantPrompt,
    rerank: true,
    rerankThreshold: 0,
    searchWeb: false,
    summarizer: false,
  }),
  wolframAlphaSearch: new MetaSearchAgent({
    activeEngines: ['wolframalpha'],
    queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
    responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
    rerank: false,
    rerankThreshold: 0,
    searchWeb: true,
    summarizer: false,
  }),
  youtubeSearch: new MetaSearchAgent({
    activeEngines: ['youtube'],
    queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
    responsePrompt: prompts.youtubeSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0.3,
    searchWeb: true,
    summarizer: false,
  }),
  redditSearch: new MetaSearchAgent({
    activeEngines: ['reddit'],
    queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
    responsePrompt: prompts.redditSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0.3,
    searchWeb: true,
    summarizer: false,
  }),
};
@@ -1,5 +1,5 @@
import axios from 'axios';
import { getSearxngApiEndpoint } from './config';
import { getSearxngApiEndpoint } from '../config';

interface SearxngSearchOptions {
  categories?: string[];
@@ -30,12 +30,11 @@ export const searchSearxng = async (

  if (opts) {
    Object.keys(opts).forEach((key) => {
      const value = opts[key as keyof SearxngSearchOptions];
      if (Array.isArray(value)) {
        url.searchParams.append(key, value.join(','));
      if (Array.isArray(opts[key])) {
        url.searchParams.append(key, opts[key].join(','));
        return;
      }
      url.searchParams.append(key, value as string);
      url.searchParams.append(key, opts[key]);
    });
  }

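For orientation, a minimal usage sketch of searchSearxng; the engines and pageno options mirror the calls made by the discover route later in this diff.

// Sketch mirroring the discover route below (runs in an async context).
const res = await searchSearxng('site:businessinsider.com AI', {
  engines: ['bing news'],
  pageno: 1,
});
console.log(res.results.length); // results carry fields such as url, title, img_src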
src/lib/types/compute-dot.d.ts (vendored, 5 lines)
@@ -1,5 +0,0 @@
declare function computeDot(vectorA: number[], vectorB: number[]): number;

declare module 'compute-dot' {
  export default computeDot;
}
src/routes/chats.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
import express from 'express';
import logger from '../utils/logger';
import db from '../db/index';
import { eq } from 'drizzle-orm';
import { chats, messages } from '../db/schema';

const router = express.Router();

router.get('/', async (_, res) => {
  try {
    let chats = await db.query.chats.findMany();

    chats = chats.reverse();

    return res.status(200).json({ chats: chats });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in getting chats: ${err.message}`);
  }
});

router.get('/:id', async (req, res) => {
  try {
    const chatExists = await db.query.chats.findFirst({
      where: eq(chats.id, req.params.id),
    });

    if (!chatExists) {
      return res.status(404).json({ message: 'Chat not found' });
    }

    const chatMessages = await db.query.messages.findMany({
      where: eq(messages.chatId, req.params.id),
    });

    return res.status(200).json({ chat: chatExists, messages: chatMessages });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in getting chat: ${err.message}`);
  }
});

router.delete(`/:id`, async (req, res) => {
  try {
    const chatExists = await db.query.chats.findFirst({
      where: eq(chats.id, req.params.id),
    });

    if (!chatExists) {
      return res.status(404).json({ message: 'Chat not found' });
    }

    await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
    await db
      .delete(messages)
      .where(eq(messages.chatId, req.params.id))
      .execute();

    return res.status(200).json({ message: 'Chat deleted successfully' });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in deleting chat: ${err.message}`);
  }
});

export default router;
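A short sketch of exercising these routes from a client; the /api prefix is an assumption based on common Express setups and is not shown in this diff.

// Hypothetical client calls; base path assumed.
const { chats } = await (await fetch('/api/chats')).json();
const detail = await (await fetch(`/api/chats/${chats[0].id}`)).json(); // { chat, messages }
await fetch(`/api/chats/${chats[0].id}`, { method: 'DELETE' });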
@@ -1,22 +1,26 @@
import {
  getAnthropicApiKey,
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
  getGeminiApiKey,
  getGroqApiKey,
  getOllamaApiEndpoint,
  getOpenaiApiKey,
  updateConfig,
} from '@/lib/config';
import express from 'express';
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
} from '../lib/providers';
import {
  getGroqApiKey,
  getOllamaApiEndpoint,
  getAnthropicApiKey,
  getGeminiApiKey,
  getOpenaiApiKey,
  updateConfig,
  getCustomOpenaiApiUrl,
  getCustomOpenaiApiKey,
  getCustomOpenaiModelName,
} from '../config';
import logger from '../utils/logger';

export const GET = async (req: Request) => {
const router = express.Router();

router.get('/', async (_, res) => {
  try {
    const config: Record<string, any> = {};
    const config = {};

    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
      getAvailableChatModelProviders(),
@@ -57,53 +61,44 @@ export const GET = async (req: Request) => {
    config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
    config['customOpenaiModelName'] = getCustomOpenaiModelName();

    return Response.json({ ...config }, { status: 200 });
  } catch (err) {
    console.error('An error occurred while getting config:', err);
    return Response.json(
      { message: 'An error occurred while getting config' },
      { status: 500 },
    );
    res.status(200).json(config);
  } catch (err: any) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error getting config: ${err.message}`);
  }
};
});

export const POST = async (req: Request) => {
  try {
    const config = await req.json();
router.post('/', async (req, res) => {
  const config = req.body;

    const updatedConfig = {
      MODELS: {
        OPENAI: {
          API_KEY: config.openaiApiKey,
        },
        GROQ: {
          API_KEY: config.groqApiKey,
        },
        ANTHROPIC: {
          API_KEY: config.anthropicApiKey,
        },
        GEMINI: {
          API_KEY: config.geminiApiKey,
        },
        OLLAMA: {
          API_URL: config.ollamaApiUrl,
        },
        CUSTOM_OPENAI: {
          API_URL: config.customOpenaiApiUrl,
          API_KEY: config.customOpenaiApiKey,
          MODEL_NAME: config.customOpenaiModelName,
        },
  const updatedConfig = {
    MODELS: {
      OPENAI: {
        API_KEY: config.openaiApiKey,
      },
    };
      GROQ: {
        API_KEY: config.groqApiKey,
      },
      ANTHROPIC: {
        API_KEY: config.anthropicApiKey,
      },
      GEMINI: {
        API_KEY: config.geminiApiKey,
      },
      OLLAMA: {
        API_URL: config.ollamaApiUrl,
      },
      CUSTOM_OPENAI: {
        API_URL: config.customOpenaiApiUrl,
        API_KEY: config.customOpenaiApiKey,
        MODEL_NAME: config.customOpenaiModelName,
      },
    },
  };

    updateConfig(updatedConfig);
  updateConfig(updatedConfig);

    return Response.json({ message: 'Config updated' }, { status: 200 });
  } catch (err) {
    console.error('An error occurred while updating config:', err);
    return Response.json(
      { message: 'An error occurred while updating config' },
      { status: 500 },
    );
  }
};
  res.status(200).json({ message: 'Config updated' });
});

export default router;
src/routes/discover.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
import express from 'express';
import { searchSearxng } from '../lib/searxng';
import logger from '../utils/logger';

const router = express.Router();

router.get('/', async (req, res) => {
  try {
    const data = (
      await Promise.all([
        searchSearxng('site:businessinsider.com AI', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:www.exchangewire.com AI', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:yahoo.com AI', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:businessinsider.com tech', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:www.exchangewire.com tech', {
          engines: ['bing news'],
          pageno: 1,
        }),
        searchSearxng('site:yahoo.com tech', {
          engines: ['bing news'],
          pageno: 1,
        }),
      ])
    )
      .map((result) => result.results)
      .flat()
      .sort(() => Math.random() - 0.5);

    return res.json({ blogs: data });
  } catch (err: any) {
    logger.error(`Error in discover route: ${err.message}`);
    return res.status(500).json({ message: 'An error has occurred' });
  }
});

export default router;
src/routes/images.ts (new file, 82 lines)
@@ -0,0 +1,82 @@
import express from 'express';
import handleImageSearch from '../chains/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '../config';

const router = express.Router();

interface ChatModel {
  provider: string;
  model: string;
}

interface ImageSearchBody {
  query: string;
  chatHistory: any[];
  chatModel?: ChatModel;
}

router.post('/', async (req, res) => {
  try {
    let body: ImageSearchBody = req.body;

    const chatHistory = body.chatHistory.map((msg: any) => {
      if (msg.role === 'user') {
        return new HumanMessage(msg.content);
      } else if (msg.role === 'assistant') {
        return new AIMessage(msg.content);
      }
    });

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
    const chatModel =
      body.chatModel?.model ||
      Object.keys(chatModelProviders[chatModelProvider])[0];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        modelName: getCustomOpenaiModelName(),
        openAIApiKey: getCustomOpenaiApiKey(),
        temperature: 0.7,
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
      }) as unknown as BaseChatModel;
    } else if (
      chatModelProviders[chatModelProvider] &&
      chatModelProviders[chatModelProvider][chatModel]
    ) {
      llm = chatModelProviders[chatModelProvider][chatModel]
        .model as unknown as BaseChatModel | undefined;
    }

    if (!llm) {
      return res.status(400).json({ message: 'Invalid model selected' });
    }

    const images = await handleImageSearch(
      { query: body.query, chat_history: chatHistory },
      llm,
    );

    res.status(200).json({ images });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in image search: ${err.message}`);
  }
});

export default router;
src/routes/index.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
import express from 'express';
import imagesRouter from './images';
import videosRouter from './videos';
import configRouter from './config';
import modelsRouter from './models';
import suggestionsRouter from './suggestions';
import chatsRouter from './chats';
import searchRouter from './search';
import discoverRouter from './discover';
import uploadsRouter from './uploads';

const router = express.Router();

router.use('/images', imagesRouter);
router.use('/videos', videosRouter);
router.use('/config', configRouter);
router.use('/models', modelsRouter);
router.use('/suggestions', suggestionsRouter);
router.use('/chats', chatsRouter);
router.use('/search', searchRouter);
router.use('/discover', discoverRouter);
router.use('/uploads', uploadsRouter);

export default router;
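A minimal sketch of how this aggregate router would be mounted in the Express app; the '/api' mount point and the listen port are assumptions, since the server entry file is not part of this diff.

// Hypothetical wiring; mount point and port assumed.
import express from 'express';
import routes from './routes';

const app = express();
app.use(express.json());
app.use('/api', routes); // e.g. GET /api/models, POST /api/search
app.listen(3001); // illustrative; the real port would come from config's getPort()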
src/routes/models.ts (new file, 36 lines)
@@ -0,0 +1,36 @@
import express from 'express';
import logger from '../utils/logger';
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '../lib/providers';

const router = express.Router();

router.get('/', async (req, res) => {
  try {
    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
      getAvailableChatModelProviders(),
      getAvailableEmbeddingModelProviders(),
    ]);

    Object.keys(chatModelProviders).forEach((provider) => {
      Object.keys(chatModelProviders[provider]).forEach((model) => {
        delete chatModelProviders[provider][model].model;
      });
    });

    Object.keys(embeddingModelProviders).forEach((provider) => {
      Object.keys(embeddingModelProviders[provider]).forEach((model) => {
        delete embeddingModelProviders[provider][model].model;
      });
    });

    res.status(200).json({ chatModelProviders, embeddingModelProviders });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(err.message);
  }
});

export default router;
@@ -1,29 +1,33 @@
import express from 'express';
import logger from '../utils/logger';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
} from '../lib/providers';
import { searchHandlers } from '../websocket/messageHandler';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '@/lib/search/metaSearchAgent';
import { MetaSearchAgentType } from '../search/metaSearchAgent';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
} from '../config';

const router = express.Router();

interface chatModel {
  provider: string;
  name: string;
  model: string;
  customOpenAIKey?: string;
  customOpenAIBaseURL?: string;
}

interface embeddingModel {
  provider: string;
  name: string;
  model: string;
}

interface ChatRequestBody {
@@ -35,24 +39,27 @@ interface ChatRequestBody {
  history: Array<[string, string]>;
}

export const POST = async (req: Request) => {
router.post('/', async (req, res) => {
  try {
    const body: ChatRequestBody = await req.json();
    const body: ChatRequestBody = req.body;

    if (!body.focusMode || !body.query) {
      return Response.json(
        { message: 'Missing focus mode or query' },
        { status: 400 },
      );
      return res.status(400).json({ message: 'Missing focus mode or query' });
    }

    body.history = body.history || [];
    body.optimizationMode = body.optimizationMode || 'balanced';

    const history: BaseMessage[] = body.history.map((msg) => {
      return msg[0] === 'human'
        ? new HumanMessage({ content: msg[1] })
        : new AIMessage({ content: msg[1] });
      if (msg[0] === 'human') {
        return new HumanMessage({
          content: msg[1],
        });
      } else {
        return new AIMessage({
          content: msg[1],
        });
      }
    });

    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
@@ -63,13 +70,13 @@ export const POST = async (req: Request) => {
    const chatModelProvider =
      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
    const chatModel =
      body.chatModel?.name ||
      body.chatModel?.model ||
      Object.keys(chatModelProviders[chatModelProvider])[0];

    const embeddingModelProvider =
      body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
    const embeddingModel =
      body.embeddingModel?.name ||
      body.embeddingModel?.model ||
      Object.keys(embeddingModelProviders[embeddingModelProvider])[0];

    let llm: BaseChatModel | undefined;
@@ -77,7 +84,7 @@ export const POST = async (req: Request) => {

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        modelName: body.chatModel?.name || getCustomOpenaiModelName(),
        modelName: body.chatModel?.model || getCustomOpenaiModelName(),
        openAIApiKey:
          body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
        temperature: 0.7,
@@ -104,16 +111,13 @@ export const POST = async (req: Request) => {
    }

    if (!llm || !embeddings) {
      return Response.json(
        { message: 'Invalid model selected' },
        { status: 400 },
      );
      return res.status(400).json({ message: 'Invalid model selected' });
    }

    const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];

    if (!searchHandler) {
      return Response.json({ message: 'Invalid focus mode' }, { status: 400 });
      return res.status(400).json({ message: 'Invalid focus mode' });
    }

    const emitter = await searchHandler.searchAndAnswer(
@@ -125,45 +129,30 @@ export const POST = async (req: Request) => {
      [],
    );

    return new Promise(
      (
        resolve: (value: Response) => void,
        reject: (value: Response) => void,
      ) => {
        let message = '';
        let sources: any[] = [];
    let message = '';
    let sources = [];

        emitter.on('data', (data) => {
          try {
            const parsedData = JSON.parse(data);
            if (parsedData.type === 'response') {
              message += parsedData.data;
            } else if (parsedData.type === 'sources') {
              sources = parsedData.data;
            }
          } catch (error) {
            reject(
              Response.json({ message: 'Error parsing data' }, { status: 500 }),
            );
          }
        });
    emitter.on('data', (data) => {
      const parsedData = JSON.parse(data);
      if (parsedData.type === 'response') {
        message += parsedData.data;
      } else if (parsedData.type === 'sources') {
        sources = parsedData.data;
      }
    });

        emitter.on('end', () => {
          resolve(Response.json({ message, sources }, { status: 200 }));
        });
    emitter.on('end', () => {
      res.status(200).json({ message, sources });
    });

        emitter.on('error', (error) => {
          reject(
            Response.json({ message: 'Search error', error }, { status: 500 }),
          );
        });
      },
    );
    emitter.on('error', (data) => {
      const parsedData = JSON.parse(data);
      res.status(500).json({ message: parsedData.data });
    });
  } catch (err: any) {
    console.error(`Error in getting search results: ${err.message}`);
    return Response.json(
      { message: 'An error has occurred.' },
      { status: 500 },
    );
    logger.error(`Error in getting search results: ${err.message}`);
    res.status(500).json({ message: 'An error has occurred.' });
  }
};
});

export default router;
src/routes/suggestions.ts (new file, 81 lines)
@@ -0,0 +1,81 @@
import express from 'express';
import generateSuggestions from '../chains/suggestionGeneratorAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '../config';

const router = express.Router();

interface ChatModel {
  provider: string;
  model: string;
}

interface SuggestionsBody {
  chatHistory: any[];
  chatModel?: ChatModel;
}

router.post('/', async (req, res) => {
  try {
    let body: SuggestionsBody = req.body;

    const chatHistory = body.chatHistory.map((msg: any) => {
      if (msg.role === 'user') {
        return new HumanMessage(msg.content);
      } else if (msg.role === 'assistant') {
        return new AIMessage(msg.content);
      }
    });

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
    const chatModel =
      body.chatModel?.model ||
      Object.keys(chatModelProviders[chatModelProvider])[0];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        modelName: getCustomOpenaiModelName(),
        openAIApiKey: getCustomOpenaiApiKey(),
        temperature: 0.7,
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
      }) as unknown as BaseChatModel;
    } else if (
      chatModelProviders[chatModelProvider] &&
      chatModelProviders[chatModelProvider][chatModel]
    ) {
      llm = chatModelProviders[chatModelProvider][chatModel]
        .model as unknown as BaseChatModel | undefined;
    }

    if (!llm) {
      return res.status(400).json({ message: 'Invalid model selected' });
    }

    const suggestions = await generateSuggestions(
      { chat_history: chatHistory },
      llm,
    );

    res.status(200).json({ suggestions: suggestions });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in generating suggestions: ${err.message}`);
  }
});

export default router;
src/routes/uploads.ts (new file, 151 lines)
@ -0,0 +1,151 @@
|
||||
import express from 'express';
|
||||
import logger from '../utils/logger';
|
||||
import multer from 'multer';
|
||||
import path from 'path';
|
||||
import crypto from 'crypto';
|
||||
import fs from 'fs';
|
||||
import { Embeddings } from '@langchain/core/embeddings';
|
||||
import { getAvailableEmbeddingModelProviders } from '../lib/providers';
|
||||
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
|
||||
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
|
||||
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
|
||||
import { Document } from 'langchain/document';
|
||||
|
||||
const router = express.Router();
|
||||
|
||||
const splitter = new RecursiveCharacterTextSplitter({
|
||||
chunkSize: 500,
|
||||
chunkOverlap: 100,
|
||||
});
|
||||
|
||||
const storage = multer.diskStorage({
|
||||
destination: (req, file, cb) => {
|
||||
cb(null, path.join(process.cwd(), './uploads'));
|
||||
},
|
||||
filename: (req, file, cb) => {
|
||||
const splitedFileName = file.originalname.split('.');
|
||||
const fileExtension = splitedFileName[splitedFileName.length - 1];
|
||||
if (!['pdf', 'docx', 'txt'].includes(fileExtension)) {
|
||||
return cb(new Error('File type is not supported'), '');
|
||||
}
|
||||
cb(null, `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`);
|
||||
},
|
||||
});
|
||||
|
||||
const upload = multer({ storage });
|
||||
|
||||
router.post(
|
||||
'/',
|
||||
upload.fields([
|
||||
{ name: 'files' },
|
||||
{ name: 'embedding_model', maxCount: 1 },
|
||||
{ name: 'embedding_model_provider', maxCount: 1 },
|
||||
]),
|
||||
async (req, res) => {
|
||||
try {
|
||||
const { embedding_model, embedding_model_provider } = req.body;
|
||||
|
||||
if (!embedding_model || !embedding_model_provider) {
|
||||
res
|
||||
.status(400)
|
||||
.json({ message: 'Missing embedding model or provider' });
|
||||
return;
|
||||
}
|
||||
|
||||
const embeddingModels = await getAvailableEmbeddingModelProviders();
|
||||
const provider =
|
||||
embedding_model_provider ?? Object.keys(embeddingModels)[0];
|
||||
const embeddingModel: Embeddings =
|
||||
embedding_model ?? Object.keys(embeddingModels[provider])[0];
|
||||
|
||||
let embeddingsModel: Embeddings | undefined;
|
||||
|
||||
if (
|
||||
embeddingModels[provider] &&
|
||||
embeddingModels[provider][embeddingModel]
|
||||
) {
|
||||
embeddingsModel = embeddingModels[provider][embeddingModel].model as
|
||||
| Embeddings
|
||||
| undefined;
|
||||
}
|
||||
|
||||
if (!embeddingsModel) {
|
||||
res.status(400).json({ message: 'Invalid LLM model selected' });
|
||||
return;
|
||||
}
|
||||
|
||||
const files = req.files['files'] as Express.Multer.File[];
|
||||
if (!files || files.length === 0) {
|
||||
res.status(400).json({ message: 'No files uploaded' });
|
||||
return;
|
||||
}
|
||||
|
||||
await Promise.all(
|
||||
files.map(async (file) => {
|
||||
let docs: Document[] = [];
|
||||
|
||||
if (file.mimetype === 'application/pdf') {
|
||||
const loader = new PDFLoader(file.path);
|
||||
docs = await loader.load();
|
||||
} else if (
|
||||
file.mimetype ===
|
||||
'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
|
||||
) {
|
||||
const loader = new DocxLoader(file.path);
|
||||
docs = await loader.load();
|
||||
} else if (file.mimetype === 'text/plain') {
|
||||
const text = fs.readFileSync(file.path, 'utf-8');
|
||||
docs = [
|
||||
new Document({
|
||||
pageContent: text,
|
||||
metadata: {
|
||||
title: file.originalname,
|
||||
},
|
||||
}),
|
||||
];
|
||||
}
|
||||
|
||||
const splitted = await splitter.splitDocuments(docs);
|
||||
|
||||
const json = JSON.stringify({
|
||||
title: file.originalname,
|
||||
contents: splitted.map((doc) => doc.pageContent),
|
||||
});
|
||||
|
||||
const pathToSave = file.path.replace(/\.\w+$/, '-extracted.json');
|
||||
fs.writeFileSync(pathToSave, json);
|
||||
|
||||
const embeddings = await embeddingsModel.embedDocuments(
|
||||
splitted.map((doc) => doc.pageContent),
|
||||
);
|
||||
|
||||
const embeddingsJSON = JSON.stringify({
|
||||
title: file.originalname,
|
||||
embeddings: embeddings,
|
||||
});
|
||||
|
||||
const pathToSaveEmbeddings = file.path.replace(
|
||||
/\.\w+$/,
|
||||
'-embeddings.json',
|
||||
);
|
||||
fs.writeFileSync(pathToSaveEmbeddings, embeddingsJSON);
|
||||
}),
|
||||
);
|
||||
|
||||
res.status(200).json({
|
||||
files: files.map((file) => {
|
||||
return {
|
||||
fileName: file.originalname,
|
||||
fileExtension: file.filename.split('.').pop(),
|
||||
fileId: file.filename.replace(/\.\w+$/, ''),
|
||||
};
|
||||
}),
|
||||
});
|
||||
} catch (err: any) {
|
||||
logger.error(`Error in uploading file results: ${err.message}`);
|
||||
res.status(500).json({ message: 'An error has occurred.' });
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
export default router;
|
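A multipart upload to this route might look like the following sketch; the field names come from the upload.fields() call above, while the host, port, and model names are assumptions:

// Hypothetical upload request to POST /api/uploads.
const pdfBytes = new Uint8Array(); // stand-in for real file contents
const form = new FormData();
form.append('files', new Blob([pdfBytes], { type: 'application/pdf' }), 'paper.pdf');
form.append('embedding_model_provider', 'openai');
form.append('embedding_model', 'text-embedding-3-small');

const res = await fetch('http://localhost:3001/api/uploads', {
  method: 'POST',
  body: form, // fetch sets the multipart boundary header itself
});
const { files } = await res.json(); // [{ fileName, fileExtension, fileId }]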
82 src/routes/videos.ts Normal file
@@ -0,0 +1,82 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../chains/videoSearchAgent';
import { ChatOpenAI } from '@langchain/openai';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '../config';

const router = express.Router();

interface ChatModel {
  provider: string;
  model: string;
}

interface VideoSearchBody {
  query: string;
  chatHistory: any[];
  chatModel?: ChatModel;
}

router.post('/', async (req, res) => {
  try {
    let body: VideoSearchBody = req.body;

    const chatHistory = body.chatHistory.map((msg: any) => {
      if (msg.role === 'user') {
        return new HumanMessage(msg.content);
      } else if (msg.role === 'assistant') {
        return new AIMessage(msg.content);
      }
    });

    const chatModelProviders = await getAvailableChatModelProviders();

    const chatModelProvider =
      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
    const chatModel =
      body.chatModel?.model ||
      Object.keys(chatModelProviders[chatModelProvider])[0];

    let llm: BaseChatModel | undefined;

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        modelName: getCustomOpenaiModelName(),
        openAIApiKey: getCustomOpenaiApiKey(),
        temperature: 0.7,
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
      }) as unknown as BaseChatModel;
    } else if (
      chatModelProviders[chatModelProvider] &&
      chatModelProviders[chatModelProvider][chatModel]
    ) {
      llm = chatModelProviders[chatModelProvider][chatModel]
        .model as unknown as BaseChatModel | undefined;
    }

    if (!llm) {
      return res.status(400).json({ message: 'Invalid model selected' });
    }

    const videos = await handleVideoSearch(
      { chat_history: chatHistory, query: body.query },
      llm,
    );

    res.status(200).json({ videos });
  } catch (err) {
    res.status(500).json({ message: 'An error has occurred.' });
    logger.error(`Error in video search: ${err.message}`);
  }
});

export default router;
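This route mirrors the suggestions route above but adds a required query field; a hypothetical call (same assumed host and port):

// Hypothetical request to POST /api/videos; the body shape follows VideoSearchBody.
const res = await fetch('http://localhost:3001/api/videos', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    query: 'How do black holes evaporate?',
    chatHistory: [],
  }),
});
const { videos } = await res.json();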
@@ -13,17 +13,18 @@ import {
} from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document';
import { searchSearxng } from '../searxng';
import path from 'node:path';
import fs from 'node:fs';
import { searchSearxng } from '../lib/searxng';
import path from 'path';
import fs from 'fs';
import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { IterableReadableStream } from '@langchain/core/utils/stream';

export interface MetaSearchAgentType {
  searchAndAnswer: (
@@ -89,7 +90,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
      question = 'summarize';
    }

    let docs: Document[] = [];
    let docs = [];

    const linkDocs = await getDocumentsFromLinks({ links });

@@ -202,8 +203,6 @@ class MetaSearchAgent implements MetaSearchAgentType {

      return { query: question, docs: docs };
    } else {
      question = question.replace(/<think>.*?<\/think>/g, '');

      const res = await searchSearxng(question, {
        language: 'en',
        engines: this.config.activeEngines,
@@ -312,7 +311,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
    const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));

    const fileSimilaritySearchObject = content.contents.map(
      (c: string, i: number) => {
      (c: string, i) => {
        return {
          fileName: content.title,
          content: c,
@@ -415,8 +414,6 @@ class MetaSearchAgent implements MetaSearchAgentType {

      return sortedDocs;
    }

    return [];
  }

  private processDocs(docs: Document[]) {
@@ -429,7 +426,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
  }

  private async handleStream(
    stream: AsyncGenerator<StreamEvent, any, any>,
    stream: IterableReadableStream<StreamEvent>,
    emitter: eventEmitter,
  ) {
    for await (const event of stream) {
@@ -6,7 +6,7 @@ const computeSimilarity = (x: number[], y: number[]): number => {
  const similarityMeasure = getSimilarityMeasure();

  if (similarityMeasure === 'cosine') {
    return cosineSimilarity(x, y) as number;
    return cosineSimilarity(x, y);
  } else if (similarityMeasure === 'dot') {
    return dot(x, y);
  }
@@ -1,21 +1,30 @@
import axios from 'axios';
import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { RecursiveCharacterTextSplitter, MarkdownTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
import logger from './logger';
import { getJinaApiKey } from '../config';

export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
  const splitter = new RecursiveCharacterTextSplitter();

  links = links.map(link => link.startsWith('http://') || link.startsWith('https://')
    ? link
    : `https://${link}`)

  if (getJinaApiKey()) {
    return await getDocumentsFromJinaReader({ links });
  }

  return await getDocumentsFromLocal({ links });
};

const getDocumentsFromLocal = async ({links}: {links: string[]}) => {
  const splitter = new RecursiveCharacterTextSplitter();
  let docs: Document[] = [];

  await Promise.all(
    links.map(async (link) => {
      link =
        link.startsWith('http://') || link.startsWith('https://')
          ? link
          : `https://${link}`;

      try {
        const res = await axios.get(link, {
          responseType: 'arraybuffer',
@@ -78,13 +87,12 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {

        docs.push(...linkDocs);
      } catch (err) {
        console.error(
          'An error occurred while getting documents from links: ',
          err,
        logger.error(
          `Error at generating documents from links: ${err.message}`,
        );
        docs.push(
          new Document({
            pageContent: `Failed to retrieve content from the link: ${err}`,
            pageContent: `Failed to retrieve content from the link: ${err.message}`,
            metadata: {
              title: 'Failed to retrieve content',
              url: link,
@@ -94,6 +102,62 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
      }
    }),
  );

  return docs;
};

const getDocumentsFromJinaReader = async ({links}: { links: string[] }) => {
  const splitter = new MarkdownTextSplitter();
  let docs: Document[] = [];

  await Promise.all(
    links.map(async link => {
      try {
        const res = await axios.get(`https://r.jina.ai/${link}`, {
          headers: {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${getJinaApiKey()}`,
          }
        });

        if (res.data.code === 200) {
          const data = res.data.data
          const splittedText = await splitter.splitText(data.content);
          const linkDocs = splittedText.map((text) => {
            return new Document({
              pageContent: text,
              metadata: {
                title: data.title,
                url: link,
              },
            });
          });

          docs.push(...linkDocs);
          return;
        } else {
          docs.push(
            new Document({
              pageContent: `Failed to retrieve content from the link in the Jina reader API, code: ${res.data.code}`,
              metadata: {
                title: 'Failed to retrieve content',
                url: link,
              },
            }),
          );
        }
      } catch (err) {
        docs.push(
          new Document({
            pageContent: `Failed to retrieve content from the link: ${err.message}`,
            metadata: {
              title: 'Failed to retrieve content',
              url: link,
            },
          }),
        );
      }
    })
  );
  return docs;
};
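A call site for this helper might look like the sketch below; note that bare domains are upgraded to https:// before fetching, and the Jina reader path is only taken when a Jina API key is configured:

// Hypothetical usage of getDocumentsFromLinks.
const docs = await getDocumentsFromLinks({
  links: ['example.com', 'https://arxiv.org/abs/1706.03762'],
});
console.log(docs[0].metadata); // e.g. { title: ..., url: 'https://example.com' }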
22 src/utils/logger.ts Normal file
@@ -0,0 +1,22 @@
import winston from 'winston';

const logger = winston.createLogger({
  level: 'info',
  transports: [
    new winston.transports.Console({
      format: winston.format.combine(
        winston.format.colorize(),
        winston.format.simple(),
      ),
    }),
    new winston.transports.File({
      filename: 'app.log',
      format: winston.format.combine(
        winston.format.timestamp(),
        winston.format.json(),
      ),
    }),
  ],
});

export default logger;
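Typical usage elsewhere in the backend is just the default export; a small sketch:

import logger from '../utils/logger';

logger.info('WebSocket server started'); // goes to the console and to app.log
logger.debug('Connection closed'); // suppressed at the default 'info' level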
122 src/websocket/connectionManager.ts Normal file
@@ -0,0 +1,122 @@
import { WebSocket } from 'ws';
import { handleMessage } from './messageHandler';
import {
  getAvailableEmbeddingModelProviders,
  getAvailableChatModelProviders,
} from '../lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import type { IncomingMessage } from 'http';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
} from '../config';

export const handleConnection = async (
  ws: WebSocket,
  request: IncomingMessage,
) => {
  try {
    const searchParams = new URL(request.url, `http://${request.headers.host}`)
      .searchParams;

    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
      getAvailableChatModelProviders(),
      getAvailableEmbeddingModelProviders(),
    ]);

    const chatModelProvider =
      searchParams.get('chatModelProvider') ||
      Object.keys(chatModelProviders)[0];
    const chatModel =
      searchParams.get('chatModel') ||
      Object.keys(chatModelProviders[chatModelProvider])[0];

    const embeddingModelProvider =
      searchParams.get('embeddingModelProvider') ||
      Object.keys(embeddingModelProviders)[0];
    const embeddingModel =
      searchParams.get('embeddingModel') ||
      Object.keys(embeddingModelProviders[embeddingModelProvider])[0];

    let llm: BaseChatModel | undefined;
    let embeddings: Embeddings | undefined;

    if (
      chatModelProviders[chatModelProvider] &&
      chatModelProviders[chatModelProvider][chatModel] &&
      chatModelProvider != 'custom_openai'
    ) {
      llm = chatModelProviders[chatModelProvider][chatModel]
        .model as unknown as BaseChatModel | undefined;
    } else if (chatModelProvider == 'custom_openai') {
      const customOpenaiApiKey = getCustomOpenaiApiKey();
      const customOpenaiApiUrl = getCustomOpenaiApiUrl();
      const customOpenaiModelName = getCustomOpenaiModelName();

      if (customOpenaiApiKey && customOpenaiApiUrl && customOpenaiModelName) {
        llm = new ChatOpenAI({
          modelName: customOpenaiModelName,
          openAIApiKey: customOpenaiApiKey,
          temperature: 0.7,
          configuration: {
            baseURL: customOpenaiApiUrl,
          },
        }) as unknown as BaseChatModel;
      }
    }

    if (
      embeddingModelProviders[embeddingModelProvider] &&
      embeddingModelProviders[embeddingModelProvider][embeddingModel]
    ) {
      embeddings = embeddingModelProviders[embeddingModelProvider][
        embeddingModel
      ].model as Embeddings | undefined;
    }

    if (!llm || !embeddings) {
      ws.send(
        JSON.stringify({
          type: 'error',
          data: 'Invalid LLM or embeddings model selected, please refresh the page and try again.',
          key: 'INVALID_MODEL_SELECTED',
        }),
      );
      ws.close();
    }

    const interval = setInterval(() => {
      if (ws.readyState === ws.OPEN) {
        ws.send(
          JSON.stringify({
            type: 'signal',
            data: 'open',
          }),
        );
        clearInterval(interval);
      }
    }, 5);

    ws.on(
      'message',
      async (message) =>
        await handleMessage(message.toString(), ws, llm, embeddings),
    );

    ws.on('close', () => logger.debug('Connection closed'));
  } catch (err) {
    ws.send(
      JSON.stringify({
        type: 'error',
        data: 'Internal server error.',
        key: 'INTERNAL_SERVER_ERROR',
      }),
    );
    ws.close();
    logger.error(err);
  }
};
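A connecting client encodes its model choices in the query string, matching the searchParams.get() calls above; the host and model names in this sketch are placeholders:

// Hypothetical client-side handshake.
const ws = new WebSocket(
  'ws://localhost:3001?chatModelProvider=openai&chatModel=gpt-4o-mini' +
    '&embeddingModelProvider=openai&embeddingModel=text-embedding-3-small',
);
ws.addEventListener('message', (e) => {
  const msg = JSON.parse(e.data.toString());
  if (msg.type === 'signal' && msg.data === 'open') {
    // the server has resolved its models; safe to start sending messages
  }
});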
8 src/websocket/index.ts Normal file
@@ -0,0 +1,8 @@
import { initServer } from './websocketServer';
import http from 'http';

export const startWebSocketServer = (
  server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
) => {
  initServer(server);
};
272 src/websocket/messageHandler.ts Normal file
@@ -0,0 +1,272 @@
import { EventEmitter, WebSocket } from 'ws';
import { BaseMessage, AIMessage, HumanMessage } from '@langchain/core/messages';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import logger from '../utils/logger';
import db from '../db';
import { chats, messages as messagesSchema } from '../db/schema';
import { eq, asc, gt, and } from 'drizzle-orm';
import crypto from 'crypto';
import { getFileDetails } from '../utils/files';
import MetaSearchAgent, {
  MetaSearchAgentType,
} from '../search/metaSearchAgent';
import prompts from '../prompts';

type Message = {
  messageId: string;
  chatId: string;
  content: string;
};

type WSMessage = {
  message: Message;
  optimizationMode: 'speed' | 'balanced' | 'quality';
  type: string;
  focusMode: string;
  history: Array<[string, string]>;
  files: Array<string>;
};

export const searchHandlers = {
  webSearch: new MetaSearchAgent({
    activeEngines: [],
    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
    responsePrompt: prompts.webSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0.3,
    searchWeb: true,
    summarizer: true,
  }),
  academicSearch: new MetaSearchAgent({
    activeEngines: ['arxiv', 'google scholar', 'pubmed'],
    queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
    responsePrompt: prompts.academicSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0,
    searchWeb: true,
    summarizer: false,
  }),
  writingAssistant: new MetaSearchAgent({
    activeEngines: [],
    queryGeneratorPrompt: '',
    responsePrompt: prompts.writingAssistantPrompt,
    rerank: true,
    rerankThreshold: 0,
    searchWeb: false,
    summarizer: false,
  }),
  wolframAlphaSearch: new MetaSearchAgent({
    activeEngines: ['wolframalpha'],
    queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
    responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
    rerank: false,
    rerankThreshold: 0,
    searchWeb: true,
    summarizer: false,
  }),
  youtubeSearch: new MetaSearchAgent({
    activeEngines: ['youtube'],
    queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
    responsePrompt: prompts.youtubeSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0.3,
    searchWeb: true,
    summarizer: false,
  }),
  redditSearch: new MetaSearchAgent({
    activeEngines: ['reddit'],
    queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
    responsePrompt: prompts.redditSearchResponsePrompt,
    rerank: true,
    rerankThreshold: 0.3,
    searchWeb: true,
    summarizer: false,
  }),
};

const handleEmitterEvents = (
  emitter: EventEmitter,
  ws: WebSocket,
  messageId: string,
  chatId: string,
) => {
  let recievedMessage = '';
  let sources = [];

  emitter.on('data', (data) => {
    const parsedData = JSON.parse(data);
    if (parsedData.type === 'response') {
      ws.send(
        JSON.stringify({
          type: 'message',
          data: parsedData.data,
          messageId: messageId,
        }),
      );
      recievedMessage += parsedData.data;
    } else if (parsedData.type === 'sources') {
      ws.send(
        JSON.stringify({
          type: 'sources',
          data: parsedData.data,
          messageId: messageId,
        }),
      );
      sources = parsedData.data;
    }
  });
  emitter.on('end', () => {
    ws.send(JSON.stringify({ type: 'messageEnd', messageId: messageId }));

    db.insert(messagesSchema)
      .values({
        content: recievedMessage,
        chatId: chatId,
        messageId: messageId,
        role: 'assistant',
        metadata: JSON.stringify({
          createdAt: new Date(),
          ...(sources && sources.length > 0 && { sources }),
        }),
      })
      .execute();
  });
  emitter.on('error', (data) => {
    const parsedData = JSON.parse(data);
    ws.send(
      JSON.stringify({
        type: 'error',
        data: parsedData.data,
        key: 'CHAIN_ERROR',
      }),
    );
  });
};

export const handleMessage = async (
  message: string,
  ws: WebSocket,
  llm: BaseChatModel,
  embeddings: Embeddings,
) => {
  try {
    const parsedWSMessage = JSON.parse(message) as WSMessage;
    const parsedMessage = parsedWSMessage.message;

    if (parsedWSMessage.files.length > 0) {
      /* TODO: Implement uploads in other classes/single meta class system*/
      parsedWSMessage.focusMode = 'webSearch';
    }

    const humanMessageId =
      parsedMessage.messageId ?? crypto.randomBytes(7).toString('hex');
    const aiMessageId = crypto.randomBytes(7).toString('hex');

    if (!parsedMessage.content)
      return ws.send(
        JSON.stringify({
          type: 'error',
          data: 'Invalid message format',
          key: 'INVALID_FORMAT',
        }),
      );

    const history: BaseMessage[] = parsedWSMessage.history.map((msg) => {
      if (msg[0] === 'human') {
        return new HumanMessage({
          content: msg[1],
        });
      } else {
        return new AIMessage({
          content: msg[1],
        });
      }
    });

    if (parsedWSMessage.type === 'message') {
      const handler: MetaSearchAgentType =
        searchHandlers[parsedWSMessage.focusMode];

      if (handler) {
        try {
          const emitter = await handler.searchAndAnswer(
            parsedMessage.content,
            history,
            llm,
            embeddings,
            parsedWSMessage.optimizationMode,
            parsedWSMessage.files,
          );

          handleEmitterEvents(emitter, ws, aiMessageId, parsedMessage.chatId);

          const chat = await db.query.chats.findFirst({
            where: eq(chats.id, parsedMessage.chatId),
          });

          if (!chat) {
            await db
              .insert(chats)
              .values({
                id: parsedMessage.chatId,
                title: parsedMessage.content,
                createdAt: new Date().toString(),
                focusMode: parsedWSMessage.focusMode,
                files: parsedWSMessage.files.map(getFileDetails),
              })
              .execute();
          }

          const messageExists = await db.query.messages.findFirst({
            where: eq(messagesSchema.messageId, humanMessageId),
          });

          if (!messageExists) {
            await db
              .insert(messagesSchema)
              .values({
                content: parsedMessage.content,
                chatId: parsedMessage.chatId,
                messageId: humanMessageId,
                role: 'user',
                metadata: JSON.stringify({
                  createdAt: new Date(),
                }),
              })
              .execute();
          } else {
            await db
              .delete(messagesSchema)
              .where(
                and(
                  gt(messagesSchema.id, messageExists.id),
                  eq(messagesSchema.chatId, parsedMessage.chatId),
                ),
              )
              .execute();
          }
        } catch (err) {
          console.log(err);
        }
      } else {
        ws.send(
          JSON.stringify({
            type: 'error',
            data: 'Invalid focus mode',
            key: 'INVALID_FOCUS_MODE',
          }),
        );
      }
    }
  } catch (err) {
    ws.send(
      JSON.stringify({
        type: 'error',
        data: 'Invalid message format',
        key: 'INVALID_FORMAT',
      }),
    );
    logger.error(`Failed to handle message: ${err}`);
  }
};
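For reference, an inbound frame as parsed by this handler might look like the following; the IDs and content are illustrative, and focusMode must be a key of searchHandlers:

// Hypothetical wire format, following the WSMessage type above.
const frame = JSON.stringify({
  type: 'message',
  message: {
    messageId: 'a1b2c3d4e5f6a7',
    chatId: '0f1e2d3c4b5a69',
    content: 'What is quantum entanglement?',
  },
  focusMode: 'webSearch',
  optimizationMode: 'balanced',
  history: [
    ['human', 'Hi'],
    ['assistant', 'Hello! How can I help?'],
  ],
  files: [],
});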
16 src/websocket/websocketServer.ts Normal file
@@ -0,0 +1,16 @@
import { WebSocketServer } from 'ws';
import { handleConnection } from './connectionManager';
import http from 'http';
import { getPort } from '../config';
import logger from '../utils/logger';

export const initServer = (
  server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
) => {
  const port = getPort();
  const wss = new WebSocketServer({ server });

  wss.on('connection', handleConnection);

  logger.info(`WebSocket server started on port ${port}`);
};
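Together with src/websocket/index.ts above, wiring this up only requires a shared HTTP server; a minimal sketch, assuming an Express app on port 3001:

// Hypothetical bootstrap: Express and the WebSocketServer share one server.
import express from 'express';
import http from 'http';
import { startWebSocketServer } from './websocket';

const app = express();
const server = http.createServer(app);

startWebSocketServer(server); // attaches the WSS to the same port as the API
server.listen(3001);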
@@ -1,27 +1,18 @@
{
  "compilerOptions": {
    "lib": ["dom", "dom.iterable", "esnext"],
    "allowJs": true,
    "skipLibCheck": true,
    "strict": true,
    "noEmit": true,
    "lib": ["ESNext"],
    "module": "Node16",
    "moduleResolution": "Node16",
    "target": "ESNext",
    "outDir": "dist",
    "sourceMap": false,
    "esModuleInterop": true,
    "module": "esnext",
    "moduleResolution": "bundler",
    "resolveJsonModule": true,
    "isolatedModules": true,
    "jsx": "preserve",
    "incremental": true,
    "plugins": [
      {
        "name": "next"
      }
    ],
    "paths": {
      "@/*": ["./src/*"]
    },
    "target": "ES2017"
    "experimentalDecorators": true,
    "emitDecoratorMetadata": true,
    "allowSyntheticDefaultImports": true,
    "skipLibCheck": true,
    "skipDefaultLibCheck": true
  },
  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
  "exclude": ["node_modules"]
  "include": ["src"],
  "exclude": ["node_modules", "**/*.spec.ts"]
}
2 ui/.env.example Normal file
@@ -0,0 +1,2 @@
NEXT_PUBLIC_WS_URL=ws://localhost:3001
NEXT_PUBLIC_API_URL=http://localhost:3001/api
34 ui/.gitignore vendored Normal file
@@ -0,0 +1,34 @@
# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz

# testing
/coverage

# next.js
/.next/
/out/

# production
/build

# misc
.DS_Store
*.pem

# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# local env files
.env*.local

# vercel
.vercel

# typescript
*.tsbuildinfo
next-env.d.ts
11 ui/.prettierrc.js Normal file
@@ -0,0 +1,11 @@
/** @type {import("prettier").Config} */

const config = {
  printWidth: 80,
  trailingComma: 'all',
  endOfLine: 'auto',
  singleQuote: true,
  tabWidth: 2,
};

module.exports = config;
7 ui/app/c/[chatId]/page.tsx Normal file
@@ -0,0 +1,7 @@
import ChatWindow from '@/components/ChatWindow';

const Page = ({ params }: { params: { chatId: string } }) => {
  return <ChatWindow id={params.chatId} />;
};

export default Page;
@@ -19,7 +19,7 @@ const Page = () => {
  useEffect(() => {
    const fetchData = async () => {
      try {
        const res = await fetch(`/api/discover`, {
        const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
          method: 'GET',
          headers: {
            'Content-Type': 'application/json',

(binary image changed: 25 KiB before, 25 KiB after)
@@ -21,7 +21,7 @@ const Page = () => {
  const fetchChats = async () => {
    setLoading(true);

    const res = await fetch(`/api/chats`, {
    const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',
@@ -116,7 +116,7 @@ const Page = () => {
  useEffect(() => {
    const fetchConfig = async () => {
      setIsLoading(true);
      const res = await fetch(`/api/config`, {
      const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
        headers: {
          'Content-Type': 'application/json',
        },
@@ -187,13 +187,16 @@ const Page = () => {
        [key]: value,
      } as SettingsType;

      const response = await fetch(`/api/config`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
      const response = await fetch(
        `${process.env.NEXT_PUBLIC_API_URL}/config`,
        {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
          },
          body: JSON.stringify(updatedConfig),
        },
        body: JSON.stringify(updatedConfig),
      });
      );

      if (!response.ok) {
        throw new Error('Failed to update config');
@@ -205,7 +208,7 @@ const Page = () => {
        key.toLowerCase().includes('api') ||
        key.toLowerCase().includes('url')
      ) {
        const res = await fetch(`/api/config`, {
        const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
          headers: {
            'Content-Type': 'application/json',
          },
@@ -29,154 +29,280 @@ export interface File {
  fileId: string;
}

interface ChatModelProvider {
  name: string;
  provider: string;
}

interface EmbeddingModelProvider {
  name: string;
  provider: string;
}

const checkConfig = async (
  setChatModelProvider: (provider: ChatModelProvider) => void,
  setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
  setIsConfigReady: (ready: boolean) => void,
  setHasError: (hasError: boolean) => void,
const useSocket = (
  url: string,
  setIsWSReady: (ready: boolean) => void,
  setError: (error: boolean) => void,
) => {
  try {
    let chatModel = localStorage.getItem('chatModel');
    let chatModelProvider = localStorage.getItem('chatModelProvider');
    let embeddingModel = localStorage.getItem('embeddingModel');
    let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
  const wsRef = useRef<WebSocket | null>(null);
  const reconnectTimeoutRef = useRef<NodeJS.Timeout>();
  const retryCountRef = useRef(0);
  const isCleaningUpRef = useRef(false);
  const MAX_RETRIES = 3;
  const INITIAL_BACKOFF = 1000; // 1 second
  const isConnectionErrorRef = useRef(false);

    const autoImageSearch = localStorage.getItem('autoImageSearch');
    const autoVideoSearch = localStorage.getItem('autoVideoSearch');
  const getBackoffDelay = (retryCount: number) => {
    return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
  };

    if (!autoImageSearch) {
      localStorage.setItem('autoImageSearch', 'true');
    }

    if (!autoVideoSearch) {
      localStorage.setItem('autoVideoSearch', 'false');
    }

    const providers = await fetch(`/api/models`, {
      headers: {
        'Content-Type': 'application/json',
      },
    }).then(async (res) => {
      if (!res.ok)
        throw new Error(
          `Failed to fetch models: ${res.status} ${res.statusText}`,
        );
      return res.json();
    });

    if (
      !chatModel ||
      !chatModelProvider ||
      !embeddingModel ||
      !embeddingModelProvider
    ) {
      if (!chatModel || !chatModelProvider) {
        const chatModelProviders = providers.chatModelProviders;

        chatModelProvider =
          chatModelProvider || Object.keys(chatModelProviders)[0];

        chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];

        if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
          return toast.error('No chat models available');
  useEffect(() => {
    const connectWs = async () => {
      if (wsRef.current?.readyState === WebSocket.OPEN) {
        wsRef.current.close();
      }

      if (!embeddingModel || !embeddingModelProvider) {
        const embeddingModelProviders = providers.embeddingModelProviders;
      try {
        let chatModel = localStorage.getItem('chatModel');
        let chatModelProvider = localStorage.getItem('chatModelProvider');
        let embeddingModel = localStorage.getItem('embeddingModel');
        let embeddingModelProvider = localStorage.getItem(
          'embeddingModelProvider',
        );

        const autoImageSearch = localStorage.getItem('autoImageSearch');
        const autoVideoSearch = localStorage.getItem('autoVideoSearch');

        if (!autoImageSearch) {
          localStorage.setItem('autoImageSearch', 'true');
        }

        if (!autoVideoSearch) {
          localStorage.setItem('autoVideoSearch', 'false');
        }

        const providers = await fetch(
          `${process.env.NEXT_PUBLIC_API_URL}/models`,
          {
            headers: {
              'Content-Type': 'application/json',
            },
          },
        ).then(async (res) => {
          if (!res.ok)
            throw new Error(
              `Failed to fetch models: ${res.status} ${res.statusText}`,
            );
          return res.json();
        });

        if (
          !embeddingModelProviders ||
          Object.keys(embeddingModelProviders).length === 0
        )
          return toast.error('No embedding models available');
          !chatModel ||
          !chatModelProvider ||
          !embeddingModel ||
          !embeddingModelProvider
        ) {
          if (!chatModel || !chatModelProvider) {
            const chatModelProviders = providers.chatModelProviders;

        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
        embeddingModel = Object.keys(
          embeddingModelProviders[embeddingModelProvider],
        )[0];
            chatModelProvider =
              chatModelProvider || Object.keys(chatModelProviders)[0];

            chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];

            if (
              !chatModelProviders ||
              Object.keys(chatModelProviders).length === 0
            )
              return toast.error('No chat models available');
          }

          if (!embeddingModel || !embeddingModelProvider) {
            const embeddingModelProviders = providers.embeddingModelProviders;

            if (
              !embeddingModelProviders ||
              Object.keys(embeddingModelProviders).length === 0
            )
              return toast.error('No embedding models available');

            embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
            embeddingModel = Object.keys(
              embeddingModelProviders[embeddingModelProvider],
            )[0];
          }

          localStorage.setItem('chatModel', chatModel!);
          localStorage.setItem('chatModelProvider', chatModelProvider);
          localStorage.setItem('embeddingModel', embeddingModel!);
          localStorage.setItem(
            'embeddingModelProvider',
            embeddingModelProvider,
          );
        } else {
          const chatModelProviders = providers.chatModelProviders;
          const embeddingModelProviders = providers.embeddingModelProviders;

          if (
            Object.keys(chatModelProviders).length > 0 &&
            !chatModelProviders[chatModelProvider]
          ) {
            const chatModelProvidersKeys = Object.keys(chatModelProviders);
            chatModelProvider =
              chatModelProvidersKeys.find(
                (key) => Object.keys(chatModelProviders[key]).length > 0,
              ) || chatModelProvidersKeys[0];

            localStorage.setItem('chatModelProvider', chatModelProvider);
          }

          if (
            chatModelProvider &&
            !chatModelProviders[chatModelProvider][chatModel]
          ) {
            chatModel = Object.keys(
              chatModelProviders[
                Object.keys(chatModelProviders[chatModelProvider]).length > 0
                  ? chatModelProvider
                  : Object.keys(chatModelProviders)[0]
              ],
            )[0];
            localStorage.setItem('chatModel', chatModel);
          }

          if (
            Object.keys(embeddingModelProviders).length > 0 &&
            !embeddingModelProviders[embeddingModelProvider]
          ) {
            embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
            localStorage.setItem(
              'embeddingModelProvider',
              embeddingModelProvider,
            );
          }

          if (
            embeddingModelProvider &&
            !embeddingModelProviders[embeddingModelProvider][embeddingModel]
          ) {
            embeddingModel = Object.keys(
              embeddingModelProviders[embeddingModelProvider],
            )[0];
            localStorage.setItem('embeddingModel', embeddingModel);
          }
        }

        const wsURL = new URL(url);
        const searchParams = new URLSearchParams({});

        searchParams.append('chatModel', chatModel!);
        searchParams.append('chatModelProvider', chatModelProvider);

        if (chatModelProvider === 'custom_openai') {
          searchParams.append(
            'openAIApiKey',
            localStorage.getItem('openAIApiKey')!,
          );
          searchParams.append(
            'openAIBaseURL',
            localStorage.getItem('openAIBaseURL')!,
          );
        }

        searchParams.append('embeddingModel', embeddingModel!);
        searchParams.append('embeddingModelProvider', embeddingModelProvider);

        wsURL.search = searchParams.toString();

        const ws = new WebSocket(wsURL.toString());
        wsRef.current = ws;

        const timeoutId = setTimeout(() => {
          if (ws.readyState !== 1) {
            toast.error(
              'Failed to connect to the server. Please try again later.',
            );
          }
        }, 10000);

        ws.addEventListener('message', (e) => {
          const data = JSON.parse(e.data);
          if (data.type === 'signal' && data.data === 'open') {
            const interval = setInterval(() => {
              if (ws.readyState === 1) {
                setIsWSReady(true);
                setError(false);
                if (retryCountRef.current > 0) {
                  toast.success('Connection restored.');
                }
                retryCountRef.current = 0;
                clearInterval(interval);
              }
            }, 5);
            clearTimeout(timeoutId);
            console.debug(new Date(), 'ws:connected');
          }
          if (data.type === 'error') {
            isConnectionErrorRef.current = true;
            setError(true);
            toast.error(data.data);
          }
        });

        ws.onerror = () => {
          clearTimeout(timeoutId);
          setIsWSReady(false);
          toast.error('WebSocket connection error.');
        };

        ws.onclose = () => {
          clearTimeout(timeoutId);
          setIsWSReady(false);
          console.debug(new Date(), 'ws:disconnected');
          if (!isCleaningUpRef.current && !isConnectionErrorRef.current) {
            toast.error('Connection lost. Attempting to reconnect...');
            attemptReconnect();
          }
        };
      } catch (error) {
        console.debug(new Date(), 'ws:error', error);
        setIsWSReady(false);
        attemptReconnect();
      }
    };

    const attemptReconnect = () => {
      retryCountRef.current += 1;

      if (retryCountRef.current > MAX_RETRIES) {
        console.debug(new Date(), 'ws:max_retries');
        setError(true);
        toast.error(
          'Unable to connect to server after multiple attempts. Please refresh the page to try again.',
        );
        return;
      }

      localStorage.setItem('chatModel', chatModel!);
      localStorage.setItem('chatModelProvider', chatModelProvider);
      localStorage.setItem('embeddingModel', embeddingModel!);
      localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
    } else {
      const chatModelProviders = providers.chatModelProviders;
      const embeddingModelProviders = providers.embeddingModelProviders;
      const backoffDelay = getBackoffDelay(retryCountRef.current);
      console.debug(
        new Date(),
        `ws:retry attempt=${retryCountRef.current}/${MAX_RETRIES} delay=${backoffDelay}ms`,
      );

      if (
        Object.keys(chatModelProviders).length > 0 &&
        !chatModelProviders[chatModelProvider]
      ) {
        const chatModelProvidersKeys = Object.keys(chatModelProviders);
        chatModelProvider =
          chatModelProvidersKeys.find(
            (key) => Object.keys(chatModelProviders[key]).length > 0,
          ) || chatModelProvidersKeys[0];

        localStorage.setItem('chatModelProvider', chatModelProvider);
      if (reconnectTimeoutRef.current) {
        clearTimeout(reconnectTimeoutRef.current);
      }

      if (
        chatModelProvider &&
        !chatModelProviders[chatModelProvider][chatModel]
      ) {
        chatModel = Object.keys(
          chatModelProviders[
            Object.keys(chatModelProviders[chatModelProvider]).length > 0
              ? chatModelProvider
              : Object.keys(chatModelProviders)[0]
          ],
        )[0];
        localStorage.setItem('chatModel', chatModel);
      reconnectTimeoutRef.current = setTimeout(() => {
        connectWs();
      }, backoffDelay);
    };

    connectWs();

    return () => {
      if (reconnectTimeoutRef.current) {
        clearTimeout(reconnectTimeoutRef.current);
      }

      if (
        Object.keys(embeddingModelProviders).length > 0 &&
        !embeddingModelProviders[embeddingModelProvider]
      ) {
        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
        localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
      if (wsRef.current?.readyState === WebSocket.OPEN) {
        wsRef.current.close();
        isCleaningUpRef.current = true;
        console.debug(new Date(), 'ws:cleanup');
      }
    };
  }, [url, setIsWSReady, setError]);

    if (
      embeddingModelProvider &&
      !embeddingModelProviders[embeddingModelProvider][embeddingModel]
    ) {
      embeddingModel = Object.keys(
        embeddingModelProviders[embeddingModelProvider],
      )[0];
      localStorage.setItem('embeddingModel', embeddingModel);
    }
  }

    setChatModelProvider({
      name: chatModel!,
      provider: chatModelProvider,
    });

    setEmbeddingModelProvider({
      name: embeddingModel!,
      provider: embeddingModelProvider,
    });

    setIsConfigReady(true);
  } catch (err) {
    console.error('An error occurred while checking the configuration:', err);
    setIsConfigReady(false);
    setHasError(true);
  }
  return wsRef.current;
};
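The hook above gives up after MAX_RETRIES failed attempts, doubling the delay each time; concretely:

// Worked example of the backoff schedule (retryCountRef is incremented first):
// attempt 1 -> min(1000 * 2 ** 1, 10000) = 2000 ms
// attempt 2 -> min(1000 * 2 ** 2, 10000) = 4000 ms
// attempt 3 -> min(1000 * 2 ** 3, 10000) = 8000 ms
// a 4th attempt would exceed MAX_RETRIES = 3, so the hook stops retrying.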

const loadMessages = async (
@@ -189,12 +315,15 @@ const loadMessages = async (
  setFiles: (files: File[]) => void,
  setFileIds: (fileIds: string[]) => void,
) => {
  const res = await fetch(`/api/chats/${chatId}`, {
    method: 'GET',
    headers: {
      'Content-Type': 'application/json',
  const res = await fetch(
    `${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
    {
      method: 'GET',
      headers: {
        'Content-Type': 'application/json',
      },
    },
  });
  );

  if (res.status === 404) {
    setNotFound(true);
@@ -244,32 +373,15 @@ const ChatWindow = ({ id }: { id?: string }) => {
  const [chatId, setChatId] = useState<string | undefined>(id);
  const [newChatCreated, setNewChatCreated] = useState(false);

  const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
    {
      name: '',
      provider: '',
    },
  );

  const [embeddingModelProvider, setEmbeddingModelProvider] =
    useState<EmbeddingModelProvider>({
      name: '',
      provider: '',
    });

  const [isConfigReady, setIsConfigReady] = useState(false);
  const [hasError, setHasError] = useState(false);
  const [isReady, setIsReady] = useState(false);

  useEffect(() => {
    checkConfig(
      setChatModelProvider,
      setEmbeddingModelProvider,
      setIsConfigReady,
      setHasError,
    );
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);
  const [isWSReady, setIsWSReady] = useState(false);
  const ws = useSocket(
    process.env.NEXT_PUBLIC_WS_URL!,
    setIsWSReady,
    setHasError,
  );

  const [loading, setLoading] = useState(false);
  const [messageAppeared, setMessageAppeared] = useState(false);
@@ -287,6 +399,8 @@ const ChatWindow = ({ id }: { id?: string }) => {

  const [notFound, setNotFound] = useState(false);

  const [isSettingsOpen, setIsSettingsOpen] = useState(false);

  useEffect(() => {
    if (
      chatId &&
@@ -312,6 +426,16 @@ const ChatWindow = ({ id }: { id?: string }) => {
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  useEffect(() => {
    return () => {
      if (ws?.readyState === 1) {
        ws.close();
        console.debug(new Date(), 'ws:cleanup');
      }
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  const messagesRef = useRef<Message[]>([]);

  useEffect(() => {
@@ -319,18 +443,18 @@ const ChatWindow = ({ id }: { id?: string }) => {
  }, [messages]);

  useEffect(() => {
    if (isMessagesLoaded && isConfigReady) {
    if (isMessagesLoaded && isWSReady) {
      setIsReady(true);
      console.debug(new Date(), 'app:ready');
    } else {
      setIsReady(false);
    }
  }, [isMessagesLoaded, isConfigReady]);
  }, [isMessagesLoaded, isWSReady]);

  const sendMessage = async (message: string, messageId?: string) => {
    if (loading) return;
    if (!isConfigReady) {
      toast.error('Cannot send message before the configuration is ready');
    if (!ws || ws.readyState !== WebSocket.OPEN) {
      toast.error('Cannot send message while disconnected');
      return;
    }

@@ -343,6 +467,21 @@ const ChatWindow = ({ id }: { id?: string }) => {

    messageId = messageId ?? crypto.randomBytes(7).toString('hex');

    ws.send(
      JSON.stringify({
        type: 'message',
        message: {
          messageId: messageId,
          chatId: chatId!,
          content: message,
        },
        files: fileIds,
        focusMode: focusMode,
        optimizationMode: optimizationMode,
        history: [...chatHistory, ['human', message]],
      }),
    );

    setMessages((prevMessages) => [
      ...prevMessages,
      {
@@ -354,7 +493,9 @@ const ChatWindow = ({ id }: { id?: string }) => {
      },
    ]);

    const messageHandler = async (data: any) => {
    const messageHandler = async (e: MessageEvent) => {
      const data = JSON.parse(e.data);

      if (data.type === 'error') {
        toast.error(data.data);
        setLoading(false);
@@ -417,25 +558,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
          ['assistant', recievedMessage],
        ]);

        ws?.removeEventListener('message', messageHandler);
        setLoading(false);

        const lastMsg = messagesRef.current[messagesRef.current.length - 1];

        const autoImageSearch = localStorage.getItem('autoImageSearch');
        const autoVideoSearch = localStorage.getItem('autoVideoSearch');

        if (autoImageSearch === 'true') {
          document
            .getElementById(`search-images-${lastMsg.messageId}`)
            ?.click();
        }

        if (autoVideoSearch === 'true') {
          document
            .getElementById(`search-videos-${lastMsg.messageId}`)
            ?.click();
        }

        if (
          lastMsg.role === 'assistant' &&
          lastMsg.sources &&
@@ -452,62 +579,21 @@ const ChatWindow = ({ id }: { id?: string }) => {
            }),
          );
        }

        const autoImageSearch = localStorage.getItem('autoImageSearch');
        const autoVideoSearch = localStorage.getItem('autoVideoSearch');

        if (autoImageSearch === 'true') {
          document.getElementById('search-images')?.click();
        }

        if (autoVideoSearch === 'true') {
          document.getElementById('search-videos')?.click();
        }
      }
    };

    const res = await fetch('/api/chat', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        content: message,
        message: {
          messageId: messageId,
          chatId: chatId!,
          content: message,
        },
        chatId: chatId!,
        files: fileIds,
        focusMode: focusMode,
        optimizationMode: optimizationMode,
        history: chatHistory,
        chatModel: {
          name: chatModelProvider.name,
          provider: chatModelProvider.provider,
        },
        embeddingModel: {
          name: embeddingModelProvider.name,
          provider: embeddingModelProvider.provider,
        },
      }),
    });

    if (!res.body) throw new Error('No response body');

    const reader = res.body?.getReader();
    const decoder = new TextDecoder('utf-8');

    let partialChunk = '';

    while (true) {
      const { value, done } = await reader.read();
      if (done) break;

      partialChunk += decoder.decode(value, { stream: true });

      try {
        const messages = partialChunk.split('\n');
        for (const msg of messages) {
          if (!msg.trim()) continue;
          const json = JSON.parse(msg);
          messageHandler(json);
        }
        partialChunk = '';
      } catch (error) {
        console.warn('Incomplete JSON, waiting for next chunk...');
      }
    }
    ws?.addEventListener('message', messageHandler);
  };

  const rewrite = (messageId: string) => {
@@ -528,11 +614,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
  };

  useEffect(() => {
    if (isReady && initialMessage && isConfigReady) {
    if (isReady && initialMessage && ws?.readyState === 1) {
      sendMessage(initialMessage);
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [isConfigReady, isReady, initialMessage]);
  }, [ws?.readyState, isReady, initialMessage, isWSReady]);

  if (hasError) {
    return (
@@ -29,12 +29,15 @@ const DeleteChat = ({
  const handleDelete = async () => {
    setLoading(true);
    try {
      const res = await fetch(`/api/chats/${chatId}`, {
        method: 'DELETE',
        headers: {
          'Content-Type': 'application/json',
      const res = await fetch(
        `${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
        {
          method: 'DELETE',
          headers: {
            'Content-Type': 'application/json',
          },
        },
      });
      );

      if (res.status != 200) {
        throw new Error('Failed to delete chat');
@@ -12,18 +12,13 @@ import {
  Layers3,
  Plus,
} from 'lucide-react';
import Markdown, { MarkdownToJSX } from 'markdown-to-jsx';
import Markdown from 'markdown-to-jsx';
import Copy from './MessageActions/Copy';
import Rewrite from './MessageActions/Rewrite';
import MessageSources from './MessageSources';
import SearchImages from './SearchImages';
import SearchVideos from './SearchVideos';
import { useSpeech } from 'react-text-to-speech';
import ThinkBox from './ThinkBox';

const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
  return <ThinkBox content={children as string} />;
};

const MessageBox = ({
  message,
@@ -49,48 +44,27 @@ const MessageBox = ({

  useEffect(() => {
    const regex = /\[(\d+)\]/g;
    let processedMessage = message.content;

    if (message.role === 'assistant' && message.content.includes('<think>')) {
      const openThinkTag = processedMessage.match(/<think>/g)?.length || 0;
      const closeThinkTag = processedMessage.match(/<\/think>/g)?.length || 0;

      if (openThinkTag > closeThinkTag) {
        processedMessage += '</think> <a> </a>'; // The extra <a> </a> is to prevent the think component from looking bad
      }
    }

    if (
      message.role === 'assistant' &&
      message?.sources &&
      message.sources.length > 0
    ) {
      setParsedMessage(
        processedMessage.replace(
      return setParsedMessage(
        message.content.replace(
          regex,
          (_, number) =>
            `<a href="${
              message.sources?.[number - 1]?.metadata?.url
            }" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
          `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
        ),
      );
      return;
    }

    setSpeechMessage(message.content.replace(regex, ''));
    setParsedMessage(processedMessage);
    setParsedMessage(message.content);
  }, [message.content, message.sources, message.role]);

  const { speechStatus, start, stop } = useSpeech({ text: speechMessage });

  const markdownOverrides: MarkdownToJSX.Options = {
    overrides: {
      think: {
        component: ThinkTagProcessor,
      },
    },
  };

  return (
    <div>
      {message.role === 'user' && (
@@ -137,13 +111,11 @@ const MessageBox = ({
            Answer
          </h3>
        </div>

        <Markdown
          className={cn(
            'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
            'max-w-none break-words text-black dark:text-white',
          )}
          options={markdownOverrides}
        >
          {parsedMessage}
        </Markdown>
@@ -221,12 +193,10 @@ const MessageBox = ({
          <SearchImages
            query={history[messageIndex - 1].content}
            chatHistory={history.slice(0, messageIndex - 1)}
            messageId={message.messageId}
          />
          <SearchVideos
            chatHistory={history.slice(0, messageIndex - 1)}
            query={history[messageIndex - 1].content}
            messageId={message.messageId}
          />
        </div>
      </div>
@@ -41,7 +41,7 @@ const Attach = ({
  data.append('embedding_model_provider', embeddingModelProvider!);
  data.append('embedding_model', embeddingModel!);

  const res = await fetch(`/api/uploads`, {
  const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
    method: 'POST',
    body: data,
  });
Some files were not shown because too many files have changed in this diff.