Compare commits


42 Commits

SHA1 Message Date
74f7eaed6e feat(workflow): fix build errors 2025-03-20 13:43:29 +05:30
dddd944a18 feat(workflow): update docker build 2025-03-20 13:22:43 +05:30
7eccd4d75b Merge pull request #679 from ItzCrazyKns/feat/remove-backend
feat(app): fix build errors
2025-03-20 12:48:27 +05:30
62e6c24840 feat(app): fix build errors 2025-03-20 12:47:54 +05:30
04a0342b52 Merge pull request #678 from ItzCrazyKns/feat/remove-backend
Feat/remove backend
2025-03-20 12:42:18 +05:30
5c016127cb feat(package): bump version 2025-03-20 12:41:07 +05:30
8b552010f9 feat(docs): update docs 2025-03-20 12:33:15 +05:30
97804e7b4d feat(config): remove unused vars 2025-03-20 12:30:06 +05:30
33b895b75e feat(app): add search API 2025-03-20 12:29:52 +05:30
048de2cb74 feat(docs): update docs 2025-03-20 12:29:31 +05:30
274e6ca88c feat(sidebar): remove unused state 2025-03-20 11:49:00 +05:30
f628b6e416 feat(groq): remove deprecated model 2025-03-20 11:48:44 +05:30
cf7144db96 feat(providers): add HF transformers 2025-03-20 11:48:26 +05:30
ffa793056d feat(chains): remove think tags 2025-03-20 11:47:54 +05:30
584d02b92a feat(app): add thinking model support 2025-03-20 10:56:03 +05:30
008c7cbec0 feat(chat-window): remove debugging code, 2025-03-20 09:47:32 +05:30
4d1ee79b8d feat(package): migrate db when built 2025-03-20 09:47:12 +05:30
ea638279e5 feat(docker): use standalone build 2025-03-20 09:46:50 +05:30
403d13eb50 feat(package): update scripts 2025-03-19 16:34:55 +05:30
217736d05a feat(app): remove backend 2025-03-19 16:23:27 +05:30
8a24572cd2 feat(app): add upload functionality 2025-03-19 15:32:32 +05:30
649c68f292 feat(ui): fix type errors 2025-03-19 13:42:28 +05:30
bab5dba6e1 feat(app): port history saving features 2025-03-19 13:42:15 +05:30
c24edac16d feat(app): add chat functionality 2025-03-19 13:41:52 +05:30
3150c21f17 feat(icons): fix type errors 2025-03-19 13:41:01 +05:30
c46fd7a9c8 feat(utils): add files utils, remove logger, fix API url 2025-03-19 13:40:35 +05:30
bab32e8d70 feat(app): add suggestions route 2025-03-19 13:40:10 +05:30
1130746f5d feat(app): add image & video search functionality 2025-03-19 13:38:40 +05:30
d1e9361665 feat(routes): add discover route 2025-03-19 13:37:54 +05:30
3bf2337697 feat(app): add db & schema 2025-03-19 13:37:01 +05:30
ee6e197ec0 feat(app): lint & beautify 2025-03-18 11:29:04 +05:30
32f26bb4e8 feat(app): add groq, gemini & anthropic provider 2025-03-18 11:28:47 +05:30
4cb20542a5 feat(config): update file path, add post endpoint 2025-03-18 10:33:32 +05:30
97f6196d9b feat(app): add GET config route 2025-03-18 10:25:09 +05:30
6c227cab6f feat(providers): move providers to UI 2025-03-18 10:24:51 +05:30
e9e34ddff9 feat(ui): add meta search agent 2025-03-18 10:24:33 +05:30
e29a08dc46 feat(ui): add necessary utils 2025-03-18 10:24:16 +05:30
5c313e9bed feat(ui): update packages, add config, add searxng 2025-03-18 10:23:59 +05:30
6b5bd9d79b feat(prompts): move to UI 2025-03-18 10:23:21 +05:30
64d2a467b0 Merge pull request #672 from sjiampojamarn/scrolling
Only set scrollIntoView for user msg.
2025-03-17 12:03:05 +05:30
9a2c4fe3b6 Only set scrollIntoView for user msg. 2025-03-16 22:15:58 -07:00
060c68a900 feat(message-box): lint & beautify 2025-03-14 22:05:07 +05:30
133 changed files with 5226 additions and 7158 deletions

View File

@ -8,15 +8,12 @@ on:
types: [published]
jobs:
build-and-push:
build-amd64:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
@ -33,24 +30,104 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push Docker image (latest)
- name: Build and push AMD64 Docker image
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
docker buildx create --use
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:latest \
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
--cache-to=type=inline \
-f docker/Dockerfile \
-t itzcrazykns1337/perplexica:latest \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:amd64 \
--push .
- name: Build and push Docker image (release)
- name: Build and push AMD64 release Docker image
if: github.event_name == 'release'
run: |
docker buildx create --use
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${{ env.RELEASE_VERSION }} \
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--cache-to=type=inline \
-f docker/Dockerfile \
-t itzcrazykns1337/perplexica:${{ env.RELEASE_VERSION }} \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--push .
build-arm64:
runs-on: ubuntu-24.04-arm
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
install: true
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push ARM64 Docker image
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:arm64 \
--push .
- name: Build and push ARM64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--push .
manifest:
needs: [build-amd64, build-arm64]
runs-on: ubuntu-latest
steps:
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Create and push multi-arch manifest for main
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
--amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
- name: Create and push multi-arch manifest for releases
if: github.event_name == 'release'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}

.gitignore vendored (6 lines changed)
View File

@ -4,9 +4,9 @@ npm-debug.log
yarn-error.log
# Build output
/.next/
/out/
/dist/
.next/
out/
dist/
# IDE/Editor specific
.vscode/

View File

@ -6,7 +6,6 @@ const config = {
endOfLine: 'auto',
singleQuote: true,
tabWidth: 2,
semi: true,
};
module.exports = config;

View File

@ -1,32 +1,43 @@
# How to Contribute to Perplexica
Hey there, thanks for deciding to contribute to Perplexica. Anything you help with will support the development of Perplexica and will make it better. Let's walk you through the key aspects to ensure your contributions are effective and in harmony with the project's setup.
Thanks for your interest in contributing to Perplexica! Your help makes this project better. This guide explains how to contribute effectively.
Perplexica is a modern AI chat application with advanced search capabilities.
## Project Structure
Perplexica's design consists of two main domains:
Perplexica's codebase is organized as follows:
- **Frontend (`ui` directory)**: This is a Next.js application holding all user interface components. It's a self-contained environment that manages everything the user interacts with.
- **Backend (root and `src` directory)**: The backend logic is situated in the `src` folder, but the root directory holds the main `package.json` for backend dependency management.
- All of the focus modes are created using the Meta Search Agent class present in `src/search/metaSearchAgent.ts`. The main logic behind Perplexica lies there.
- **UI Components and Pages**:
- **Components (`src/components`)**: Reusable UI components.
- **Pages and Routes (`src/app`)**: Next.js app directory structure with page components.
- Main app routes include: home (`/`), chat (`/c`), discover (`/discover`), library (`/library`), and settings (`/settings`).
- **API Routes (`src/app/api`)**: API endpoints implemented with Next.js API routes.
- `/api/chat`: Handles chat interactions.
- `/api/search`: Provides direct access to Perplexica's search capabilities.
- Other endpoints for models, files, and suggestions.
- **Backend Logic (`src/lib`)**: Contains all the backend functionality including search, database, and API logic.
- The search functionality is present inside `src/lib/search` directory.
- All of the focus modes are implemented using the Meta Search Agent class in `src/lib/search/metaSearchAgent.ts`.
- Database functionality is in `src/lib/db`.
- Chat model and embedding model providers are managed in `src/lib/providers`.
- Prompt templates and LLM chain definitions are in `src/lib/prompts` and `src/lib/chains` respectively.
## API Documentation
Perplexica exposes several API endpoints for programmatic access, including:
- **Search API**: Access Perplexica's advanced search capabilities directly via the `/api/search` endpoint. For detailed documentation, see `docs/api/search.md`.
## Setting Up Your Environment
Before diving into coding, setting up your local environment is key. Here's what you need to do:
### Backend
1. In the root directory, locate the `sample.config.toml` file.
2. Rename it to `config.toml` and fill in the necessary configuration fields specific to the backend.
3. Run `npm install` to install dependencies.
4. Run `npm run db:push` to set up the local sqlite.
5. Use `npm run dev` to start the backend in development mode.
### Frontend
1. Navigate to the `ui` folder and repeat the process of renaming `.env.example` to `.env`, making sure to provide the frontend-specific variables.
2. Execute `npm install` within the `ui` directory to get the frontend dependencies ready.
3. Launch the frontend development server with `npm run dev`.
2. Rename it to `config.toml` and fill in the necessary configuration fields.
3. Run `npm install` to install all dependencies.
4. Run `npm run db:push` to set up the local sqlite database.
5. Use `npm run dev` to start the application in development mode.
**Please note**: Docker configurations are present for setting up production environments, whereas `npm run dev` is used for development purposes.

View File

@ -26,13 +26,12 @@
- [Preview](#preview)
- [Features](#features)
- [Installation](#installation)
- [Docker Installation (Recommended)](#docker-installation-recommended)
- [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
- [Non-Docker Installation](#non-docker-installation)
- [Nginx Reverse Proxy](#nginx-reverse-proxy)
- [Ollama Connection Errors](#ollama-connection-errors)
- [Using as a Search Engine](#using-as-a-search-engine)
- [Using Perplexica's API](#using-perplexicas-api)
- [Expose Perplexica to a Network](#expose-perplexica-to-a-network)
- [Expose Perplexica to a network](#expose-perplexica-to-network)
- [One-Click Deployment](#one-click-deployment)
- [Upcoming Features](#upcoming-features)
- [Support Us](#support-us)
@ -72,9 +71,9 @@ It has many more features like image and video search. Some of the planned featu
## Installation
Perplexica can be installed using Docker (recommended) or directly on your system.
There are mainly 2 ways of installing Perplexica - With Docker, Without Docker. Using Docker is highly recommended.
### Docker Installation (Recommended)
### Getting Started with Docker (Recommended)
1. Ensure Docker is installed and running on your system.
2. Clone the Perplexica repository:
@ -102,38 +101,21 @@ Perplexica can be installed using Docker (recommended) or directly on your syste
docker compose up -d
```
6. Wait a few minutes for the setup to complete. You can access Perplexica at http://localhost:8080 in your web browser.
6. Wait a few minutes for the setup to complete. You can access Perplexica at http://localhost:3000 in your web browser.
**Note**: After the containers are built, you can start Perplexica directly from Docker without having to open a terminal.
The Docker configuration is located in the `docker/` directory, containing:
- Dockerfile with multi-stage build for efficient images
- Service configurations for the integrated process manager
- Nginx reverse proxy configuration
### Non-Docker Installation
1. Install SearXNG and allow `JSON` format in the SearXNG settings.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. Rename the `.env.example` file to `.env` in the `ui` folder and fill in all necessary fields.
4. After populating the configuration and environment files, run `npm i` in both the `ui` folder and the root directory.
5. Install the dependencies and then execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
3. After populating the configuration run `npm i`.
4. Install the dependencies and then execute `npm run build`.
5. Finally, start the app by running `npm run start`.
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like exposing it to your network, etc.
### Nginx Reverse Proxy
Perplexica includes an Nginx reverse proxy that provides several key benefits:
- **Single Port Access**: Access both frontend and backend through a single port (8080)
- **Dynamic Configuration**: Works with any domain or IP without rebuilding
- **WebSocket Support**: Automatic WebSocket URL configuration based on the current domain
- **Security Headers**: Enhanced security with proper HTTP headers
When using Docker, the reverse proxy is automatically configured. Access Perplexica at `http://localhost:8080` or `http://your-ip:8080` after starting the containers.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
### Ollama Connection Errors
@ -160,7 +142,7 @@ If you wish to use Perplexica as an alternative to traditional search engines li
1. Open your browser's settings.
2. Navigate to the 'Search Engines' section.
3. Add a new site search with the following URL: `http://localhost:8080/?q=%s`. Replace `localhost` with your IP address or domain name if needed.
3. Add a new site search with the following URL: `http://localhost:3000/?q=%s`. Replace `localhost` with your IP address or domain name, and `3000` with the port number if Perplexica is not hosted locally.
4. Click the add button. Now, you can use Perplexica directly from your browser's search bar.
## Using Perplexica's API
@ -169,15 +151,9 @@ Perplexica also provides an API for developers looking to integrate its powerful
For more details, check out the full documentation [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/API/SEARCH.md).
## Expose Perplexica to a Network
## Expose Perplexica to network
Perplexica can be easily accessed over your home network or exposed to the internet through the Nginx reverse proxy. With this setup:
1. **Local Network Access**: Access Perplexica from any device on your network using `http://server-ip:8080`
2. **Domain Configuration**: If you have a domain name, point it to your server and access Perplexica with `http://your-domain.com:8080`
3. **SSL Support**: Configure SSL certificates in Nginx for secure `https://` access
For more network configuration details, see our [networking guide](https://github.com/ItzCrazyKns/Perplexica/blob/master/docs/installation/NETWORKING.md).
You can access Perplexica over your home network by following our networking guide [here](https://github.com/ItzCrazyKns/Perplexica/blob/master/docs/installation/NETWORKING.md).
## One-Click Deployment

app.dockerfile (new file, 27 lines)
View File

@ -0,0 +1,27 @@
FROM node:20.18.0-alpine AS builder
WORKDIR /home/perplexica
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile --network-timeout 600000
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
RUN mkdir -p /home/perplexica/data
RUN yarn build
FROM node:20.18.0-alpine
WORKDIR /home/perplexica
COPY --from=builder /home/perplexica/public ./public
COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data
RUN mkdir /home/perplexica/uploads
CMD ["node", "server.js"]

View File

@ -2,43 +2,29 @@ services:
searxng:
image: docker.io/searxng/searxng:latest
volumes:
- ./searxng:/etc/searxng
- ./searxng:/etc/searxng:rw
ports:
- 4000:8080
networks:
- perplexica-network
restart: unless-stopped
perplexica:
image: itzcrazykns1337/perplexica:latest
ports:
- "8080:8080"
app:
image: itzcrazykns1337/perplexica:main
build:
context: .
dockerfile: app.dockerfile
environment:
- SEARXNG_API_URL=http://searxng:4000
- SIMILARITY_MEASURE=cosine
- KEEP_ALIVE=5m
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
- GROQ_API_KEY=${GROQ_API_KEY:-}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- GEMINI_API_KEY=${GEMINI_API_KEY:-}
- OLLAMA_API_URL=${OLLAMA_API_URL:-}
- CUSTOM_OPENAI_API_KEY=${CUSTOM_OPENAI_API_KEY:-}
- CUSTOM_OPENAI_API_URL=${CUSTOM_OPENAI_API_URL:-}
- CUSTOM_OPENAI_MODEL_NAME=${CUSTOM_OPENAI_MODEL_NAME:-}
volumes:
- backend-dbstore:/app/backend/data
- uploads:/app/backend/uploads
extra_hosts:
- 'host.docker.internal:host-gateway'
depends_on:
- searxng
- SEARXNG_API_URL=http://searxng:8080
ports:
- 3000:3000
networks:
- perplexica-network
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
- ./config.toml:/home/perplexica/config.toml
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 5s
networks:
perplexica-network:

View File

@ -1,93 +0,0 @@
# Multi-stage build for Perplexica
# Stage 1: Build the backend
FROM node:lts-alpine as backend-builder
WORKDIR /app
COPY src ./src
COPY tsconfig.json drizzle.config.ts package.json yarn.lock ./
RUN yarn install --frozen-lockfile --network-timeout 600000 && \
yarn build
# Stage 2: Build the frontend
FROM node:lts-alpine as frontend-builder
WORKDIR /app
COPY ui ./
ARG NEXT_PUBLIC_API_URL=/api
ARG NEXT_PUBLIC_WS_URL=auto
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
RUN yarn install --frozen-lockfile && \
yarn build
# Stage 3: Final image
FROM node:lts-alpine
# Install curl and jq for GitHub API access
RUN apk add --no-cache curl jq
# Determine latest S6 overlay version at build time
RUN S6_OVERLAY_VERSION=$(curl -s https://api.github.com/repos/just-containers/s6-overlay/releases/latest | jq -r .tag_name | sed 's/^v//') && \
echo "Using S6 overlay version: $S6_OVERLAY_VERSION" && \
echo "$S6_OVERLAY_VERSION" > /tmp/s6-version
# Use Docker's TARGETARCH for automatic architecture detection
ARG TARGETARCH
# Install additional required packages and create directory structure in one layer
RUN apk add --no-cache \
nginx \
tzdata \
bash && \
mkdir -p /app/backend /app/frontend /app/data /app/uploads
# Map Docker's architecture names to s6-overlay architecture names and download/install
RUN S6_OVERLAY_VERSION=$(cat /tmp/s6-version) && \
case "${TARGETARCH}" in \
"amd64") S6_OVERLAY_ARCH="x86_64" ;; \
"arm64") S6_OVERLAY_ARCH="aarch64" ;; \
"arm") S6_OVERLAY_ARCH="arm" ;; \
*) echo "Unsupported architecture: ${TARGETARCH}. Only amd64, arm64, and arm are supported." && exit 1 ;; \
esac && \
echo "Target architecture: ${TARGETARCH} -> S6 architecture: ${S6_OVERLAY_ARCH}" && \
echo "Downloading s6-overlay v${S6_OVERLAY_VERSION} for architecture: ${S6_OVERLAY_ARCH}" && \
curl -L -s -o /tmp/s6-overlay-noarch.tar.xz "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz" && \
tar -C / -Jxpf /tmp/s6-overlay-noarch.tar.xz && \
curl -L -s -o /tmp/s6-overlay-arch.tar.xz "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz" && \
tar -C / -Jxpf /tmp/s6-overlay-arch.tar.xz && \
curl -L -s -o /tmp/s6-overlay-symlinks-noarch.tar.xz "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz" && \
tar -C / -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && \
rm -f /tmp/s6-overlay-*.tar.xz /tmp/s6-version
# Copy configuration files
COPY docker/etc/s6-overlay/services /etc/services.d/
COPY docker/etc/nginx/nginx.conf /etc/nginx/nginx.conf
# Make service scripts executable
RUN chmod +x /etc/services.d/*/run /etc/services.d/*/finish
# Copy application files from builders
COPY --from=backend-builder /app/dist /app/backend/dist
COPY --from=backend-builder /app/node_modules /app/backend/node_modules
COPY --from=backend-builder /app/package.json /app/backend/package.json
COPY --from=backend-builder /app/drizzle.config.ts /app/backend/drizzle.config.ts
# Copy only the schema file for Drizzle migrations
COPY --from=backend-builder /app/src/db/schema.ts /app/backend/src/db/schema.ts
COPY --from=frontend-builder /app/.next /app/frontend/.next
COPY --from=frontend-builder /app/node_modules /app/frontend/node_modules
COPY --from=frontend-builder /app/package.json /app/frontend/package.json
COPY --from=frontend-builder /app/public /app/frontend/public
# Configure volumes and ports
VOLUME ["/app/backend/data", "/app/backend/uploads"]
EXPOSE 8080
# Set up healthcheck
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8080/ || exit 1
ENTRYPOINT ["/init"]

View File

@ -1,55 +0,0 @@
events {
worker_connections 1024;
}
http {
port_in_redirect on;
absolute_redirect off;
server {
listen 8080;
server_name localhost;
# Global timeout settings for all locations
proxy_read_timeout 86400s; # 24 hours
proxy_send_timeout 86400s; # 24 hours
proxy_connect_timeout 60s; # Connection establishment timeout
# API requests
location /api {
proxy_pass http://localhost:3001;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# WebSocket requests
location /ws {
proxy_pass http://localhost:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Frontend requests
location / {
proxy_pass http://localhost:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Security headers
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Frame-Options "SAMEORIGIN" always;
server_tokens off;
}
}

View File

@ -1,3 +0,0 @@
#!/usr/bin/with-contenv bash
s6-svc -d /var/run/s6/services/frontend
s6-svc -d /var/run/s6/services/nginx

View File

@ -1,8 +0,0 @@
#!/usr/bin/with-contenv bash
cd /app/backend
# Run database migrations before starting the app
yarn db:push
# Start the application
exec node dist/app.js

View File

@ -1,2 +0,0 @@
#!/usr/bin/with-contenv bash
s6-svc -d /var/run/s6/services/nginx

View File

@ -1,3 +0,0 @@
#!/usr/bin/with-contenv bash
cd /app/frontend
exec node_modules/.bin/next start

View File

@ -1,2 +0,0 @@
#!/usr/bin/with-contenv bash
exec nginx -g "daemon off;"

View File

@ -6,9 +6,9 @@ Perplexica's Search API makes it easy to use our AI-powered search engine. You
## Endpoint
### **POST** `http://localhost:3001/api/search`
### **POST** `http://localhost:3000/api/search`
**Note**: Replace `3001` with any other port if you've changed the default PORT
**Note**: Replace `3000` with any other port if you've changed the default PORT
### Request
@ -20,11 +20,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
{
"chatModel": {
"provider": "openai",
"model": "gpt-4o-mini"
"name": "gpt-4o-mini"
},
"embeddingModel": {
"provider": "openai",
"model": "text-embedding-3-large"
"name": "text-embedding-3-large"
},
"optimizationMode": "speed",
"focusMode": "webSearch",
@ -38,18 +38,18 @@ The API accepts a JSON object in the request body, where you define the focus mo
### Request Parameters
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
- `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- Optional fields for custom OpenAI configuration:
- `customOpenAIBaseURL`: If you're using a custom OpenAI instance, provide the base URL.
- `customOpenAIKey`: The API key for a custom OpenAI instance.
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- `provider`: The provider for the embedding model (e.g., `openai`).
- `model`: The specific embedding model (e.g., `text-embedding-3-large`).
- `name`: The specific embedding model (e.g., `text-embedding-3-large`).
- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
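Putting the parameters above together, here is a minimal, illustrative request sketch (not part of the diff): it assumes a local instance on port 3000, an OpenAI provider configured, and the `webSearch` focus mode; the exact model keys come from `/api/models` and depend on your configuration.

```typescript
// Hedged sketch of a request using the renamed fields (`name` instead of `model`).
async function searchExample() {
  const res = await fetch('http://localhost:3000/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatModel: { provider: 'openai', name: 'gpt-4o-mini' },
      embeddingModel: { provider: 'openai', name: 'text-embedding-3-large' },
      optimizationMode: 'speed',
      focusMode: 'webSearch',
      query: 'How does an A.C. work?',
      history: [],
    }),
  });

  if (!res.ok) throw new Error(`Search request failed with status ${res.status}`);

  // The route responds with the generated answer and the sources it used.
  const { message, sources } = (await res.json()) as { message: string; sources: unknown[] };
  console.log(message);
  console.log(`${sources.length} sources`);
}

searchExample();
```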

View File

@ -4,7 +4,7 @@ Curious about how Perplexica works? Don't worry, we'll cover it here. Before we
We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:
1. The message is sent via WS to the backend server where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
1. The message is sent to the `/api/chat` route where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
2. The chain is now invoked; first, the message is passed to another chain where it first predicts (using the chat history and the question) whether there is a need for sources and searching the web. If there is, it will generate a query (in accordance with the chat history) for searching the web that we'll take up later. If not, the chain will end there, and then the answer generator chain, also known as the response generator, will be started.
3. The query returned by the first chain is passed to SearXNG to search the web for information.
4. The information retrieved at this point comes from a keyword-based search. We then convert both the retrieved information and the query into embeddings and perform a similarity search to find the sources most relevant to answering the query.
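To make the similarity step concrete, here is a small illustrative sketch (not Perplexica's actual implementation) of re-ranking retrieved passages by cosine similarity between the query embedding and each passage embedding:

```typescript
// Illustrative only: rank passages by cosine similarity to the query embedding.
// How the embeddings are produced depends on the configured embedding model.
const cosineSimilarity = (a: number[], b: number[]): number => {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
};

type Passage = { text: string; embedding: number[] };

const rankPassages = (queryEmbedding: number[], passages: Passage[]): Passage[] =>
  passages
    .map((p) => ({ ...p, score: cosineSimilarity(queryEmbedding, p.embedding) }))
    .sort((a, b) => b.score - a.score);
```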

View File

@ -1,46 +0,0 @@
# Accessing Perplexica over a Network
This guide explains how to access Perplexica over your network using the nginx reverse proxy included in the Docker setup.
## Basic Network Access
Perplexica is automatically accessible from any device on your network:
1. Start Perplexica using Docker Compose:
```bash
docker compose up -d
```
2. Find your server's IP address:
- **Windows**: `ipconfig` in Command Prompt
- **macOS**: `ifconfig | grep "inet "` in Terminal
- **Linux**: `ip addr show | grep "inet "` in Terminal
3. Access Perplexica from any device on your network:
```
http://YOUR_SERVER_IP:8080
```
## Custom Port Configuration
If you need to use a different port instead of the default 8080:
1. Modify the `docker-compose.yaml` file:
```yaml
perplexica:
ports:
- "YOUR_CUSTOM_PORT:8080"
```
2. Restart the containers:
```bash
docker compose down && docker compose up -d
```
## Troubleshooting
If you encounter issues accessing Perplexica over your network:
1. **Firewall Settings**: Ensure port 8080 (or your custom port) is allowed in your firewall
2. **Docker Logs**: Check for any connection issues with `docker logs perplexica`
3. **Network Access**: Make sure your devices are on the same network and can reach the server

View File

@ -39,11 +39,8 @@ To update Perplexica to the latest version, follow these steps:
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Execute `npm i` in both the `ui` folder and the root directory.
5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
4. After populating the configuration run `npm i`.
5. Install the dependencies and then execute `npm run build`.
6. Finally, start the app by running `npm run start`.
---

View File

@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
export default defineConfig({
dialect: 'sqlite',
schema: './src/db/schema.ts',
schema: './src/lib/db/schema.ts',
out: './drizzle',
dbCredentials: {
url: './data/db.sqlite',

next-env.d.ts (new vendored file, 5 lines)
View File

@ -0,0 +1,5 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

View File

@ -1,5 +1,6 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
output: 'standalone',
images: {
remotePatterns: [
{
@ -7,6 +8,7 @@ const nextConfig = {
},
],
},
serverExternalPackages: ['pdf-parse'],
};
export default nextConfig;

View File

@ -1,53 +1,63 @@
{
"name": "perplexica-backend",
"version": "1.10.0-rc3",
"name": "perplexica-frontend",
"version": "1.10.0",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
"start": "npm run db:push && node dist/app.js",
"build": "tsc",
"dev": "nodemon --ignore uploads/ src/app.ts ",
"db:push": "drizzle-kit push sqlite",
"format": "prettier . --check",
"format:write": "prettier . --write"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.10",
"@types/cors": "^2.8.17",
"@types/express": "^4.17.21",
"@types/html-to-text": "^9.0.4",
"@types/multer": "^1.4.12",
"@types/pdf-parse": "^1.1.4",
"@types/readable-stream": "^4.0.11",
"@types/ws": "^8.5.12",
"drizzle-kit": "^0.22.7",
"nodemon": "^3.1.0",
"prettier": "^3.2.5",
"ts-node": "^10.9.2",
"typescript": "^5.4.3"
"dev": "next dev",
"build": "npm run db:push && next build",
"start": "next start",
"lint": "next lint",
"format:write": "prettier . --write",
"db:push": "drizzle-kit push"
},
"dependencies": {
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@langchain/anthropic": "^0.2.3",
"@langchain/community": "^0.2.16",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/openai": "^0.0.25",
"@langchain/google-genai": "^0.0.23",
"@xenova/transformers": "^2.17.1",
"axios": "^1.6.8",
"better-sqlite3": "^11.0.0",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
"axios": "^1.8.3",
"better-sqlite3": "^11.9.1",
"clsx": "^2.1.0",
"compute-cosine-similarity": "^1.1.0",
"compute-dot": "^1.1.0",
"cors": "^2.8.5",
"dotenv": "^16.4.5",
"drizzle-orm": "^0.31.2",
"express": "^4.19.2",
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"langchain": "^0.1.30",
"mammoth": "^1.8.0",
"multer": "^1.4.5-lts.1",
"lucide-react": "^0.363.0",
"markdown-to-jsx": "^7.7.2",
"next": "^15.2.2",
"next-themes": "^0.3.0",
"pdf-parse": "^1.1.1",
"winston": "^3.13.0",
"ws": "^8.17.1",
"react": "^18",
"react-dom": "^18",
"react-text-to-speech": "^0.14.5",
"react-textarea-autosize": "^8.5.3",
"sonner": "^1.4.41",
"tailwind-merge": "^2.2.2",
"winston": "^3.17.0",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",
"@types/html-to-text": "^9.0.4",
"@types/node": "^20",
"@types/pdf-parse": "^1.1.4",
"@types/react": "^18",
"@types/react-dom": "^18",
"autoprefixer": "^10.0.1",
"drizzle-kit": "^0.30.5",
"eslint": "^8",
"eslint-config-next": "14.1.4",
"postcss": "^8",
"prettier": "^3.2.5",
"tailwindcss": "^3.3.0",
"typescript": "^5"
}
}

View File

(image file: 1.3 KiB before and after)

View File

(image file: 629 B before and after)

View File

@ -1,5 +1,4 @@
[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
@ -24,4 +23,4 @@ MODEL_NAME = ""
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL
SEARXNG = "" # SearxNG API URL - http://localhost:32768

View File

@ -1,38 +0,0 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});

src/app/api/chat/route.ts (new file, 304 lines)
View File

@ -0,0 +1,304 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
type Message = {
messageId: string;
chatId: string;
content: string;
};
type ChatModel = {
provider: string;
name: string;
};
type EmbeddingModel = {
provider: string;
name: string;
};
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
};
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
) => {
let recievedMessage = '';
let sources: any[] = [];
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
recievedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
sources = parsedData.data;
}
});
stream.on('end', () => {
writer.write(
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
}) + '\n',
),
);
writer.close();
db.insert(messagesSchema)
.values({
content: recievedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
});
stream.on('error', (data) => {
const parsedData = JSON.parse(data);
writer.write(
encoder.encode(
JSON.stringify({
type: 'error',
data: parsedData.data,
}),
),
);
writer.close();
});
};
const handleHistorySave = async (
message: Message,
humanMessageId: string,
focusMode: string,
files: string[],
) => {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, message.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: message.chatId,
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: message.content,
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, message.chatId),
),
)
.execute();
}
};
export const POST = async (req: Request) => {
try {
const body = (await req.json()) as Body;
const { message } = body;
if (message.content === '') {
return Response.json(
{
message: 'Please provide a message to process',
},
{ status: 400 },
);
}
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.name || Object.keys(chatModelProvider)[0]
];
const embeddingProvider =
embeddingModelProviders[
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
];
const embeddingModel =
embeddingProvider[
body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
];
let llm: BaseChatModel | undefined;
let embedding = embeddingModel.model;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
if (!embedding) {
return Response.json(
{ error: 'Invalid embedding model' },
{ status: 400 },
);
}
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const handler = searchHandlers[body.focusMode];
if (!handler) {
return Response.json(
{
message: 'Invalid focus mode',
},
{ status: 400 },
);
}
const stream = await handler.searchAndAnswer(
message.content,
history,
llm,
embedding,
body.optimizationMode,
body.files,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
headers: {
'Content-Type': 'text/event-stream',
Connection: 'keep-alive',
'Cache-Control': 'no-cache, no-transform',
},
});
} catch (err) {
console.error('An error ocurred while processing chat request:', err);
return Response.json(
{ message: 'An error ocurred while processing chat request' },
{ status: 500 },
);
}
};
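For context, a hypothetical client-side sketch (not the actual UI code) of reading the newline-delimited JSON stream this route writes, using the `message`, `sources`, and `messageEnd` event types emitted above:

```typescript
// Hedged sketch: consume the /api/chat stream emitted by the route above.
async function streamChat(requestBody: unknown): Promise<string> {
  const res = await fetch('http://localhost:3000/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(requestBody),
  });

  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let answer = '';

  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    // Events are JSON objects separated by newlines; keep any partial line in the buffer.
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? '';

    for (const line of lines.filter(Boolean)) {
      const event = JSON.parse(line);
      if (event.type === 'message') answer += event.data;
      else if (event.type === 'sources') console.log('sources:', event.data);
      else if (event.type === 'messageEnd') return answer;
    }
  }

  return answer;
}
```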

View File

@ -0,0 +1,69 @@
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
export const GET = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, id),
});
return Response.json(
{
chat: chatExists,
messages: chatMessages,
},
{ status: 200 },
);
} catch (err) {
console.error('Error in getting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
export const DELETE = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
await db.delete(chats).where(eq(chats.id, id)).execute();
await db.delete(messages).where(eq(messages.chatId, id)).execute();
return Response.json(
{ message: 'Chat deleted successfully' },
{ status: 200 },
);
} catch (err) {
console.error('Error in deleting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

View File

@ -0,0 +1,15 @@
import db from '@/lib/db';
export const GET = async (req: Request) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return Response.json({ chats: chats }, { status: 200 });
} catch (err) {
console.error('Error in getting chats: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
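Combined with the single-chat route above, the library screen only needs plain `fetch` calls; a hedged sketch follows (the `/api/chats` paths are assumptions inferred from the app directory layout, which this excerpt does not show):

```typescript
// Hypothetical usage of the chat-history routes above; paths are assumed, not confirmed by the diff.
async function inspectAndDeleteChat(chatId: string) {
  const { chats } = await (await fetch('http://localhost:3000/api/chats')).json();
  console.log(`${chats.length} chats in the library`);

  const { chat, messages } = await (
    await fetch(`http://localhost:3000/api/chats/${chatId}`)
  ).json();
  console.log(`"${chat.title}" has ${messages.length} messages`);

  // DELETE removes both the chat row and its messages, as implemented above.
  await fetch(`http://localhost:3000/api/chats/${chatId}`, { method: 'DELETE' });
}
```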

View File

@ -1,26 +1,22 @@
import express from 'express';
import {
getAnthropicApiKey,
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
getGeminiApiKey,
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
updateConfig,
} from '@/lib/config';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import {
getGroqApiKey,
getOllamaApiEndpoint,
getAnthropicApiKey,
getGeminiApiKey,
getOpenaiApiKey,
updateConfig,
getCustomOpenaiApiUrl,
getCustomOpenaiApiKey,
getCustomOpenaiModelName,
} from '../config';
import logger from '../utils/logger';
} from '@/lib/providers';
const router = express.Router();
router.get('/', async (_, res) => {
export const GET = async (req: Request) => {
try {
const config = {};
const config: Record<string, any> = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
@ -61,44 +57,53 @@ router.get('/', async (_, res) => {
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
res.status(200).json(config);
} catch (err: any) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error getting config: ${err.message}`);
return Response.json({ ...config }, { status: 200 });
} catch (err) {
console.error('An error ocurred while getting config:', err);
return Response.json(
{ message: 'An error ocurred while getting config' },
{ status: 500 },
);
}
});
};
router.post('/', async (req, res) => {
const config = req.body;
export const POST = async (req: Request) => {
try {
const config = await req.json();
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
};
};
updateConfig(updatedConfig);
updateConfig(updatedConfig);
res.status(200).json({ message: 'Config updated' });
});
export default router;
return Response.json({ message: 'Config updated' }, { status: 200 });
} catch (err) {
console.error('An error ocurred while updating config:', err);
return Response.json(
{ message: 'An error ocurred while updating config' },
{ status: 500 },
);
}
};
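For illustration, the POST handler accepts a flat JSON body whose keys map onto the nested `MODELS` sections above; a hedged client sketch follows (the `/api/config` path is an assumption, since the file name is not shown in this excerpt, and only a subset of the accepted keys is included):

```typescript
// Hypothetical sketch: update provider settings via the rewritten config route.
// The flat keys below mirror what the POST handler reads (openaiApiKey, ollamaApiUrl, ...).
async function updateConfigExample() {
  await fetch('http://localhost:3000/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      openaiApiKey: 'sk-...',                            // MODELS.OPENAI.API_KEY
      ollamaApiUrl: 'http://host.docker.internal:11434', // MODELS.OLLAMA.API_URL
      customOpenaiApiUrl: '',
      customOpenaiApiKey: '',
      customOpenaiModelName: '',
    }),
  });
}
```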

View File

@ -0,0 +1,61 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
export const GET = async (req: Request) => {
try {
const data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
).results;
}),
])
)
.map((result) => result)
.flat()
.sort(() => Math.random() - 0.5);
return Response.json(
{
blogs: data,
},
{
status: 200,
},
);
} catch (err) {
console.error(`An error ocurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',
},
{
status: 500,
},
);
}
};

View File

@ -0,0 +1,83 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while searching images: ${err}`);
return Response.json(
{ message: 'An error ocurred while searching images' },
{ status: 500 },
);
}
};

View File

@ -0,0 +1,47 @@
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete (chatModelProviders[provider][model] as { model?: unknown })
.model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete (embeddingModelProviders[provider][model] as { model?: unknown })
.model;
});
});
return Response.json(
{
chatModelProviders,
embeddingModelProviders,
},
{
status: 200,
},
);
} catch (err) {
console.error('An error ocurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',
},
{
status: 500,
},
);
}
};
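A short, hypothetical sketch of using this endpoint (documented above as `http://localhost:3000/api/models`) to discover the provider and model keys that the search and chat APIs expect in their `name` fields:

```typescript
// Illustrative sketch: list available chat and embedding model keys.
async function listModelKeys() {
  const res = await fetch('http://localhost:3000/api/models');
  const { chatModelProviders, embeddingModelProviders } = await res.json();

  for (const [provider, models] of Object.entries(chatModelProviders)) {
    // The object keys (e.g. "gpt-4o-mini") are the values to pass as `name`.
    console.log(provider, Object.keys(models as Record<string, unknown>));
  }

  console.log('embedding providers:', Object.keys(embeddingModelProviders));
}
```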

View File

@ -1,33 +1,29 @@
import express from 'express';
import logger from '../utils/logger';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import { searchHandlers } from '../websocket/messageHandler';
} from '@/lib/providers';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '../search/metaSearchAgent';
import { MetaSearchAgentType } from '@/lib/search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
interface chatModel {
provider: string;
model: string;
name: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
}
interface embeddingModel {
provider: string;
model: string;
name: string;
}
interface ChatRequestBody {
@ -39,27 +35,24 @@ interface ChatRequestBody {
history: Array<[string, string]>;
}
router.post('/', async (req, res) => {
export const POST = async (req: Request) => {
try {
const body: ChatRequestBody = req.body;
const body: ChatRequestBody = await req.json();
if (!body.focusMode || !body.query) {
return res.status(400).json({ message: 'Missing focus mode or query' });
return Response.json(
{ message: 'Missing focus mode or query' },
{ status: 400 },
);
}
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
@ -70,13 +63,13 @@ router.post('/', async (req, res) => {
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
body.chatModel?.name ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
const embeddingModel =
body.embeddingModel?.model ||
body.embeddingModel?.name ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
@ -84,7 +77,7 @@ router.post('/', async (req, res) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.model || getCustomOpenaiModelName(),
modelName: body.chatModel?.name || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
@ -111,13 +104,16 @@ router.post('/', async (req, res) => {
}
if (!llm || !embeddings) {
return res.status(400).json({ message: 'Invalid model selected' });
return Response.json(
{ message: 'Invalid model selected' },
{ status: 400 },
);
}
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
return res.status(400).json({ message: 'Invalid focus mode' });
return Response.json({ message: 'Invalid focus mode' }, { status: 400 });
}
const emitter = await searchHandler.searchAndAnswer(
@ -129,30 +125,45 @@ router.post('/', async (req, res) => {
[],
);
let message = '';
let sources = [];
return new Promise(
(
resolve: (value: Response) => void,
reject: (value: Response) => void,
) => {
let message = '';
let sources: any[] = [];
emitter.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
});
emitter.on('data', (data) => {
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
} catch (error) {
reject(
Response.json({ message: 'Error parsing data' }, { status: 500 }),
);
}
});
emitter.on('end', () => {
res.status(200).json({ message, sources });
});
emitter.on('end', () => {
resolve(Response.json({ message, sources }, { status: 200 }));
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
res.status(500).json({ message: parsedData.data });
});
emitter.on('error', (error) => {
reject(
Response.json({ message: 'Search error', error }, { status: 500 }),
);
});
},
);
} catch (err: any) {
logger.error(`Error in getting search results: ${err.message}`);
res.status(500).json({ message: 'An error has occurred.' });
console.error(`Error in getting search results: ${err.message}`);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
});
export default router;
};

View File

@ -0,0 +1,81 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
);
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error occurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error occurred while generating suggestions' },
{ status: 500 },
);
}
};
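
A minimal client-side sketch of calling this route (assuming it is served at /api/suggestions, which matches the getSuggestions helper further down in this diff); the request body mirrors the SuggestionsGenerationBody interface above and the response carries the generated suggestions:

// Hypothetical usage sketch, not part of the diff.
const fetchSuggestions = async () => {
  const res = await fetch('/api/suggestions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatHistory: [
        { role: 'user', content: 'What is Perplexica?' },
        { role: 'assistant', content: 'Perplexica is an AI-powered search engine.' },
      ],
      // chatModel is optional; when omitted the route falls back to the first
      // available provider and model.
    }),
  });
  const data = await res.json();
  return data.suggestions;
};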

View File

@ -0,0 +1,134 @@
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
interface FileRes {
fileName: string;
fileExtension: string;
fileId: string;
}
const uploadDir = path.join(process.cwd(), 'uploads');
if (!fs.existsSync(uploadDir)) {
fs.mkdirSync(uploadDir, { recursive: true });
}
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
export async function POST(req: Request) {
try {
const formData = await req.formData();
const files = formData.getAll('files') as File[];
const embedding_model = formData.get('embedding_model');
const embedding_model_provider = formData.get('embedding_model_provider');
if (!embedding_model || !embedding_model_provider) {
return NextResponse.json(
{ message: 'Missing embedding model or provider' },
{ status: 400 },
);
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel =
embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
let embeddingsModel =
embeddingModels[provider as string]?.[embeddingModel as string]?.model;
if (!embeddingsModel) {
return NextResponse.json(
{ message: 'Invalid embedding model selected' },
{ status: 400 },
);
}
const processedFiles: FileRes[] = [];
await Promise.all(
files.map(async (file: any) => {
const fileExtension = file.name.split('.').pop();
if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
return NextResponse.json(
{ message: 'File type not supported' },
{ status: 400 },
);
}
const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
const filePath = path.join(uploadDir, uniqueFileName);
const buffer = Buffer.from(await file.arrayBuffer());
fs.writeFileSync(filePath, new Uint8Array(buffer));
let docs: any[] = [];
if (fileExtension === 'pdf') {
const loader = new PDFLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'docx') {
const loader = new DocxLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'txt') {
const text = fs.readFileSync(filePath, 'utf-8');
docs = [
new Document({ pageContent: text, metadata: { title: file.name } }),
];
}
const splitted = await splitter.splitDocuments(docs);
const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(
extractedDataPath,
JSON.stringify({
title: file.name,
contents: splitted.map((doc) => doc.pageContent),
}),
);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsDataPath = filePath.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(
embeddingsDataPath,
JSON.stringify({
title: file.name,
embeddings,
}),
);
processedFiles.push({
fileName: file.name,
fileExtension: fileExtension,
fileId: uniqueFileName.replace(/\.\w+$/, ''),
});
}),
);
return NextResponse.json({
files: processedFiles,
});
} catch (error) {
console.error('Error uploading file:', error);
return NextResponse.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
}
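
A corresponding sketch of exercising the upload route from the browser (assuming it is served at /api/uploads, as the Attach components later in this diff do); the field names follow the formData reads above, and the response returns the processed file descriptors:

// Hypothetical usage sketch, not part of the diff. The provider and model keys
// below are placeholders; real values would come from the models listing.
const uploadFiles = async (files: File[]) => {
  const data = new FormData();
  files.forEach((file) => data.append('files', file));
  data.append('embedding_model_provider', 'openai');
  data.append('embedding_model', 'text-embedding-3-small');

  const res = await fetch('/api/uploads', { method: 'POST', body: data });
  const json = await res.json();
  return json.files as { fileName: string; fileExtension: string; fileId: string }[];
};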

View File

@ -0,0 +1,83 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ videos }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching videos: ${err}`);
return Response.json(
{ message: 'An error occurred while searching videos' },
{ status: 500 },
);
}
};

View File

@ -0,0 +1,9 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
};
export default Page;

View File

@ -19,7 +19,7 @@ const Page = () => {
useEffect(() => {
const fetchData = async () => {
try {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
const res = await fetch(`/api/discover`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

View File

@ -21,7 +21,7 @@ const Page = () => {
const fetchChats = async () => {
setLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
const res = await fetch(`/api/chats`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

View File

@ -116,7 +116,7 @@ const Page = () => {
useEffect(() => {
const fetchConfig = async () => {
setIsLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
const res = await fetch(`/api/config`, {
headers: {
'Content-Type': 'application/json',
},
@ -187,16 +187,13 @@ const Page = () => {
[key]: value,
} as SettingsType;
const response = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/config`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(updatedConfig),
const response = await fetch(`/api/config`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify(updatedConfig),
});
if (!response.ok) {
throw new Error('Failed to update config');
@ -208,7 +205,7 @@ const Page = () => {
key.toLowerCase().includes('api') ||
key.toLowerCase().includes('url')
) {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
const res = await fetch(`/api/config`, {
headers: {
'Content-Type': 'application/json',
},

View File

@ -48,11 +48,17 @@ const Chat = ({
});
useEffect(() => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
};
if (messages.length === 1) {
document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
}
if (messages[messages.length - 1]?.role == 'user') {
scroll();
}
}, [messages]);
return (

View File

@ -29,280 +29,154 @@ export interface File {
fileId: string;
}
const useSocket = (
url: string,
setIsWSReady: (ready: boolean) => void,
setError: (error: boolean) => void,
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
const wsRef = useRef<WebSocket | null>(null);
const reconnectTimeoutRef = useRef<NodeJS.Timeout>();
const retryCountRef = useRef(0);
const isCleaningUpRef = useRef(false);
const MAX_RETRIES = 3;
const INITIAL_BACKOFF = 1000; // 1 second
const isConnectionErrorRef = useRef(false);
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const getBackoffDelay = (retryCount: number) => {
return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
};
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
useEffect(() => {
const connectWs = async () => {
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
return toast.error('No chat models available');
}
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem(
'embeddingModelProvider',
);
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/models`,
{
headers: {
'Content-Type': 'application/json',
},
},
).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return toast.error('No chat models available');
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
const wsURL = new URL(url);
const searchParams = new URLSearchParams({});
searchParams.append('chatModel', chatModel!);
searchParams.append('chatModelProvider', chatModelProvider);
if (chatModelProvider === 'custom_openai') {
searchParams.append(
'openAIApiKey',
localStorage.getItem('openAIApiKey')!,
);
searchParams.append(
'openAIBaseURL',
localStorage.getItem('openAIBaseURL')!,
);
}
searchParams.append('embeddingModel', embeddingModel!);
searchParams.append('embeddingModelProvider', embeddingModelProvider);
wsURL.search = searchParams.toString();
const ws = new WebSocket(wsURL.toString());
wsRef.current = ws;
const timeoutId = setTimeout(() => {
if (ws.readyState !== 1) {
toast.error(
'Failed to connect to the server. Please try again later.',
);
}
}, 10000);
ws.addEventListener('message', (e) => {
const data = JSON.parse(e.data);
if (data.type === 'signal' && data.data === 'open') {
const interval = setInterval(() => {
if (ws.readyState === 1) {
setIsWSReady(true);
setError(false);
if (retryCountRef.current > 0) {
toast.success('Connection restored.');
}
retryCountRef.current = 0;
clearInterval(interval);
}
}, 5);
clearTimeout(timeoutId);
console.debug(new Date(), 'ws:connected');
}
if (data.type === 'error') {
isConnectionErrorRef.current = true;
setError(true);
toast.error(data.data);
}
});
ws.onerror = () => {
clearTimeout(timeoutId);
setIsWSReady(false);
toast.error('WebSocket connection error.');
};
ws.onclose = () => {
clearTimeout(timeoutId);
setIsWSReady(false);
console.debug(new Date(), 'ws:disconnected');
if (!isCleaningUpRef.current && !isConnectionErrorRef.current) {
toast.error('Connection lost. Attempting to reconnect...');
attemptReconnect();
}
};
} catch (error) {
console.debug(new Date(), 'ws:error', error);
setIsWSReady(false);
attemptReconnect();
}
};
const attemptReconnect = () => {
retryCountRef.current += 1;
if (retryCountRef.current > MAX_RETRIES) {
console.debug(new Date(), 'ws:max_retries');
setError(true);
toast.error(
'Unable to connect to server after multiple attempts. Please refresh the page to try again.',
);
return;
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
const backoffDelay = getBackoffDelay(retryCountRef.current);
console.debug(
new Date(),
`ws:retry attempt=${retryCountRef.current}/${MAX_RETRIES} delay=${backoffDelay}ms`,
);
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
reconnectTimeoutRef.current = setTimeout(() => {
connectWs();
}, backoffDelay);
};
connectWs();
return () => {
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
isCleaningUpRef.current = true;
console.debug(new Date(), 'ws:cleanup');
}
};
}, [url, setIsWSReady, setError]);
return wsRef.current;
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
const loadMessages = async (
@ -315,15 +189,12 @@ const loadMessages = async (
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
);
});
if (res.status === 404) {
setNotFound(true);
@ -368,22 +239,37 @@ const loadMessages = async (
const ChatWindow = ({ id }: { id?: string }) => {
const searchParams = useSearchParams();
const initialMessage = searchParams?.get('q');
const initialMessage = searchParams.get('q');
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
const [isWSReady, setIsWSReady] = useState(false);
const ws = useSocket(
process.env.NEXT_PUBLIC_WS_URL === 'auto'
? `${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${window.location.host}/ws`
: process.env.NEXT_PUBLIC_WS_URL!,
setIsWSReady,
setHasError,
);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
@ -401,8 +287,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [notFound, setNotFound] = useState(false);
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
useEffect(() => {
if (
chatId &&
@ -428,16 +312,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
return () => {
if (ws?.readyState === 1) {
ws.close();
console.debug(new Date(), 'ws:cleanup');
}
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const messagesRef = useRef<Message[]>([]);
useEffect(() => {
@ -445,18 +319,18 @@ const ChatWindow = ({ id }: { id?: string }) => {
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isWSReady) {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isWSReady]);
}, [isMessagesLoaded, isConfigReady]);
const sendMessage = async (message: string, messageId?: string) => {
if (loading) return;
if (!ws || ws.readyState !== WebSocket.OPEN) {
toast.error('Cannot send message while disconnected');
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
@ -469,21 +343,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
ws.send(
JSON.stringify({
type: 'message',
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: [...chatHistory, ['human', message]],
}),
);
setMessages((prevMessages) => [
...prevMessages,
{
@ -495,9 +354,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
},
]);
const messageHandler = async (e: MessageEvent) => {
const data = JSON.parse(e.data);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
@ -560,11 +417,25 @@ const ChatWindow = ({ id }: { id?: string }) => {
['assistant', recievedMessage],
]);
ws?.removeEventListener('message', messageHandler);
setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
@ -581,21 +452,62 @@ const ChatWindow = ({ id }: { id?: string }) => {
}),
);
}
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document.getElementById('search-images')?.click();
}
if (autoVideoSearch === 'true') {
document.getElementById('search-videos')?.click();
}
}
};
ws?.addEventListener('message', messageHandler);
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
};
const rewrite = (messageId: string) => {
@ -616,11 +528,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
};
useEffect(() => {
if (isReady && initialMessage && ws?.readyState === 1) {
if (isReady && initialMessage && isConfigReady) {
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [ws?.readyState, isReady, initialMessage, isWSReady]);
}, [isConfigReady, isReady, initialMessage]);
if (hasError) {
return (

View File

@ -29,15 +29,12 @@ const DeleteChat = ({
const handleDelete = async () => {
setLoading(true);
try {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
},
const res = await fetch(`/api/chats/${chatId}`, {
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
},
);
});
if (res.status != 200) {
throw new Error('Failed to delete chat');

View File

@ -12,13 +12,18 @@ import {
Layers3,
Plus,
} from 'lucide-react';
import Markdown from 'markdown-to-jsx';
import Markdown, { MarkdownToJSX } from 'markdown-to-jsx';
import Copy from './MessageActions/Copy';
import Rewrite from './MessageActions/Rewrite';
import MessageSources from './MessageSources';
import SearchImages from './SearchImages';
import SearchVideos from './SearchVideos';
import { useSpeech } from 'react-text-to-speech';
import ThinkBox from './ThinkBox';
const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
return <ThinkBox content={children as string} />;
};
const MessageBox = ({
message,
@ -44,31 +49,58 @@ const MessageBox = ({
useEffect(() => {
const regex = /\[(\d+)\]/g;
let processedMessage = message.content;
if (message.role === 'assistant' && message.content.includes('<think>')) {
const openThinkTag = processedMessage.match(/<think>/g)?.length || 0;
const closeThinkTag = processedMessage.match(/<\/think>/g)?.length || 0;
if (openThinkTag > closeThinkTag) {
processedMessage += '</think> <a> </a>'; // The extra <a> </a> is to prevent the think component from looking bad
}
}
if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length > 0
) {
return setParsedMessage(
message.content.replace(
setParsedMessage(
processedMessage.replace(
regex,
(_, number) =>
`<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
`<a href="${
message.sources?.[number - 1]?.metadata?.url
}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
),
);
return;
}
setSpeechMessage(message.content.replace(regex, ''));
setParsedMessage(message.content);
setParsedMessage(processedMessage);
}, [message.content, message.sources, message.role]);
const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
const markdownOverrides: MarkdownToJSX.Options = {
overrides: {
think: {
component: ThinkTagProcessor,
},
},
};
return (
<div>
{message.role === 'user' && (
<div className={cn('w-full', messageIndex === 0 ? 'pt-16' : 'pt-8', 'break-words')}>
<div
className={cn(
'w-full',
messageIndex === 0 ? 'pt-16' : 'pt-8',
'break-words',
)}
>
<h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
{message.content}
</h2>
@ -105,11 +137,13 @@ const MessageBox = ({
Answer
</h3>
</div>
<Markdown
className={cn(
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'max-w-none break-words text-black dark:text-white',
)}
options={markdownOverrides}
>
{parsedMessage}
</Markdown>
@ -187,10 +221,12 @@ const MessageBox = ({
<SearchImages
query={history[messageIndex - 1].content}
chatHistory={history.slice(0, messageIndex - 1)}
messageId={message.messageId}
/>
<SearchVideos
chatHistory={history.slice(0, messageIndex - 1)}
query={history[messageIndex - 1].content}
messageId={message.messageId}
/>
</div>
</div>

View File

@ -41,7 +41,7 @@ const Attach = ({
data.append('embedding_model_provider', embeddingModelProvider!);
data.append('embedding_model', embeddingModel!);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
const res = await fetch(`/api/uploads`, {
method: 'POST',
body: data,
});

View File

@ -39,7 +39,7 @@ const AttachSmall = ({
data.append('embedding_model_provider', embeddingModelProvider!);
data.append('embedding_model', embeddingModel!);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
const res = await fetch(`/api/uploads`, {
method: 'POST',
body: data,
});

View File

@ -45,25 +45,13 @@ const focusModes = [
key: 'youtubeSearch',
title: 'Youtube',
description: 'Search and watch videos',
icon: (
<SiYoutube
className="h-5 w-auto mr-0.5"
onPointerEnterCapture={undefined}
onPointerLeaveCapture={undefined}
/>
),
icon: <SiYoutube className="h-5 w-auto mr-0.5" />,
},
{
key: 'redditSearch',
title: 'Reddit',
description: 'Search for discussions and opinions',
icon: (
<SiReddit
className="h-5 w-auto mr-0.5"
onPointerEnterCapture={undefined}
onPointerLeaveCapture={undefined}
/>
),
icon: <SiReddit className="h-5 w-auto mr-0.5" />,
},
];

View File

@ -69,11 +69,15 @@ const MessageSources = ({ sources }: { sources: Document[] }) => {
<div className="flex flex-row items-center space-x-1">
{sources.slice(3, 6).map((source, i) => {
return source.metadata.url === 'File' ? (
<div className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full">
<div
key={i}
className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full"
>
<File size={12} className="text-white/70" />
</div>
) : (
<img
key={i}
src={`https://s2.googleusercontent.com/s2/favicons?domain_url=${source.metadata.url}`}
width={16}
height={16}

View File

@ -14,9 +14,11 @@ type Image = {
const SearchImages = ({
query,
chatHistory,
messageId,
}: {
query: string;
chatHistory: Message[];
messageId: string;
}) => {
const [images, setImages] = useState<Image[] | null>(null);
const [loading, setLoading] = useState(false);
@ -27,7 +29,7 @@ const SearchImages = ({
<>
{!loading && images === null && (
<button
id="search-images"
id={`search-images-${messageId}`}
onClick={async () => {
setLoading(true);
@ -37,27 +39,24 @@ const SearchImages = ({
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/images`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
const res = await fetch(`/api/images`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
});
const data = await res.json();

View File

@ -27,9 +27,11 @@ declare module 'yet-another-react-lightbox' {
const Searchvideos = ({
query,
chatHistory,
messageId,
}: {
query: string;
chatHistory: Message[];
messageId: string;
}) => {
const [videos, setVideos] = useState<Video[] | null>(null);
const [loading, setLoading] = useState(false);
@ -42,7 +44,7 @@ const Searchvideos = ({
<>
{!loading && videos === null && (
<button
id="search-videos"
id={`search-videos-${messageId}`}
onClick={async () => {
setLoading(true);
@ -52,27 +54,24 @@ const Searchvideos = ({
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/videos`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
const res = await fetch(`/api/videos`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
});
const data = await res.json();

View File

@ -16,8 +16,6 @@ const VerticalIconContainer = ({ children }: { children: ReactNode }) => {
const Sidebar = ({ children }: { children: React.ReactNode }) => {
const segments = useSelectedLayoutSegments();
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
const navLinks = [
{
icon: Home,

View File

@ -0,0 +1,43 @@
'use client';
import { useState } from 'react';
import { cn } from '@/lib/utils';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
content: string;
}
const ThinkBox = ({ content }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(false);
return (
<div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
<button
onClick={() => setIsExpanded(!isExpanded)}
className="w-full flex items-center justify-between px-4 py-1 text-black/90 dark:text-white/90 hover:bg-light-200 dark:hover:bg-dark-200 transition duration-200"
>
<div className="flex items-center space-x-2">
<BrainCircuit
size={20}
className="text-[#9C27B0] dark:text-[#CE93D8]"
/>
<p className="font-medium text-sm">Thinking Process</p>
</div>
{isExpanded ? (
<ChevronUp size={18} className="text-black/70 dark:text-white/70" />
) : (
<ChevronDown size={18} className="text-black/70 dark:text-white/70" />
)}
</button>
{isExpanded && (
<div className="px-4 py-3 text-black/80 dark:text-white/80 text-sm border-t border-light-200 dark:border-dark-200 bg-light-100/50 dark:bg-dark-100/50 whitespace-pre-wrap">
{content}
</div>
)}
</div>
);
};
export default ThinkBox;

View File

@ -1,161 +0,0 @@
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';
const configFileName = 'config.toml';
interface Config {
GENERAL: {
PORT: number;
SIMILARITY_MEASURE: string;
KEEP_ALIVE: string;
};
MODELS: {
OPENAI: {
API_KEY: string;
};
GROQ: {
API_KEY: string;
};
ANTHROPIC: {
API_KEY: string;
};
GEMINI: {
API_KEY: string;
};
OLLAMA: {
API_URL: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
MODEL_NAME: string;
};
};
API_ENDPOINTS: {
SEARXNG: string;
};
}
type RecursivePartial<T> = {
[P in keyof T]?: RecursivePartial<T[P]>;
};
const loadConfig = () => {
try {
return toml.parse(
fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
) as any as Config;
} catch (error) {
// Return default config if file doesn't exist
return {
GENERAL: {
PORT: 3001,
SIMILARITY_MEASURE: 'cosine',
KEEP_ALIVE: '5m',
},
MODELS: {
OPENAI: {
API_KEY: '',
},
GROQ: {
API_KEY: '',
},
ANTHROPIC: {
API_KEY: '',
},
GEMINI: {
API_KEY: '',
},
OLLAMA: {
API_URL: '',
},
CUSTOM_OPENAI: {
API_URL: '',
API_KEY: '',
MODEL_NAME: '',
},
},
API_ENDPOINTS: {
SEARXNG: '',
},
};
}
};
export const getPort = () =>
process.env.PORT ? parseInt(process.env.PORT, 10) : loadConfig().GENERAL.PORT;
export const getSimilarityMeasure = () =>
process.env.SIMILARITY_MEASURE || loadConfig().GENERAL.SIMILARITY_MEASURE;
export const getKeepAlive = () =>
process.env.KEEP_ALIVE || loadConfig().GENERAL.KEEP_ALIVE;
export const getOpenaiApiKey = () =>
process.env.OPENAI_API_KEY || loadConfig().MODELS.OPENAI.API_KEY;
export const getGroqApiKey = () =>
process.env.GROQ_API_KEY || loadConfig().MODELS.GROQ.API_KEY;
export const getAnthropicApiKey = () =>
process.env.ANTHROPIC_API_KEY || loadConfig().MODELS.ANTHROPIC.API_KEY;
export const getGeminiApiKey = () =>
process.env.GEMINI_API_KEY || loadConfig().MODELS.GEMINI.API_KEY;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
export const getOllamaApiEndpoint = () =>
process.env.OLLAMA_API_URL || loadConfig().MODELS.OLLAMA.API_URL;
export const getCustomOpenaiApiKey = () =>
process.env.CUSTOM_OPENAI_API_KEY || loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
export const getCustomOpenaiApiUrl = () =>
process.env.CUSTOM_OPENAI_API_URL || loadConfig().MODELS.CUSTOM_OPENAI.API_URL;
export const getCustomOpenaiModelName = () =>
process.env.CUSTOM_OPENAI_MODEL_NAME || loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
}
if (typeof current !== 'object' || current === null) {
return update;
}
const result = { ...current };
for (const key in update) {
if (Object.prototype.hasOwnProperty.call(update, key)) {
const updateValue = update[key];
if (
typeof updateValue === 'object' &&
updateValue !== null &&
typeof result[key] === 'object' &&
result[key] !== null
) {
result[key] = mergeConfigs(result[key], updateValue);
} else if (updateValue !== undefined) {
result[key] = updateValue;
}
}
}
return result;
};
export const updateConfig = (config: RecursivePartial<Config>) => {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(__dirname, `../${configFileName}`),
toml.stringify(mergedConfig),
);
};

View File

@ -7,7 +7,7 @@ export const getSuggestions = async (chatHisory: Message[]) => {
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/suggestions`, {
const res = await fetch(`/api/suggestions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',

View File

@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searxng';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
@ -36,6 +36,12 @@ type ImageSearchChainInput = {
query: string;
};
interface ImageSearchResult {
img_src: string;
url: string;
title: string;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
@ -52,11 +58,13 @@ const createImageSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images = [];
const images: ImageSearchResult[] = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {

View File

@ -1,5 +1,5 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
@ -8,7 +8,7 @@ import { ChatOpenAI } from '@langchain/openai';
const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestion should be relevant to the conversation that can be used by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length and are informative and relevant to the conversation.
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:

View File

@ -7,26 +7,26 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searxng';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
@ -36,6 +36,13 @@ type VideoSearchChainInput = {
query: string;
};
interface VideoSearchResult {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
@ -52,11 +59,13 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos = [];
const videos: VideoSearchResult[] = [];
res.results.forEach((result) => {
if (

113
src/lib/config.ts Normal file
View File

@ -0,0 +1,113 @@
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';
const configFileName = 'config.toml';
interface Config {
GENERAL: {
SIMILARITY_MEASURE: string;
KEEP_ALIVE: string;
};
MODELS: {
OPENAI: {
API_KEY: string;
};
GROQ: {
API_KEY: string;
};
ANTHROPIC: {
API_KEY: string;
};
GEMINI: {
API_KEY: string;
};
OLLAMA: {
API_URL: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
MODEL_NAME: string;
};
};
API_ENDPOINTS: {
SEARXNG: string;
};
}
type RecursivePartial<T> = {
[P in keyof T]?: RecursivePartial<T[P]>;
};
const loadConfig = () =>
toml.parse(
fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
) as any as Config;
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;
export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
export const getCustomOpenaiApiUrl = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_URL;
export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
}
if (typeof current !== 'object' || current === null) {
return update;
}
const result = { ...current };
for (const key in update) {
if (Object.prototype.hasOwnProperty.call(update, key)) {
const updateValue = update[key];
if (
typeof updateValue === 'object' &&
updateValue !== null &&
typeof result[key] === 'object' &&
result[key] !== null
) {
result[key] = mergeConfigs(result[key], updateValue);
} else if (updateValue !== undefined) {
result[key] = updateValue;
}
}
}
return result;
};
export const updateConfig = (config: RecursivePartial<Config>) => {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(path.join(process.cwd(), `${configFileName}`)),
toml.stringify(mergedConfig),
);
};
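
A small usage sketch for these helpers (an illustration, not part of the diff): updateConfig deep-merges a partial object into config.toml at the project root, so only the keys you pass are overwritten:

// Hypothetical usage sketch. The URL value is a placeholder.
import { getCustomOpenaiApiUrl, updateConfig } from '@/lib/config';

const currentUrl = getCustomOpenaiApiUrl(); // reads MODELS.CUSTOM_OPENAI.API_URL

updateConfig({
  MODELS: {
    CUSTOM_OPENAI: {
      API_URL: 'http://localhost:1234/v1',
    },
  },
});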

View File

@ -1,8 +1,9 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';
import path from 'path';
const sqlite = new Database('data/db.sqlite');
const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
const db = drizzle(sqlite, {
schema: schema,
});

View File

@ -28,7 +28,7 @@ export class HuggingFaceTransformersEmbeddings
timeout?: number;
private pipelinePromise: Promise<any>;
private pipelinePromise: Promise<any> | undefined;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});

View File

@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {
constructor(args?: LineOutputParserArgs) {
super();
this.key = args.key ?? this.key;
this.key = args?.key ?? this.key;
}
static lc_name() {

View File

@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
constructor(args?: LineListOutputParserArgs) {
super();
this.key = args.key ?? this.key;
this.key = args?.key ?? this.key;
}
static lc_name() {

View File

@ -50,7 +50,7 @@ export const academicSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -50,7 +50,7 @@ export const redditSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -50,7 +50,7 @@ export const wolframAlphaSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -1,5 +1,5 @@
export const writingAssistantPrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Writing Assistant', this means you will be helping the user write a response to a given query.
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Writing Assistant', this means you will be helping the user write a response to a given query.
Since you are a writing assistant, you would not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest them to switch to a different focus mode.
You will be shared a context that can contain information from files user has uploaded to get answers from. You will have to generate answers upon that.

View File

@ -50,7 +50,7 @@ export const youtubeSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Youtube', this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript.
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -1,6 +1,38 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { getAnthropicApiKey } from '../../config';
import logger from '../../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
export const loadAnthropicChatModels = async () => {
const anthropicApiKey = getAnthropicApiKey();
@ -8,52 +40,25 @@ export const loadAnthropicChatModels = async () => {
if (!anthropicApiKey) return {};
try {
const chatModels = {
'claude-3-5-sonnet-20241022': {
displayName: 'Claude 3.5 Sonnet',
model: new ChatAnthropic({
const chatModels: Record<string, ChatModel> = {};
anthropicChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: anthropicApiKey,
modelName: model.key,
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-sonnet-20241022',
}),
},
'claude-3-5-haiku-20241022': {
displayName: 'Claude 3.5 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-haiku-20241022',
}),
},
'claude-3-opus-20240229': {
displayName: 'Claude 3 Opus',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-opus-20240229',
}),
},
'claude-3-sonnet-20240229': {
displayName: 'Claude 3 Sonnet',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-sonnet-20240229',
}),
},
'claude-3-haiku-20240307': {
displayName: 'Claude 3 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-haiku-20240307',
}),
},
};
configuration: {
baseURL: 'https://api.anthropic.com/v1/',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Anthropic models: ${err}`);
console.error(`Error loading Anthropic models: ${err}`);
return {};
}
};

View File

@ -1,9 +1,42 @@
import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../../config';
import logger from '../../utils/logger';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Pro Experimental',
key: 'gemini-2.0-pro-exp-02-05',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Gemini Embedding',
key: 'gemini-embedding-exp',
},
];
export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey();
@ -11,75 +44,53 @@ export const loadGeminiChatModels = async () => {
if (!geminiApiKey) return {};
try {
const chatModels = {
'gemini-1.5-flash': {
displayName: 'Gemini 1.5 Flash',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash',
const chatModels: Record<string, ChatModel> = {};
geminiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: geminiApiKey,
modelName: model.key,
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-flash-8b': {
displayName: 'Gemini 1.5 Flash 8B',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash-8b',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-pro': {
displayName: 'Gemini 1.5 Pro',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-pro',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-exp': {
displayName: 'Gemini 2.0 Flash Exp',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-exp',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-thinking-exp-01-21': {
displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-thinking-exp-01-21',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
};
configuration: {
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Gemini models: ${err}`);
console.error(`Error loading Gemini models: ${err}`);
return {};
}
};
export const loadGeminiEmbeddingsModels = async () => {
export const loadGeminiEmbeddingModels = async () => {
const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {};
try {
const embeddingModels = {
'text-embedding-004': {
displayName: 'Text Embedding',
model: new GoogleGenerativeAIEmbeddings({
apiKey: geminiApiKey,
modelName: 'text-embedding-004',
}),
},
};
const embeddingModels: Record<string, EmbeddingModel> = {};
geminiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey: geminiApiKey,
modelName: model.key,
configuration: {
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
},
}) as unknown as Embeddings,
};
});
return embeddingModels;
} catch (err) {
logger.error(`Error loading Gemini embeddings model: ${err}`);
    console.error(`Error loading Gemini embeddings models: ${err}`);
return {};
}
};
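
Reassembled from the added lines above, the new Gemini embedding loader reads in one piece as the sketch below. Note the architectural shift: the provider now talks to Google's OpenAI-compatible endpoint through OpenAIEmbeddings instead of using the @langchain/google-genai package.

import { OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { getGeminiApiKey } from '../config';
import { EmbeddingModel } from '.';

const geminiEmbeddingModels: Record<string, string>[] = [
  {
    displayName: 'Gemini Embedding',
    key: 'gemini-embedding-exp',
  },
];

export const loadGeminiEmbeddingModels = async () => {
  const geminiApiKey = getGeminiApiKey();

  if (!geminiApiKey) return {};

  try {
    const embeddingModels: Record<string, EmbeddingModel> = {};

    geminiEmbeddingModels.forEach((model) => {
      embeddingModels[model.key] = {
        displayName: model.displayName,
        // Embeddings are requested through Google's OpenAI-compatible endpoint.
        model: new OpenAIEmbeddings({
          openAIApiKey: geminiApiKey,
          modelName: model.key,
          configuration: {
            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
          },
        }) as unknown as Embeddings,
      };
    });

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading Gemini embeddings models: ${err}`);
    return {};
  }
};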

View File

@@ -1,6 +1,78 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
];
export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey();
@@ -8,129 +80,25 @@ export const loadGroqChatModels = async () => {
if (!groqApiKey) return {};
try {
const chatModels = {
'llama-3.3-70b-versatile': {
displayName: 'Llama 3.3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.3-70b-versatile',
temperature: 0.7,
},
{
const chatModels: Record<string, ChatModel> = {};
groqChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-3b-preview': {
displayName: 'Llama 3.2 3B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-3b-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-11b-vision-preview': {
displayName: 'Llama 3.2 11B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-11b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-90b-vision-preview': {
displayName: 'Llama 3.2 90B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-90b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.1-8b-instant': {
displayName: 'Llama 3.1 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.1-8b-instant',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-8b-8192': {
displayName: 'LLaMA3 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-70b-8192': {
displayName: 'LLaMA3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'mixtral-8x7b-32768': {
displayName: 'Mixtral 8x7B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'gemma2-9b-it': {
displayName: 'Gemma2 9B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma2-9b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Groq models: ${err}`);
console.error(`Error loading Groq models: ${err}`);
return {};
}
};
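
With the catalog-driven loader in place, consuming a Groq model is a lookup plus a standard LangChain call. A minimal usage sketch, assuming a Groq API key is configured; the demo wrapper and the prompt string are illustrative, not part of the upstream code.

import { loadGroqChatModels } from './groq';

const demo = async () => {
  const models = await loadGroqChatModels();
  const llama = models['llama-3.1-8b-instant'];

  if (!llama) return; // API key missing or model unavailable

  // ChatModel.model is a LangChain BaseChatModel, so it can be invoked directly.
  const res = await llama.model.invoke('Say hello in one short sentence.');
  console.log(`${llama.displayName}: ${res.content}`);
};

demo().catch(console.error);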

View File

@@ -1,33 +1,51 @@
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../../config';
} from '../config';
import { ChatOpenAI } from '@langchain/openai';
import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
import { loadGroqChatModels } from './groq';
import { loadAnthropicChatModels } from './anthropic';
import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
import { loadTransformersEmbeddingsModels } from './transformers';
const chatModelProviders = {
export interface ChatModel {
displayName: string;
model: BaseChatModel;
}
export interface EmbeddingModel {
displayName: string;
model: Embeddings;
}
export const chatModelProviders: Record<
string,
() => Promise<Record<string, ChatModel>>
> = {
openai: loadOpenAIChatModels,
groq: loadGroqChatModels,
ollama: loadOllamaChatModels,
groq: loadGroqChatModels,
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
};
const embeddingModelProviders = {
openai: loadOpenAIEmbeddingsModels,
local: loadTransformersEmbeddingsModels,
ollama: loadOllamaEmbeddingsModels,
gemini: loadGeminiEmbeddingsModels,
export const embeddingModelProviders: Record<
string,
() => Promise<Record<string, EmbeddingModel>>
> = {
openai: loadOpenAIEmbeddingModels,
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels,
};
export const getAvailableChatModelProviders = async () => {
const models = {};
const models: Record<string, Record<string, ChatModel>> = {};
for (const provider in chatModelProviders) {
const providerModels = await chatModelProviders[provider]();
@@ -52,7 +70,7 @@ export const getAvailableChatModelProviders = async () => {
configuration: {
baseURL: customOpenAiApiUrl,
},
}),
}) as unknown as BaseChatModel,
},
}
: {}),
@@ -62,7 +80,7 @@ export const getAvailableChatModelProviders = async () => {
};
export const getAvailableEmbeddingModelProviders = async () => {
const models = {};
const models: Record<string, Record<string, EmbeddingModel>> = {};
for (const provider in embeddingModelProviders) {
const providerModels = await embeddingModelProviders[provider]();
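
The hunks above cut off inside the two helpers, but they are enough to read off the public surface of the new index: the ChatModel and EmbeddingModel interfaces, the typed provider registries, and getAvailableChatModelProviders / getAvailableEmbeddingModelProviders. A minimal consumer sketch based on those typings; the listModels wrapper is illustrative.

import {
  getAvailableChatModelProviders,
  getAvailableEmbeddingModelProviders,
} from '.';

const listModels = async () => {
  // Both helpers resolve to provider name -> (model key -> model entry).
  const chatProviders = await getAvailableChatModelProviders();
  const embeddingProviders = await getAvailableEmbeddingModelProviders();

  for (const [provider, models] of Object.entries(chatProviders)) {
    console.log(`chat / ${provider}: ${Object.keys(models).join(', ')}`);
  }

  for (const [provider, models] of Object.entries(embeddingProviders)) {
    console.log(`embeddings / ${provider}: ${Object.keys(models).join(', ')}`);
  }
};

listModels().catch(console.error);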

View File

@@ -1,74 +1,73 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
export const loadOllamaChatModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
const keepAlive = getKeepAlive();
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaEndpoint) return {};
if (!ollamaApiEndpoint) return {};
try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = response.data;
const { models } = res.data;
const chatModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.model] = {
displayName: model.name,
model: new ChatOllama({
baseUrl: ollamaEndpoint,
baseUrl: ollamaApiEndpoint,
model: model.model,
temperature: 0.7,
keepAlive: keepAlive,
keepAlive: getKeepAlive(),
}),
};
return acc;
}, {});
});
return chatModels;
} catch (err) {
logger.error(`Error loading Ollama models: ${err}`);
console.error(`Error loading Ollama models: ${err}`);
return {};
}
};
export const loadOllamaEmbeddingsModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaEndpoint) return {};
if (!ollamaApiEndpoint) return {};
try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = response.data;
const { models } = res.data;
const embeddingsModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model: any) => {
embeddingModels[model.model] = {
displayName: model.name,
model: new OllamaEmbeddings({
baseUrl: ollamaEndpoint,
baseUrl: ollamaApiEndpoint,
model: model.model,
}),
};
});
return acc;
}, {});
return embeddingsModels;
return embeddingModels;
} catch (err) {
logger.error(`Error loading Ollama embeddings model: ${err}`);
console.error(`Error loading Ollama embeddings models: ${err}`);
return {};
}
};
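
Unlike the static catalogs of the other providers, both Ollama loaders discover models at runtime from the server's /api/tags endpoint. The sketch below shows the payload shape they rely on, with a hypothetical helper mirroring the shared request; only the fields actually read by the loaders (name, model) are declared, and the real Ollama response carries additional metadata.

import axios from 'axios';

interface OllamaTagsResponse {
  models: {
    name: string; // human-readable name, used as displayName
    model: string; // model identifier, used as the registry key
  }[];
}

// Hypothetical helper; in the file above the same GET request is issued
// inline by both loadOllamaChatModels and loadOllamaEmbeddingModels.
const fetchOllamaModels = async (endpoint: string) => {
  const res = await axios.get<OllamaTagsResponse>(`${endpoint}/api/tags`, {
    headers: { 'Content-Type': 'application/json' },
  });

  return res.data.models;
};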

View File

@@ -1,89 +1,90 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
export const loadOpenAIChatModels = async () => {
const openAIApiKey = getOpenaiApiKey();
const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {};
if (!openaiApiKey) return {};
try {
const chatModels = {
'gpt-3.5-turbo': {
displayName: 'GPT-3.5 Turbo',
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-3.5-turbo',
openAIApiKey: openaiApiKey,
modelName: model.key,
temperature: 0.7,
}),
},
'gpt-4': {
displayName: 'GPT-4',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
},
'gpt-4-turbo': {
displayName: 'GPT-4 turbo',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
},
'gpt-4o': {
displayName: 'GPT-4 omni',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o',
temperature: 0.7,
}),
},
'gpt-4o-mini': {
displayName: 'GPT-4 omni mini',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o-mini',
temperature: 0.7,
}),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading OpenAI models: ${err}`);
console.error(`Error loading OpenAI models: ${err}`);
return {};
}
};
export const loadOpenAIEmbeddingsModels = async () => {
const openAIApiKey = getOpenaiApiKey();
export const loadOpenAIEmbeddingModels = async () => {
const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {};
if (!openaiApiKey) return {};
try {
const embeddingModels = {
'text-embedding-3-small': {
displayName: 'Text Embedding 3 Small',
const embeddingModels: Record<string, EmbeddingModel> = {};
openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-small',
}),
},
'text-embedding-3-large': {
displayName: 'Text Embedding 3 Large',
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
},
};
openAIApiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
});
return embeddingModels;
} catch (err) {
logger.error(`Error loading OpenAI embeddings model: ${err}`);
console.error(`Error loading OpenAI embeddings models: ${err}`);
return {};
}
};
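
The OpenAI loaders keep the library's default endpoint and follow the same catalog pattern. A minimal usage sketch for the embedding side, assuming an OpenAI key is configured; the embedExample wrapper and the sample text are illustrative.

import { loadOpenAIEmbeddingModels } from './openai';

const embedExample = async () => {
  const embeddingModels = await loadOpenAIEmbeddingModels();
  const small = embeddingModels['text-embedding-3-small'];

  if (!small) return; // API key missing

  // EmbeddingModel.model is a LangChain Embeddings instance.
  const vectors = await small.model.embedDocuments([
    'Perplexica is an open-source AI-powered search engine.',
  ]);

  console.log(`${small.displayName}: ${vectors[0].length} dimensions`);
};

embedExample().catch(console.error);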

Some files were not shown because too many files have changed in this diff.