Compare commits


8 Commits

Author SHA1 Message Date
ItzCrazyKns
a401e67d87 Merge pull request #666 from AnotiaWang/admin-password
feat: sync changes from master branch
2025-03-08 20:08:59 +05:30
AnotiaWang
d95849e538 fix: missing MODEL_NAME in config sample 2025-03-07 23:54:53 +08:00
AnotiaWang
ec5e5b3893 Merge branch 'master' into admin-password 2025-03-07 23:54:47 +08:00
ItzCrazyKns
639fbd7a15 feat(chat-window): lint & beautify 2024-08-02 19:37:20 +05:30
ItzCrazyKns
a88104434d feat(copilot): respect preferences 2024-08-02 19:36:50 +05:30
ItzCrazyKns
a1e0d368c6 feat(settings): add preferences 2024-08-02 19:36:39 +05:30
ItzCrazyKns
5779701b7d feat(sidebar): respect preferences 2024-08-02 19:35:57 +05:30
ItzCrazyKns
fdfe8d1f41 feat(app): add password auth for settings 2024-08-02 19:32:38 +05:30
142 changed files with 9110 additions and 21165 deletions

View File

@@ -1,94 +0,0 @@
# GitHub Copilot Instructions for Perplexica
This file provides context and guidance for GitHub Copilot when working with the Perplexica codebase.
## Project Overview
Perplexica is an open-source AI-powered search engine that uses advanced machine learning to provide intelligent search results. It combines web search capabilities with LLM-based processing to understand and answer user questions, similar to Perplexity AI but fully open source.
## Key Components
- **Frontend**: Next.js application with React components (in `/src/components` and `/src/app`)
- **Backend Logic**: Node.js backend with API routes (in `/src/app/api`) and library code (in `/src/lib`)
- **Search Engine**: Uses SearXNG as a metadata search engine
- **LLM Integration**: Supports multiple models including OpenAI, Anthropic, Groq, Ollama (local models)
- **Database**: SQLite database managed with Drizzle ORM
## Architecture
The system works through these main steps:
- User submits a query
- The system determines if web search is needed
- If needed, it searches the web using SearXNG
- Results are ranked using embedding-based similarity search
- LLMs are used to generate a comprehensive response with cited sources
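For illustration only, here is a minimal sketch of the embedding-based reranking step above (not the actual `metaSearchAgent` code), assuming a LangChain `Embeddings` instance and a simplified search-result shape:

```ts
// Hedged sketch: rerank SearXNG results by cosine similarity between the
// query embedding and each result's content embedding. The SearchResult
// shape and the topK default are illustrative assumptions.
import type { Embeddings } from '@langchain/core/embeddings';

interface SearchResult {
  title: string;
  url: string;
  content: string;
}

const cosine = (a: number[], b: number[]): number => {
  let dot = 0,
    normA = 0,
    normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
};

export const rerankBySimilarity = async (
  query: string,
  results: SearchResult[],
  embeddings: Embeddings,
  topK = 8,
): Promise<SearchResult[]> => {
  const [queryVector, docVectors] = await Promise.all([
    embeddings.embedQuery(query),
    embeddings.embedDocuments(results.map((r) => r.content)),
  ]);
  return docVectors
    .map((vector, i) => ({ result: results[i], score: cosine(queryVector, vector) }))
    .sort((a, b) => b.score - a.score)
    .slice(0, topK)
    .map((entry) => entry.result);
};
```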
## Key Technologies
- **Frontend**: React, Next.js, Tailwind CSS
- **Backend**: Node.js
- **Database**: SQLite with Drizzle ORM
- **AI/ML**: LangChain for orchestration, various LLM providers
- **Search**: SearXNG integration
- **Embedding Models**: For re-ranking search results
## Project Structure
- `/src/app`: Next.js app directory with page components and API routes
- `/src/components`: Reusable UI components
- `/src/lib`: Backend functionality
- `/lib/search`: Search functionality and meta search agent
- `/lib/db`: Database schema and operations
- `/lib/providers`: LLM and embedding model integrations
- `/lib/prompts`: Prompt templates for LLMs
- `/lib/chains`: LangChain chains for various operations
## Focus Modes
Perplexica supports multiple specialized search modes:
- All Mode: General web search
- Local Research Mode: Research and interact with local files with citations
- Chat Mode: Have a creative conversation
- Academic Search Mode: For academic research
- YouTube Search Mode: For video content
- Wolfram Alpha Search Mode: For calculations and data analysis
- Reddit Search Mode: For community discussions
## Development Workflow
- Use `npm run dev` for local development
- Format code with `npm run format:write` before committing
- Database migrations: `npm run db:push`
- Build for production: `npm run build`
- Start production server: `npm run start`
## Configuration
The application uses a `config.toml` file (created from `sample.config.toml`) for configuration, including:
- API keys for various LLM providers
- Database settings
- Search engine configuration
- Similarity measure settings
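As a hedged illustration (not the project's actual config loader), `config.toml` can be parsed with the `@iarna/toml` dependency; the fields referenced below appear in `sample.config.toml`:

```ts
// Minimal sketch: read and parse config.toml from the project root.
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';

const loadConfig = (): Record<string, any> => {
  const raw = fs.readFileSync(path.join(process.cwd(), 'config.toml'), 'utf-8');
  return toml.parse(raw) as Record<string, any>;
};

const config = loadConfig();
// Examples of fields defined in sample.config.toml:
console.log(config.GENERAL?.SIMILARITY_MEASURE); // "cosine" or "dot"
console.log(Boolean(config.MODELS?.OPENAI?.API_KEY)); // whether an OpenAI key is set
```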
## Common Tasks
When working on this codebase, you might need to:
- Add new API endpoints in `/src/app/api`
- Modify UI components in `/src/components`
- Extend search functionality in `/src/lib/search`
- Add new LLM providers in `/src/lib/providers`
- Update database schema in `/src/lib/db/schema.ts`
- Create new prompt templates in `/src/lib/prompts`
- Build new chains in `/src/lib/chains`
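For example, a new endpoint would typically follow the route-handler pattern used elsewhere in `/src/app/api`; the `health` route below is a hypothetical sketch, not an existing endpoint:

```ts
// Hypothetical /src/app/api/health/route.ts following the existing handler style.
export const GET = async (req: Request) => {
  try {
    return Response.json({ status: 'ok' }, { status: 200 });
  } catch (err) {
    console.error('An error occurred in health route:', err);
    return Response.json(
      { message: 'An error has occurred.' },
      { status: 500 },
    );
  }
};
```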
## AI Behavior
- Avoid conciliatory language
- It is not necessary to apologize
- If you don't know the answer, ask for clarification
- Do not add additional packages or dependencies unless explicitly requested
- Only make changes to the code that are relevant to the task at hand

View File

@@ -8,12 +8,18 @@ on:
types: [published]
jobs:
build-amd64:
build-and-push:
runs-on: ubuntu-latest
strategy:
matrix:
service: [backend, app]
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
@@ -30,109 +36,38 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push AMD64 Docker image
- name: Build and push Docker image for ${{ matrix.service }}
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:amd64 \
-t itzcrazykns1337/${IMAGE_NAME}:main \
--push .
- name: Build and push AMD64 release Docker image
- name: Build and push release Docker image for ${{ matrix.service }}
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--push .
build-arm64:
runs-on: ubuntu-24.04-arm
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
install: true
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push ARM64 Docker image
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:arm64 \
--push .
- name: Build and push ARM64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--push .
manifest:
needs: [build-amd64, build-arm64]
runs-on: ubuntu-latest
steps:
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Create and push multi-arch manifest for main
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
--amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
- name: Create and push multi-arch manifest for releases
if: github.event_name == 'release'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}

6
.gitignore vendored
View File

@@ -4,9 +4,9 @@ npm-debug.log
yarn-error.log
# Build output
.next/
out/
dist/
/.next/
/out/
/dist/
# IDE/Editor specific
.vscode/

View File

@@ -6,6 +6,7 @@ const config = {
endOfLine: 'auto',
singleQuote: true,
tabWidth: 2,
semi: true,
};
module.exports = config;

View File

@@ -1,43 +1,32 @@
# How to Contribute to Perplexica
Thanks for your interest in contributing to Perplexica! Your help makes this project better. This guide explains how to contribute effectively.
Perplexica is a modern AI chat application with advanced search capabilities.
Hey there, thanks for deciding to contribute to Perplexica. Anything you help with will support the development of Perplexica and will make it better. Let's walk you through the key aspects to ensure your contributions are effective and in harmony with the project's setup.
## Project Structure
Perplexica's codebase is organized as follows:
Perplexica's design consists of two main domains:
- **UI Components and Pages**:
- **Components (`src/components`)**: Reusable UI components.
- **Pages and Routes (`src/app`)**: Next.js app directory structure with page components.
- Main app routes include: home (`/`), chat (`/c`), discover (`/discover`), library (`/library`), and settings (`/settings`).
- **API Routes (`src/app/api`)**: API endpoints implemented with Next.js API routes.
- `/api/chat`: Handles chat interactions.
- `/api/search`: Provides direct access to Perplexica's search capabilities.
- Other endpoints for models, files, and suggestions.
- **Backend Logic (`src/lib`)**: Contains all the backend functionality including search, database, and API logic.
- The search functionality is present inside `src/lib/search` directory.
- All of the focus modes are implemented using the Meta Search Agent class in `src/lib/search/metaSearchAgent.ts`.
- Database functionality is in `src/lib/db`.
- Chat model and embedding model providers are managed in `src/lib/providers`.
- Prompt templates and LLM chain definitions are in `src/lib/prompts` and `src/lib/chains` respectively.
## API Documentation
Perplexica exposes several API endpoints for programmatic access, including:
- **Search API**: Access Perplexica's advanced search capabilities directly via the `/api/search` endpoint. For detailed documentation, see `docs/api/search.md`.
- **Frontend (`ui` directory)**: This is a Next.js application holding all user interface components. It's a self-contained environment that manages everything the user interacts with.
- **Backend (root and `src` directory)**: The backend logic is situated in the `src` folder, but the root directory holds the main `package.json` for backend dependency management.
- All of the focus modes are created using the Meta Search Agent class present in `src/search/metaSearchAgent.ts`. The main logic behind Perplexica lies there.
## Setting Up Your Environment
Before diving into coding, setting up your local environment is key. Here's what you need to do:
### Backend
1. In the root directory, locate the `sample.config.toml` file.
2. Rename it to `config.toml` and fill in the necessary configuration fields.
3. Run `npm install` to install all dependencies.
4. Run `npm run db:push` to set up the local sqlite database.
5. Use `npm run dev` to start the application in development mode.
2. Rename it to `config.toml` and fill in the necessary configuration fields specific to the backend.
3. Run `npm install` to install dependencies.
4. Run `npm run db:push` to set up the local SQLite database.
5. Use `npm run dev` to start the backend in development mode.
### Frontend
1. Navigate to the `ui` folder and repeat the process of renaming `.env.example` to `.env`, making sure to provide the frontend-specific variables.
2. Execute `npm install` within the `ui` directory to get the frontend dependencies ready.
3. Launch the frontend development server with `npm run dev`.
**Please note**: Docker configurations are present for setting up production environments, whereas `npm run dev` is used for development purposes.

View File

@@ -1,5 +1,21 @@
# 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->
<div align="center" markdown="1">
<sup>Special thanks to:</sup>
<br>
<br>
<a href="https://www.warp.dev/perplexica">
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/775dd593-9b5f-40f1-bf48-479faff4c27b">
</a>
### [Warp, the AI Devtool that lives in your terminal](https://www.warp.dev/perplexica)
[Available for MacOS, Linux, & Windows](https://www.warp.dev/perplexica)
</div>
<hr/>
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)
@@ -41,10 +57,9 @@ Want to know more about its architecture and how it works? You can read it [here
- **Two Main Modes:**
- **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Unlike normal search, instead of just using the context from SearxNG, it visits the top matches and tries to find sources relevant to the user's query directly from the page.
- **Normal Mode:** Processes your query and performs a web search.
- **Focus Modes:** Special modes to better answer specific types of questions. Perplexica currently has 7 focus modes:
- **Focus Modes:** Special modes to better answer specific types of questions. Perplexica currently has 6 focus modes:
- **All Mode:** Searches the entire web to find the best results.
- **Local Research Mode:** Research and interact with local files with citations.
- **Chat Mode:** Have a truly creative conversation without web search.
- **Writing Assistant Mode:** Helpful for writing tasks that do not require searching the web.
- **Academic Search Mode:** Finds articles and papers, ideal for academic research.
- **YouTube Search Mode:** Finds YouTube videos based on the search query.
- **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
@@ -94,13 +109,14 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
1. Install SearXNG and allow `JSON` format in the SearXNG settings.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. After populating the configuration run `npm i`.
4. Install the dependencies and then execute `npm run build`.
5. Finally, start the app by running `npm run start`.
3. Rename the `.env.example` file to `.env` in the `ui` folder and fill in all necessary fields.
4. After populating the configuration and environment files, run `npm i` in both the `ui` folder and the root directory.
5. Install the dependencies and then execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like exposing it to your network, etc.
### Ollama Connection Errors
@@ -138,47 +154,12 @@ For more details, check out the full documentation [here](https://github.com/Itz
## Expose Perplexica to network
Perplexica runs on Next.js and handles all API requests. It works right away on the same network and stays accessible even with port forwarding.
### Running Behind a Reverse Proxy
When running Perplexica behind a reverse proxy (like Nginx, Apache, or Traefik), follow these steps to ensure proper functionality:
1. **Configure the BASE_URL setting**:
- In `config.toml`, set the `BASE_URL` parameter under the `[GENERAL]` section to your public-facing URL (e.g., `https://perplexica.yourdomain.com`)
2. **Ensure proper headers forwarding**:
- Your reverse proxy should forward the following headers:
- `X-Forwarded-Host`
- `X-Forwarded-Proto`
- `X-Forwarded-Port` (if using non-standard ports)
3. **Example Nginx configuration**:
```nginx
server {
listen 80;
server_name perplexica.yourdomain.com;
location / {
proxy_pass http://localhost:3000;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
}
}
```
This ensures that OpenSearch descriptions, browser integrations, and all URLs work properly when accessing Perplexica through your reverse proxy.
You can access Perplexica over your home network by following our networking guide [here](https://github.com/ItzCrazyKns/Perplexica/blob/master/docs/installation/NETWORKING.md).
## One-Click Deployment
[![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
[![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
## Upcoming Features

View File

@@ -1,27 +1,15 @@
FROM node:20.18.0-slim AS builder
FROM node:20.18.0-alpine
ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
WORKDIR /home/perplexica
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile --network-timeout 600000
COPY ui /home/perplexica/
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
RUN mkdir -p /home/perplexica/data
RUN yarn install --frozen-lockfile
RUN yarn build
FROM node:20.18.0-slim
WORKDIR /home/perplexica
COPY --from=builder /home/perplexica/public ./public
COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data
RUN mkdir /home/perplexica/uploads
CMD ["node", "server.js"]
CMD ["yarn", "start"]

17
backend.dockerfile Normal file
View File

@@ -0,0 +1,17 @@
FROM node:18-slim
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build
CMD ["yarn", "start"]

View File

@@ -9,21 +9,41 @@ services:
- perplexica-network
restart: unless-stopped
app:
image: itzcrazykns1337/perplexica:main
perplexica-backend:
build:
context: .
dockerfile: app.dockerfile
dockerfile: backend.dockerfile
image: itzcrazykns1337/perplexica-backend:main
environment:
- SEARXNG_API_URL=http://searxng:8080
depends_on:
- searxng
ports:
- 3000:3000
networks:
- perplexica-network
- 3001:3001
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
- ./config.toml:/home/perplexica/config.toml
extra_hosts:
- 'host.docker.internal:host-gateway'
networks:
- perplexica-network
restart: unless-stopped
perplexica-frontend:
build:
context: .
dockerfile: app.dockerfile
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
image: itzcrazykns1337/perplexica-frontend:main
depends_on:
- perplexica-backend
ports:
- 3000:3000
networks:
- perplexica-network
restart: unless-stopped
networks:

View File

@@ -6,9 +6,9 @@ Perplexica's Search API makes it easy to use our AI-powered search engine. You
## Endpoint
### **POST** `http://localhost:3000/api/search`
### **POST** `http://localhost:3001/api/search`
**Note**: Replace `3000` with any other port if you've changed the default PORT
**Note**: Replace `3001` with any other port if you've changed the default PORT
### Request
@@ -20,11 +20,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
{
"chatModel": {
"provider": "openai",
"name": "gpt-4o-mini"
"model": "gpt-4o-mini"
},
"embeddingModel": {
"provider": "openai",
"name": "text-embedding-3-large"
"model": "text-embedding-3-large"
},
"optimizationMode": "speed",
"focusMode": "webSearch",
@@ -32,30 +32,28 @@ The API accepts a JSON object in the request body, where you define the focus mo
"history": [
["human", "Hi, how are you?"],
["assistant", "I am doing well, how can I help you today?"]
],
"systemInstructions": "Focus on providing technical details about Perplexica's architecture.",
"stream": false
]
}
```
### Request Parameters
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
- `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- Optional fields for custom OpenAI configuration:
- `customOpenAIBaseURL`: If youre using a custom OpenAI instance, provide the base URL.
- `customOpenAIKey`: The API key for a custom OpenAI instance.
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- `provider`: The provider for the embedding model (e.g., `openai`).
- `name`: The specific embedding model (e.g., `text-embedding-3-large`).
- `model`: The specific embedding model (e.g., `text-embedding-3-large`).
- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
- `webSearch`, `academicSearch`, `localResearch`, `chat`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`.
- `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`.
- **`optimizationMode`** (string, optional): Specifies the optimization mode to control the balance between performance and quality. Available modes:
@@ -64,8 +62,6 @@ The API accepts a JSON object in the request body, where you define the focus mo
- **`query`** (string, required): The search query or question.
- **`systemInstructions`** (string, optional): Custom instructions provided by the user to guide the AI's response. These instructions are treated as user preferences and have lower priority than the system's core instructions. For example, you can specify a particular writing style, format, or focus area.
- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
```json
@@ -75,13 +71,11 @@ The API accepts a JSON object in the request body, where you define the focus mo
]
```
- **`stream`** (boolean, optional): When set to `true`, enables streaming responses. Default is `false`.
### Response
The response from the API includes both the final message and the sources used to generate that message.
#### Standard Response (stream: false)
#### Example Response
```json
{
@@ -106,28 +100,6 @@ The response from the API includes both the final message and the sources used t
}
```
#### Streaming Response (stream: true)
When streaming is enabled, the API returns a stream of newline-delimited JSON objects. Each line contains a complete, valid JSON object. The response has Content-Type: application/json.
Example of streamed response objects:
```
{"type":"init","data":"Stream connected"}
{"type":"sources","data":[{"pageContent":"...","metadata":{"title":"...","url":"..."}},...]}
{"type":"response","data":"Perplexica is an "}
{"type":"response","data":"innovative, open-source "}
{"type":"response","data":"AI-powered search engine..."}
{"type":"done"}
```
Clients should process each line as a separate JSON object. The different message types include:
- **`init`**: Initial connection message
- **`sources`**: All sources used for the response
- **`response`**: Chunks of the generated answer text
- **`done`**: Indicates the stream is complete
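A minimal client sketch for the streaming mode described above (assuming the default port and Node 18+ `fetch`; adjust the URL to your deployment):

```ts
// Hedged sketch: consume the newline-delimited JSON stream from /api/search.
const streamSearch = async (query: string): Promise<void> => {
  const res = await fetch('http://localhost:3000/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ focusMode: 'webSearch', query, stream: true }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep any partial line for the next chunk
    for (const line of lines) {
      if (!line.trim()) continue;
      const event = JSON.parse(line);
      if (event.type === 'response') process.stdout.write(event.data);
      if (event.type === 'done') console.log('\n[stream complete]');
    }
  }
};
```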
### Fields in the Response
- **`message`** (string): The search result, generated based on the query and focus mode.

View File

@@ -4,7 +4,7 @@ Curious about how Perplexica works? Don't worry, we'll cover it here. Before we
We'll understand how Perplexica works by taking an example of a scenario where a user asks: "How does an A.C. work?". We'll break down the process into steps to make it easier to understand. The steps are as follows:
1. The message is sent to the `/api/chat` route where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
1. The message is sent via WS to the backend server where it invokes the chain. The chain will depend on your focus mode. For this example, let's assume we use the "webSearch" focus mode.
2. The chain is now invoked; first, the message is passed to another chain where it first predicts (using the chat history and the question) whether there is a need for sources and searching the web. If there is, it will generate a query (in accordance with the chat history) for searching the web that we'll take up later. If not, the chain will end there, and then the answer generator chain, also known as the response generator, will be started.
3. The query returned by the first chain is passed to SearXNG to search the web for information.
4. The retrieved information is based on keyword search, so we then convert both the information and the query into embeddings and perform a similarity search to find the sources most relevant to answering the query.
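As a rough illustration of step 3 (not the project's actual `searchSearxng` helper), a SearXNG instance with JSON output enabled can be queried like this; the URL is whatever `SEARXNG` is set to in `config.toml`:

```ts
// Hedged sketch: query SearXNG's JSON API and return its result list.
interface SearxngResult {
  title: string;
  url: string;
  content: string;
}

const searchSearxng = async (query: string): Promise<SearxngResult[]> => {
  const url = new URL('/search', 'http://localhost:32768'); // assumed SEARXNG URL
  url.searchParams.set('q', query);
  url.searchParams.set('format', 'json'); // requires JSON format enabled in SearXNG
  const res = await fetch(url);
  const data = (await res.json()) as { results: SearxngResult[] };
  return data.results;
};
```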

View File

@@ -0,0 +1,109 @@
# Expose Perplexica to a network
This guide will show you how to make Perplexica available over a network. Follow these steps to allow computers on the same network to interact with Perplexica. Choose the instructions that match the operating system you are using.
## Windows
1. Open PowerShell as Administrator
2. Navigate to the directory containing the `docker-compose.yaml` file
3. Stop and remove the existing Perplexica containers and images:
```bash
docker compose down --rmi all
```
4. Open the `docker-compose.yaml` file in a text editor like Notepad++
5. Replace `127.0.0.1` with the IP address of the server Perplexica is running on in these two lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and close the `docker-compose.yaml` file
7. Rebuild and restart the Perplexica container:
```bash
docker compose up -d --build
```
## macOS
1. Open the Terminal application
2. Navigate to the directory with the `docker-compose.yaml` file:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove existing containers and images:
```bash
docker compose down --rmi all
```
4. Open `docker-compose.yaml` in a text editor such as nano:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP in these lines:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```
## Linux
1. Open the terminal
2. Navigate to the `docker-compose.yaml` directory:
```bash
cd /path/to/docker-compose.yaml
```
3. Stop and remove containers and images:
```bash
docker compose down --rmi all
```
4. Edit `docker-compose.yaml`:
```bash
nano docker-compose.yaml
```
5. Replace `127.0.0.1` with the server IP:
```bash
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
```
6. Save and exit the editor
7. Rebuild and restart Perplexica:
```bash
docker compose up -d --build
```

View File

@@ -39,8 +39,11 @@ To update Perplexica to the latest version, follow these steps:
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. After populating the configuration run `npm i`.
5. Install the dependencies and then execute `npm run build`.
6. Finally, start the app by running `npm run start`.
4. Execute `npm i` in both the `ui` folder and the root directory.
5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
---

View File

@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
export default defineConfig({
dialect: 'sqlite',
schema: './src/lib/db/schema.ts',
schema: './src/db/schema.ts',
out: './drizzle',
dbCredentials: {
url: './data/db.sqlite',

5
next-env.d.ts vendored
View File

@@ -1,5 +0,0 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

11860
package-lock.json generated

File diff suppressed because it is too large.

View File

@@ -1,68 +1,53 @@
{
"name": "perplexica-frontend",
"version": "1.10.2",
"name": "perplexica-backend",
"version": "1.10.0-rc3",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
"dev": "next dev --turbopack",
"build": "npm run db:push && next build",
"start": "next start",
"lint": "next lint",
"format:write": "prettier . --write",
"db:push": "drizzle-kit push"
},
"dependencies": {
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.15",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/google-genai": "^0.1.12",
"@langchain/ollama": "^0.2.0",
"@langchain/openai": "^0.0.25",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"@types/react-syntax-highlighter": "^15.5.13",
"@xenova/transformers": "^2.17.2",
"axios": "^1.8.3",
"better-sqlite3": "^11.9.1",
"clsx": "^2.1.0",
"compute-cosine-similarity": "^1.1.0",
"compute-dot": "^1.1.0",
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"langchain": "^0.1.30",
"lucide-react": "^0.363.0",
"markdown-to-jsx": "^7.7.2",
"next": "^15.2.2",
"next-themes": "^0.3.0",
"pdf-parse": "^1.1.1",
"react": "^18",
"react-dom": "^18",
"react-syntax-highlighter": "^15.6.1",
"react-text-to-speech": "^0.14.5",
"react-textarea-autosize": "^8.5.3",
"sonner": "^1.4.41",
"tailwind-merge": "^2.2.2",
"winston": "^3.17.0",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"
"start": "npm run db:push && node dist/app.js",
"build": "tsc",
"dev": "nodemon --ignore uploads/ src/app.ts ",
"db:push": "drizzle-kit push sqlite",
"format": "prettier . --check",
"format:write": "prettier . --write"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",
"@types/better-sqlite3": "^7.6.10",
"@types/cors": "^2.8.17",
"@types/express": "^4.17.21",
"@types/html-to-text": "^9.0.4",
"@types/node": "^20",
"@types/multer": "^1.4.12",
"@types/pdf-parse": "^1.1.4",
"@types/react": "^18",
"@types/react-dom": "^18",
"autoprefixer": "^10.0.1",
"drizzle-kit": "^0.30.5",
"eslint": "^8",
"eslint-config-next": "14.1.4",
"postcss": "^8",
"@types/readable-stream": "^4.0.11",
"@types/ws": "^8.5.12",
"drizzle-kit": "^0.22.7",
"nodemon": "^3.1.0",
"prettier": "^3.2.5",
"tailwindcss": "^3.3.0",
"typescript": "^5"
"ts-node": "^10.9.2",
"typescript": "^5.4.3"
},
"dependencies": {
"@iarna/toml": "^2.2.5",
"@langchain/anthropic": "^0.2.3",
"@langchain/community": "^0.2.16",
"@langchain/openai": "^0.0.25",
"@langchain/google-genai": "^0.0.23",
"@xenova/transformers": "^2.17.1",
"axios": "^1.6.8",
"better-sqlite3": "^11.0.0",
"compute-cosine-similarity": "^1.1.0",
"compute-dot": "^1.1.0",
"cors": "^2.8.5",
"dotenv": "^16.4.5",
"drizzle-orm": "^0.31.2",
"express": "^4.19.2",
"html-to-text": "^9.0.5",
"langchain": "^0.1.30",
"mammoth": "^1.8.0",
"multer": "^1.4.5-lts.1",
"pdf-parse": "^1.1.1",
"winston": "^3.13.0",
"ws": "^8.17.1",
"zod": "^3.22.4"
}
}

View File

@@ -1,7 +1,11 @@
[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
CONFIG_PASSWORD = "lorem_ipsum" # Password to access config
DISCOVER_ENABLED = true
LIBRARY_ENABLED = true
COPILOT_ENABLED = true
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
BASE_URL = "" # Optional. When set, overrides detected URL for OpenSearch and other public URLs
[MODELS.OPENAI]
API_KEY = ""
@@ -23,11 +27,5 @@ MODEL_NAME = ""
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[MODELS.DEEPSEEK]
API_KEY = ""
[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768
SEARXNG = "http://localhost:32768" # SearxNG API URL

38
src/app.ts Normal file
View File

@@ -0,0 +1,38 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});

View File

@@ -1,360 +0,0 @@
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import { searchHandlers } from '@/lib/search';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOllama } from '@langchain/ollama';
import { ChatOpenAI } from '@langchain/openai';
import crypto from 'crypto';
import { and, eq, gte } from 'drizzle-orm';
import { EventEmitter } from 'stream';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
type Message = {
messageId: string;
chatId: string;
content: string;
};
type ChatModel = {
provider: string;
name: string;
ollamaContextWindow?: number;
};
type EmbeddingModel = {
provider: string;
name: string;
};
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
systemInstructions: string;
};
type ModelStats = {
modelName: string;
responseTime?: number;
};
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
startTime: number,
) => {
let recievedMessage = '';
let sources: any[] = [];
let searchQuery: string | undefined;
let searchUrl: string | undefined;
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
recievedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
// Capture the search query if available
if (parsedData.searchQuery) {
searchQuery = parsedData.searchQuery;
}
if (parsedData.searchUrl) {
searchUrl = parsedData.searchUrl;
}
writer.write(
encoder.encode(
JSON.stringify({
type: 'sources',
data: parsedData.data,
searchQuery: parsedData.searchQuery,
messageId: aiMessageId,
searchUrl: searchUrl,
}) + '\n',
),
);
sources = parsedData.data;
}
});
let modelStats: ModelStats = {
modelName: '',
};
stream.on('stats', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'modelStats') {
modelStats = parsedData.data;
}
});
stream.on('end', () => {
const endTime = Date.now();
const duration = endTime - startTime;
modelStats = {
...modelStats,
responseTime: duration,
};
writer.write(
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
modelStats: modelStats,
searchQuery: searchQuery,
searchUrl: searchUrl,
}) + '\n',
),
);
writer.close();
db.insert(messagesSchema)
.values({
content: recievedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
...(searchQuery && { searchQuery }),
modelStats: modelStats,
...(searchUrl && { searchUrl }),
}),
})
.execute();
});
stream.on('error', (data) => {
const parsedData = JSON.parse(data);
writer.write(
encoder.encode(
JSON.stringify({
type: 'error',
data: parsedData.data,
}),
),
);
writer.close();
});
};
const handleHistorySave = async (
message: Message,
humanMessageId: string,
focusMode: string,
files: string[],
) => {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, message.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: message.chatId,
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: message.content,
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gte(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, message.chatId),
),
)
.execute();
}
};
export const POST = async (req: Request) => {
try {
const startTime = Date.now();
const body = (await req.json()) as Body;
const { message } = body;
if (message.content === '') {
return Response.json(
{
message: 'Please provide a message to process',
},
{ status: 400 },
);
}
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.name || Object.keys(chatModelProvider)[0]
];
const embeddingProvider =
embeddingModelProviders[
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
];
const embeddingModel =
embeddingProvider[
body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
];
let llm: BaseChatModel | undefined;
let embedding = embeddingModel.model;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
// Set context window size for Ollama models
if (llm instanceof ChatOllama && body.chatModel?.provider === 'ollama') {
llm.numCtx = body.chatModel.ollamaContextWindow || 2048;
}
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
if (!embedding) {
return Response.json(
{ error: 'Invalid embedding model' },
{ status: 400 },
);
}
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const handler = searchHandlers[body.focusMode];
if (!handler) {
return Response.json(
{
message: 'Invalid focus mode',
},
{ status: 400 },
);
}
const stream = await handler.searchAndAnswer(
message.content,
history,
llm,
embedding,
body.optimizationMode,
body.files,
body.systemInstructions,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(
stream,
writer,
encoder,
aiMessageId,
message.chatId,
startTime,
);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
headers: {
'Content-Type': 'text/event-stream',
Connection: 'keep-alive',
'Cache-Control': 'no-cache, no-transform',
},
});
} catch (err) {
console.error('An error occurred while processing chat request:', err);
return Response.json(
{ message: 'An error occurred while processing chat request' },
{ status: 500 },
);
}
};

View File

@@ -1,69 +0,0 @@
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
export const GET = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, id),
});
return Response.json(
{
chat: chatExists,
messages: chatMessages,
},
{ status: 200 },
);
} catch (err) {
console.error('Error in getting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
export const DELETE = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
await db.delete(chats).where(eq(chats.id, id)).execute();
await db.delete(messages).where(eq(messages.chatId, id)).execute();
return Response.json(
{ message: 'Chat deleted successfully' },
{ status: 200 },
);
} catch (err) {
console.error('Error in deleting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

View File

@@ -1,15 +0,0 @@
import db from '@/lib/db';
export const GET = async (req: Request) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return Response.json({ chats: chats }, { status: 200 });
} catch (err) {
console.error('Error in getting chats: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

View File

@@ -1,121 +0,0 @@
import {
getAnthropicApiKey,
getBaseUrl,
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
getGeminiApiKey,
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
getDeepseekApiKey,
getLMStudioApiEndpoint,
updateConfig,
} from '@/lib/config';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const config: Record<string, any> = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: chatModelProviders[provider][model].displayName,
};
});
}
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: embeddingModelProviders[provider][model].displayName,
};
});
}
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['deepseekApiKey'] = getDeepseekApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
config['baseUrl'] = getBaseUrl();
return Response.json({ ...config }, { status: 200 });
} catch (err) {
console.error('An error occurred while getting config:', err);
return Response.json(
{ message: 'An error occurred while getting config' },
{ status: 500 },
);
}
};
export const POST = async (req: Request) => {
try {
const config = await req.json();
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
DEEPSEEK: {
API_KEY: config.deepseekApiKey,
},
LM_STUDIO: {
API_URL: config.lmStudioApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
};
updateConfig(updatedConfig);
return Response.json({ message: 'Config updated' }, { status: 200 });
} catch (err) {
console.error('An error occurred while updating config:', err);
return Response.json(
{ message: 'An error occurred while updating config' },
{ status: 500 },
);
}
};

View File

@@ -1,61 +0,0 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
export const GET = async (req: Request) => {
try {
const data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
).results;
}),
])
)
.map((result) => result)
.flat()
.sort(() => Math.random() - 0.5);
return Response.json(
{
blogs: data,
},
{
status: 200,
},
);
} catch (err) {
console.error(`An error occurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',
},
{
status: 500,
},
);
}
};

View File

@@ -1,89 +0,0 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOllama } from '@langchain/ollama';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
ollamaContextWindow?: number;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
// Set context window size for Ollama models
if (llm instanceof ChatOllama && body.chatModel?.provider === 'ollama') {
llm.numCtx = body.chatModel.ollamaContextWindow || 2048;
}
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching images: ${err}`);
return Response.json(
{ message: 'An error occurred while searching images' },
{ status: 500 },
);
}
};

View File

@@ -1,47 +0,0 @@
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete (chatModelProviders[provider][model] as { model?: unknown })
.model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete (embeddingModelProviders[provider][model] as { model?: unknown })
.model;
});
});
return Response.json(
{
chatModelProviders,
embeddingModelProviders,
},
{
status: 200,
},
);
} catch (err) {
console.error('An error occurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',
},
{
status: 500,
},
);
}
};

View File

@@ -1,69 +0,0 @@
import { NextResponse } from 'next/server';
import { getBaseUrl } from '@/lib/config';
/**
* Creates an OpenSearch XML response with the given origin URL
*/
function generateOpenSearchResponse(origin: string): NextResponse {
const opensearchXml = `<?xml version="1.0" encoding="utf-8"?>
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
<ShortName>Perplexica</ShortName>
<LongName>Search with Perplexica AI</LongName>
<Description>Perplexica is a powerful AI-driven search engine that understands your queries and delivers relevant results.</Description>
<InputEncoding>UTF-8</InputEncoding>
<Image width="16" height="16" type="image/x-icon">${origin}/favicon.ico</Image>
<Url type="text/html" template="${origin}/?q={searchTerms}"/>
<Url type="application/opensearchdescription+xml" rel="self" template="${origin}/api/opensearch"/>
</OpenSearchDescription>`;
return new NextResponse(opensearchXml, {
headers: {
'Content-Type': 'application/opensearchdescription+xml',
},
});
}
export async function GET(request: Request) {
// Check if a BASE_URL is explicitly configured
const configBaseUrl = getBaseUrl();
// If BASE_URL is configured, use it, otherwise detect from request
if (configBaseUrl) {
// Remove any trailing slashes for consistency
let origin = configBaseUrl.replace(/\/+$/, '');
return generateOpenSearchResponse(origin);
}
// Detect the correct origin, taking into account reverse proxy headers
const url = new URL(request.url);
let origin = url.origin;
// Extract headers
const headers = Object.fromEntries(request.headers);
// Check for X-Forwarded-Host and related headers to handle reverse proxies
if (headers['x-forwarded-host']) {
// Determine protocol: prefer X-Forwarded-Proto, fall back to original or https
const protocol =
headers['x-forwarded-proto'] || url.protocol.replace(':', '');
// Build the correct public-facing origin
origin = `${protocol}://${headers['x-forwarded-host']}`;
// Handle non-standard ports if specified in X-Forwarded-Port
if (headers['x-forwarded-port']) {
const port = headers['x-forwarded-port'];
// Don't append standard ports (80 for HTTP, 443 for HTTPS)
if (
!(
(protocol === 'http' && port === '80') ||
(protocol === 'https' && port === '443')
)
) {
origin = `${origin}:${port}`;
}
}
}
// Generate and return the OpenSearch response
return generateOpenSearchResponse(origin);
}

View File

@@ -1,276 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '@/lib/search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
import { ChatOllama } from '@langchain/ollama';
interface chatModel {
provider: string;
name: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
ollamaContextWindow?: number;
}
interface embeddingModel {
provider: string;
name: string;
}
interface ChatRequestBody {
optimizationMode: 'speed' | 'balanced';
focusMode: string;
chatModel?: chatModel;
embeddingModel?: embeddingModel;
query: string;
history: Array<[string, string]>;
stream?: boolean;
systemInstructions?: string;
}
export const POST = async (req: Request) => {
try {
const body: ChatRequestBody = await req.json();
if (!body.focusMode || !body.query) {
return Response.json(
{ message: 'Missing focus mode or query' },
{ status: 400 },
);
}
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
body.stream = body.stream || false;
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.name ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
const embeddingModel =
body.embeddingModel?.name ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.name || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (llm instanceof ChatOllama && body.chatModel?.provider === 'ollama') {
llm.numCtx = body.chatModel.ollamaContextWindow || 2048;
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
return Response.json(
{ message: 'Invalid model selected' },
{ status: 400 },
);
}
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
return Response.json({ message: 'Invalid focus mode' }, { status: 400 });
}
const emitter = await searchHandler.searchAndAnswer(
body.query,
history,
llm,
embeddings,
body.optimizationMode,
[],
body.systemInstructions || '',
);
if (!body.stream) {
return new Promise(
(
resolve: (value: Response) => void,
reject: (value: Response) => void,
) => {
let message = '';
let sources: any[] = [];
emitter.on('data', (data: string) => {
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
} catch (error) {
reject(
Response.json(
{ message: 'Error parsing data' },
{ status: 500 },
),
);
}
});
emitter.on('end', () => {
resolve(Response.json({ message, sources }, { status: 200 }));
});
emitter.on('error', (error: any) => {
reject(
Response.json(
{ message: 'Search error', error },
{ status: 500 },
),
);
});
},
);
}
const encoder = new TextEncoder();
const abortController = new AbortController();
const { signal } = abortController;
const stream = new ReadableStream({
start(controller) {
let sources: any[] = [];
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'init',
data: 'Stream connected',
}) + '\n',
),
);
signal.addEventListener('abort', () => {
emitter.removeAllListeners();
try {
controller.close();
} catch (error) {}
});
emitter.on('data', (data: string) => {
if (signal.aborted) return;
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'response',
data: parsedData.data,
}) + '\n',
),
);
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'sources',
data: sources,
}) + '\n',
),
);
}
} catch (error) {
controller.error(error);
}
});
emitter.on('end', () => {
if (signal.aborted) return;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'done',
}) + '\n',
),
);
controller.close();
});
emitter.on('error', (error: any) => {
if (signal.aborted) return;
controller.error(error);
});
},
cancel() {
abortController.abort();
},
});
return new Response(stream, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache, no-transform',
Connection: 'keep-alive',
},
});
} catch (err: any) {
console.error(`Error in getting search results: ${err.message}`);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
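For illustration, a minimal non-streaming call against the handler above, assuming it is mounted at /api/chat (the path the ChatWindow component fetches); the query and focus mode values are only examples.

// Hedged usage sketch: POST a query with stream disabled and read the
// aggregated { message, sources } response produced by the Promise branch above.
const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    focusMode: 'webSearch',
    optimizationMode: 'balanced',
    query: 'What is Perplexica?',
    history: [] as Array<[string, string]>,
    stream: false,
  }),
});
const { message, sources } = await res.json();

With stream: true, the same endpoint instead returns newline-delimited JSON events (init, response, sources, done) over a text/event-stream response.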

View File

@@ -1,87 +0,0 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
import { ChatOllama } from '@langchain/ollama';
interface ChatModel {
provider: string;
model: string;
ollamaContextWindow?: number;
}
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
// Set context window size for Ollama models
if (llm instanceof ChatOllama && body.chatModel?.provider === 'ollama') {
llm.numCtx = body.chatModel.ollamaContextWindow || 2048;
}
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
);
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error occurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error occurred while generating suggestions' },
{ status: 500 },
);
}
};
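For illustration, a request body this handler accepts. The mount path is not shown in this diff, so /api/suggestions and the model identifiers below are assumptions.

// Hedged usage sketch; route path and model identifiers are hypothetical.
const res = await fetch('/api/suggestions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    chatHistory: [
      { role: 'user', content: 'Tell me about SearXNG' },
      { role: 'assistant', content: 'SearXNG is a self-hosted metasearch engine.' },
    ],
    chatModel: { provider: 'ollama', model: 'llama3', ollamaContextWindow: 4096 },
  }),
});
const { suggestions } = await res.json();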

View File

@@ -1,134 +0,0 @@
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
interface FileRes {
fileName: string;
fileExtension: string;
fileId: string;
}
const uploadDir = path.join(process.cwd(), 'uploads');
if (!fs.existsSync(uploadDir)) {
fs.mkdirSync(uploadDir, { recursive: true });
}
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
export async function POST(req: Request) {
try {
const formData = await req.formData();
const files = formData.getAll('files') as File[];
const embedding_model = formData.get('embedding_model');
const embedding_model_provider = formData.get('embedding_model_provider');
if (!embedding_model || !embedding_model_provider) {
return NextResponse.json(
{ message: 'Missing embedding model or provider' },
{ status: 400 },
);
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel =
embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
let embeddingsModel =
embeddingModels[provider as string]?.[embeddingModel as string]?.model;
if (!embeddingsModel) {
return NextResponse.json(
{ message: 'Invalid embedding model selected' },
{ status: 400 },
);
}
const processedFiles: FileRes[] = [];
await Promise.all(
files.map(async (file: any) => {
const fileExtension = file.name.split('.').pop();
if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
// A response returned from inside Promise.all is discarded, so throw instead;
// the outer catch block will surface the failure to the client.
throw new Error(`File type not supported: ${fileExtension}`);
}
const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
const filePath = path.join(uploadDir, uniqueFileName);
const buffer = Buffer.from(await file.arrayBuffer());
fs.writeFileSync(filePath, new Uint8Array(buffer));
let docs: any[] = [];
if (fileExtension === 'pdf') {
const loader = new PDFLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'docx') {
const loader = new DocxLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'txt') {
const text = fs.readFileSync(filePath, 'utf-8');
docs = [
new Document({ pageContent: text, metadata: { title: file.name } }),
];
}
const splitted = await splitter.splitDocuments(docs);
const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(
extractedDataPath,
JSON.stringify({
title: file.name,
contents: splitted.map((doc) => doc.pageContent),
}),
);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsDataPath = filePath.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(
embeddingsDataPath,
JSON.stringify({
title: file.name,
embeddings,
}),
);
processedFiles.push({
fileName: file.name,
fileExtension: fileExtension,
fileId: uniqueFileName.replace(/\.\w+$/, ''),
});
}),
);
return NextResponse.json({
files: processedFiles,
});
} catch (error) {
console.error('Error uploading file:', error);
return NextResponse.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
}
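For illustration, a client-side upload against this handler. The form field names come from the code above; the /api/uploads path and the embedding model identifiers are assumptions.

// Hedged usage sketch; route path and model names are hypothetical.
const file = new File(['Some notes about Perplexica.'], 'notes.txt', {
  type: 'text/plain',
});
const formData = new FormData();
formData.append('files', file);
formData.append('embedding_model', 'text-embedding-3-small');
formData.append('embedding_model_provider', 'openai');
const res = await fetch('/api/uploads', { method: 'POST', body: formData });
const { files } = await res.json(); // [{ fileName, fileExtension, fileId }]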

View File

@@ -1,89 +0,0 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOllama } from '@langchain/ollama';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
ollamaContextWindow?: number;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
// Set context window size for Ollama models
if (llm instanceof ChatOllama && body.chatModel?.provider === 'ollama') {
llm.numCtx = body.chatModel.ollamaContextWindow || 2048;
}
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ videos }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching videos: ${err}`);
return Response.json(
{ message: 'An error occurred while searching videos' },
{ status: 500 },
);
}
};
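For illustration, a request matching the VideoSearchBody interface above; the /api/videos path and the model identifiers are assumptions.

// Hedged usage sketch; route path and model identifiers are hypothetical.
const res = await fetch('/api/videos', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    query: 'How does retrieval-augmented generation work?',
    chatHistory: [],
    chatModel: { provider: 'ollama', model: 'llama3' },
  }),
});
const { videos } = await res.json();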

View File

@@ -1,9 +0,0 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
};
export default Page;

View File

@@ -1,12 +0,0 @@
import { Metadata } from 'next';
import React from 'react';
export const metadata: Metadata = {
title: 'Library - Perplexica',
};
const Layout = ({ children }: { children: React.ReactNode }) => {
return <div>{children}</div>;
};
export default Layout;

View File

@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
@@ -36,12 +36,6 @@ type ImageSearchChainInput = {
query: string;
};
interface ImageSearchResult {
img_src: string;
url: string;
title: string;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
@@ -58,13 +52,11 @@ const createImageSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images: ImageSearchResult[] = [];
const images = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {

View File

@@ -1,5 +1,5 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
@@ -10,7 +10,6 @@ const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI-powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. Each suggestion should be relevant to the conversation and phrased so the user can use it to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length and are informative and relevant to the conversation.
If you are a thinking or reasoning AI, you should avoid using \`<suggestions>\` and \`</suggestions>\` tags in your thinking. Those tags should only be used in the final output.
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:

View File

@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
@@ -36,13 +36,6 @@ type VideoSearchChainInput = {
query: string;
};
interface VideoSearchResult {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
@@ -59,13 +52,11 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos: VideoSearchResult[] = [];
const videos = [];
res.results.forEach((result) => {
if (

View File

@@ -1,241 +0,0 @@
'use client';
import { Fragment, useEffect, useRef, useState } from 'react';
import { File, Message } from './ChatWindow';
import MessageBox from './MessageBox';
import MessageBoxLoading from './MessageBoxLoading';
import MessageInput from './MessageInput';
const Chat = ({
loading,
messages,
sendMessage,
scrollTrigger,
rewrite,
fileIds,
setFileIds,
files,
setFiles,
optimizationMode,
setOptimizationMode,
focusMode,
setFocusMode,
}: {
messages: Message[];
sendMessage: (
message: string,
options?: {
messageId?: string;
rewriteIndex?: number;
suggestions?: string[];
},
) => void;
loading: boolean;
scrollTrigger: number;
rewrite: (messageId: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
}) => {
const [isAtBottom, setIsAtBottom] = useState(true);
const [manuallyScrolledUp, setManuallyScrolledUp] = useState(false);
const [inputStyle, setInputStyle] = useState<React.CSSProperties>({});
const messageEnd = useRef<HTMLDivElement | null>(null);
const containerRef = useRef<HTMLDivElement | null>(null);
const SCROLL_THRESHOLD = 250; // pixels from bottom to consider "at bottom"
// Check if user is at bottom of page
useEffect(() => {
const checkIsAtBottom = () => {
const position = window.innerHeight + window.scrollY;
const height = document.body.scrollHeight;
const atBottom = position >= height - SCROLL_THRESHOLD;
setIsAtBottom(atBottom);
};
// Initial check
checkIsAtBottom();
// Add scroll event listener
window.addEventListener('scroll', checkIsAtBottom);
return () => {
window.removeEventListener('scroll', checkIsAtBottom);
};
}, []);
// Detect wheel and touch events to identify user's scrolling direction
useEffect(() => {
const checkIsAtBottom = () => {
const position = window.innerHeight + window.scrollY;
const height = document.body.scrollHeight;
const atBottom = position >= height - SCROLL_THRESHOLD;
// If user scrolls to bottom, reset the manuallyScrolledUp flag
if (atBottom) {
setManuallyScrolledUp(false);
}
setIsAtBottom(atBottom);
};
const handleWheel = (e: WheelEvent) => {
// Positive deltaY means scrolling down, negative means scrolling up
if (e.deltaY < 0) {
// User is scrolling up
setManuallyScrolledUp(true);
} else if (e.deltaY > 0) {
checkIsAtBottom();
}
};
const handleTouchStart = (e: TouchEvent) => {
// Immediately stop auto-scrolling on any touch interaction
setManuallyScrolledUp(true);
};
// Add event listeners
window.addEventListener('wheel', handleWheel, { passive: true });
window.addEventListener('touchstart', handleTouchStart, { passive: true });
return () => {
window.removeEventListener('wheel', handleWheel);
window.removeEventListener('touchstart', handleTouchStart);
};
}, [isAtBottom]);
// Scroll when user sends a message
useEffect(() => {
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
};
if (messages.length === 1) {
document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
}
// Always scroll when user sends a message
if (messages[messages.length - 1]?.role === 'user') {
scroll();
setIsAtBottom(true); // Reset to true when user sends a message
setManuallyScrolledUp(false); // Reset manually scrolled flag when user sends a message
}
}, [messages]);
// Auto-scroll for assistant responses only if user is at bottom and hasn't manually scrolled up
useEffect(() => {
const position = window.innerHeight + window.scrollY;
const height = document.body.scrollHeight;
const atBottom = position >= height - SCROLL_THRESHOLD;
setIsAtBottom(atBottom);
if (isAtBottom && !manuallyScrolledUp && messages.length > 0) {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
}
}, [scrollTrigger, isAtBottom, messages.length, manuallyScrolledUp]);
// Sync input width with main container width
useEffect(() => {
const updateInputStyle = () => {
if (containerRef.current) {
const rect = containerRef.current.getBoundingClientRect();
setInputStyle({
width: rect.width,
left: rect.left,
right: window.innerWidth - rect.right,
});
}
};
// Initial calculation
updateInputStyle();
// Update on resize
window.addEventListener('resize', updateInputStyle);
return () => {
window.removeEventListener('resize', updateInputStyle);
};
}, []);
return (
<div ref={containerRef} className="space-y-6 pt-8 pb-48 sm:mx-4 md:mx-8">
{messages.map((msg, i) => {
const isLast = i === messages.length - 1;
return (
<Fragment key={msg.messageId}>
<MessageBox
key={i}
message={msg}
messageIndex={i}
history={messages}
loading={loading}
isLast={isLast}
rewrite={rewrite}
sendMessage={sendMessage}
/>
{!isLast && msg.role === 'assistant' && (
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
)}
</Fragment>
);
})}
{loading && <MessageBoxLoading />}
<div className="fixed bottom-24 lg:bottom-10 z-40" style={inputStyle}>
{/* Scroll to bottom button - appears above the MessageInput when user has scrolled up */}
{manuallyScrolledUp && !isAtBottom && (
<div className="absolute -top-14 right-2 z-10">
<button
onClick={() => {
setManuallyScrolledUp(false);
setIsAtBottom(true);
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
}}
className="bg-[#24A0ED] text-white hover:bg-opacity-85 transition duration-100 rounded-full px-4 py-2 shadow-lg flex items-center justify-center"
aria-label="Scroll to bottom"
>
<svg
xmlns="http://www.w3.org/2000/svg"
className="h-5 w-5 mr-1"
viewBox="0 0 20 20"
fill="currentColor"
>
<path
fillRule="evenodd"
d="M14.707 12.707a1 1 0 01-1.414 0L10 9.414l-3.293 3.293a1 1 0 01-1.414-1.414l4-4a1 1 0 011.414 0l4 4a1 1 0 010 1.414z"
clipRule="evenodd"
transform="rotate(180 10 10)"
/>
</svg>
<span className="text-sm">Scroll to bottom</span>
</button>
</div>
)}
<MessageInput
firstMessage={messages.length === 0}
loading={loading}
sendMessage={sendMessage}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
focusMode={focusMode}
setFocusMode={setFocusMode}
/>
</div>
<div ref={messageEnd} className="h-0" />
</div>
);
};
export default Chat;

View File

@@ -1,680 +0,0 @@
'use client';
import { useEffect, useRef, useState } from 'react';
import { Document } from '@langchain/core/documents';
import Navbar from './Navbar';
import Chat from './Chat';
import EmptyChat from './EmptyChat';
import crypto from 'crypto';
import { toast } from 'sonner';
import { useSearchParams } from 'next/navigation';
import { getSuggestions } from '@/lib/actions';
import { Settings } from 'lucide-react';
import Link from 'next/link';
import NextError from 'next/error';
export type ModelStats = {
modelName: string;
responseTime?: number;
};
export type Message = {
messageId: string;
chatId: string;
createdAt: Date;
content: string;
role: 'user' | 'assistant';
suggestions?: string[];
sources?: Document[];
modelStats?: ModelStats;
searchQuery?: string;
searchUrl?: string;
};
export interface File {
fileName: string;
fileExtension: string;
fileId: string;
}
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
// Guard against an empty provider list before indexing into it
if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
return toast.error('No chat models available');
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
const loadMessages = async (
chatId: string,
setMessages: (messages: Message[]) => void,
setIsMessagesLoaded: (loaded: boolean) => void,
setChatHistory: (history: [string, string][]) => void,
setFocusMode: (mode: string) => void,
setNotFound: (notFound: boolean) => void,
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
if (res.status === 404) {
setNotFound(true);
setIsMessagesLoaded(true);
return;
}
const data = await res.json();
const messages = data.messages.map((msg: any) => {
return {
...msg,
...JSON.parse(msg.metadata),
};
}) as Message[];
setMessages(messages);
const history = messages.map((msg) => {
return [msg.role, msg.content];
}) as [string, string][];
console.debug(new Date(), 'app:messages_loaded');
document.title = messages[0].content;
const files = data.chat.files.map((file: any) => {
return {
fileName: file.name,
fileExtension: file.name.split('.').pop(),
fileId: file.fileId,
};
});
setFiles(files);
setFileIds(files.map((file: File) => file.fileId));
setChatHistory(history);
setFocusMode(data.chat.focusMode);
setIsMessagesLoaded(true);
};
const ChatWindow = ({ id }: { id?: string }) => {
const searchParams = useSearchParams();
const initialMessage = searchParams.get('q');
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const [loading, setLoading] = useState(false);
const [scrollTrigger, setScrollTrigger] = useState(0);
const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
const [messages, setMessages] = useState<Message[]>([]);
const [files, setFiles] = useState<File[]>([]);
const [fileIds, setFileIds] = useState<string[]>([]);
const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');
const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
const [notFound, setNotFound] = useState(false);
useEffect(() => {
const savedOptimizationMode = localStorage.getItem('optimizationMode');
if (savedOptimizationMode !== null) {
setOptimizationMode(savedOptimizationMode);
} else {
localStorage.setItem('optimizationMode', optimizationMode);
}
}, []);
useEffect(() => {
if (
chatId &&
!newChatCreated &&
!isMessagesLoaded &&
messages.length === 0
) {
loadMessages(
chatId,
setMessages,
setIsMessagesLoaded,
setChatHistory,
setFocusMode,
setNotFound,
setFiles,
setFileIds,
);
} else if (!chatId) {
setNewChatCreated(true);
setIsMessagesLoaded(true);
setChatId(crypto.randomBytes(20).toString('hex'));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const messagesRef = useRef<Message[]>([]);
useEffect(() => {
messagesRef.current = messages;
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isConfigReady]);
const sendMessage = async (
message: string,
options?: {
messageId?: string;
rewriteIndex?: number;
suggestions?: string[];
},
) => {
setScrollTrigger((x) => (x === 0 ? -1 : 0));
// Special case: If we're just updating an existing message with suggestions
if (options?.suggestions && options.messageId) {
setMessages((prev) =>
prev.map((msg) => {
if (msg.messageId === options.messageId) {
return { ...msg, suggestions: options.suggestions };
}
return msg;
}),
);
return;
}
if (loading) return;
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
setLoading(true);
let sources: Document[] | undefined = undefined;
let recievedMessage = '';
let added = false;
let messageChatHistory = chatHistory;
if (options?.rewriteIndex !== undefined) {
const rewriteIndex = options.rewriteIndex;
setMessages((prev) => {
return [...prev.slice(0, messages.length > 2 ? rewriteIndex - 1 : 0)];
});
messageChatHistory = chatHistory.slice(
0,
messages.length > 2 ? rewriteIndex - 1 : 0,
);
setChatHistory(messageChatHistory);
setScrollTrigger((prev) => prev + 1);
}
const messageId =
options?.messageId ?? crypto.randomBytes(7).toString('hex');
setMessages((prevMessages) => [
...prevMessages,
{
content: message,
messageId: messageId,
chatId: chatId!,
role: 'user',
createdAt: new Date(),
},
]);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
return;
}
if (data.type === 'sources') {
sources = data.data;
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: '',
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
searchQuery: data.searchQuery,
searchUrl: data.searchUrl,
createdAt: new Date(),
},
]);
added = true;
setScrollTrigger((prev) => prev + 1);
}
}
if (data.type === 'message') {
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: data.data,
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
modelStats: {
modelName: data.modelName,
},
},
]);
added = true;
}
setMessages((prev) =>
prev.map((message) => {
if (message.messageId === data.messageId) {
return { ...message, content: message.content + data.data };
}
return message;
}),
);
recievedMessage += data.data;
setScrollTrigger((prev) => prev + 1);
}
if (data.type === 'messageEnd') {
setChatHistory((prevHistory) => [
...prevHistory,
['human', message],
['assistant', recievedMessage],
]);
// Always update the message, adding modelStats if available
setMessages((prev) =>
prev.map((message) => {
if (message.messageId === data.messageId) {
return {
...message,
// Include model stats if available, otherwise null
modelStats: data.modelStats || null,
// Make sure the searchQuery is preserved (if available in the message data)
searchQuery: message.searchQuery || data.searchQuery,
searchUrl: message.searchUrl || data.searchUrl,
};
}
return message;
}),
);
setLoading(false);
setScrollTrigger((prev) => prev + 1);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoSuggestions = localStorage.getItem('autoSuggestions');
if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
lastMsg.sources.length > 0 &&
!lastMsg.suggestions &&
autoSuggestions !== 'false' // Default to true if not set
) {
const suggestions = await getSuggestions(messagesRef.current);
setMessages((prev) =>
prev.map((msg) => {
if (msg.messageId === lastMsg.messageId) {
return { ...msg, suggestions: suggestions };
}
return msg;
}),
);
}
}
};
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
// Get the latest model selection from localStorage
const currentChatModelProvider = localStorage.getItem('chatModelProvider');
const currentChatModel = localStorage.getItem('chatModel');
// Use the most current model selection from localStorage, falling back to the state if not available
const modelProvider =
currentChatModelProvider || chatModelProvider.provider;
const modelName = currentChatModel || chatModelProvider.name;
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: messageChatHistory,
chatModel: {
name: modelName,
provider: modelProvider,
...(chatModelProvider.provider === 'ollama' && {
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
};
const rewrite = (messageId: string) => {
const messageIndex = messages.findIndex(
(msg) => msg.messageId === messageId,
);
if (messageIndex == -1) return;
sendMessage(messages[messageIndex - 1].content, {
messageId: messageId,
rewriteIndex: messageIndex,
});
};
useEffect(() => {
if (isReady && initialMessage && isConfigReady) {
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isConfigReady, isReady, initialMessage]);
if (hasError) {
return (
<div className="relative">
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
<Link href="/settings">
<Settings className="cursor-pointer lg:hidden" />
</Link>
</div>
<div className="flex flex-col items-center justify-center min-h-screen">
<p className="dark:text-white/70 text-black/70 text-sm">
Failed to connect to the server. Please try again later.
</p>
</div>
</div>
);
}
return isReady ? (
notFound ? (
<NextError statusCode={404} />
) : (
<div>
{messages.length > 0 ? (
<>
<Navbar chatId={chatId!} messages={messages} />
<Chat
loading={loading}
messages={messages}
sendMessage={sendMessage}
scrollTrigger={scrollTrigger}
rewrite={rewrite}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
focusMode={focusMode}
setFocusMode={setFocusMode}
/>
</>
) : (
<EmptyChat
sendMessage={sendMessage}
focusMode={focusMode}
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
)}
</div>
)
) : (
<div className="flex flex-row items-center justify-center min-h-screen">
<svg
aria-hidden="true"
className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
);
};
export default ChatWindow;
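For reference, a sketch of the newline-delimited JSON events that the messageHandler above consumes from the chat endpoint. The field names are taken from the handler; the concrete values are illustrative.

// Illustrative stream payloads, one JSON object per line, as parsed in sendMessage.
const exampleStream = [
  JSON.stringify({ type: 'sources', messageId: 'a1b2c3', data: [], searchQuery: 'perplexica', searchUrl: 'https://example.com/search' }),
  JSON.stringify({ type: 'message', messageId: 'a1b2c3', data: 'Perplexica is ', modelName: 'llama3' }),
  JSON.stringify({ type: 'message', messageId: 'a1b2c3', data: 'an open-source AI search engine.' }),
  JSON.stringify({ type: 'messageEnd', messageId: 'a1b2c3', modelStats: { modelName: 'llama3', responseTime: 1240 } }),
].join('\n');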

View File

@@ -1,82 +0,0 @@
'use client';
import React, { useState, useEffect, useRef } from 'react';
import { Info } from 'lucide-react';
import { ModelStats } from '../ChatWindow';
import { cn } from '@/lib/utils';
interface ModelInfoButtonProps {
modelStats: ModelStats | null;
}
const ModelInfoButton: React.FC<ModelInfoButtonProps> = ({ modelStats }) => {
const [showPopover, setShowPopover] = useState(false);
const popoverRef = useRef<HTMLDivElement>(null);
const buttonRef = useRef<HTMLButtonElement>(null);
// Always render, using "Unknown" as fallback if model info isn't available
const modelName = modelStats?.modelName || 'Unknown';
useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
if (
popoverRef.current &&
!popoverRef.current.contains(event.target as Node) &&
buttonRef.current &&
!buttonRef.current.contains(event.target as Node)
) {
setShowPopover(false);
}
};
document.addEventListener('mousedown', handleClickOutside);
return () => {
document.removeEventListener('mousedown', handleClickOutside);
};
}, []);
return (
<div className="relative">
<button
ref={buttonRef}
className="p-1 ml-1 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
onClick={() => setShowPopover(!showPopover)}
aria-label="Show model information"
>
<Info size={18} />
</button>
{showPopover && (
<div
ref={popoverRef}
className="absolute z-10 left-6 top-0 w-64 rounded-md shadow-lg bg-white dark:bg-dark-secondary border border-light-200 dark:border-dark-200"
>
<div className="py-2 px-3">
<h4 className="text-sm font-medium mb-2 text-black dark:text-white">
Model Information
</h4>
<div className="space-y-1 text-xs">
<div className="flex justify-between">
<span className="text-black/70 dark:text-white/70">Model:</span>
<span className="text-black dark:text-white font-medium">
{modelName}
</span>
</div>
{modelStats?.responseTime && (
<div className="flex justify-between">
<span className="text-black/70 dark:text-white/70">
Response time:
</span>
<span className="text-black dark:text-white font-medium">
{(modelStats.responseTime / 1000).toFixed(2)}s
</span>
</div>
)}
</div>
</div>
</div>
)}
</div>
);
};
export default ModelInfoButton;

View File

@@ -1,136 +0,0 @@
import { cn } from '@/lib/utils';
import { CheckCheck, CopyIcon } from 'lucide-react';
import { useState } from 'react';
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
import { oneDark } from 'react-syntax-highlighter/dist/cjs/styles/prism';
import { Message } from './ChatWindow';
import MessageTabs from './MessageTabs';
import ThinkBox from './ThinkBox';
const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
return <ThinkBox content={children as string} />;
};
const CodeBlock = ({
className,
children,
}: {
className?: string;
children: React.ReactNode;
}) => {
// Extract language from className (format could be "language-javascript" or "lang-javascript")
let language = '';
if (className) {
if (className.startsWith('language-')) {
language = className.replace('language-', '');
} else if (className.startsWith('lang-')) {
language = className.replace('lang-', '');
}
}
const content = children as string;
const [isCopied, setIsCopied] = useState(false);
const handleCopyCode = () => {
navigator.clipboard.writeText(content);
setIsCopied(true);
setTimeout(() => setIsCopied(false), 2000);
};
console.log('Code block language:', language, 'Class name:', className); // For debugging
return (
<div className="rounded-md overflow-hidden my-4 relative group border border-dark-secondary">
<div className="flex justify-between items-center px-4 py-2 bg-dark-200 border-b border-dark-secondary text-xs text-white/70 font-mono">
<span>{language}</span>
<button
onClick={handleCopyCode}
className="p-1 rounded-md hover:bg-dark-secondary transition duration-200"
aria-label="Copy code to clipboard"
>
{isCopied ? (
<CheckCheck size={14} className="text-green-500" />
) : (
<CopyIcon size={14} className="text-white/70" />
)}
</button>
</div>
<SyntaxHighlighter
language={language || 'text'}
style={oneDark}
customStyle={{
margin: 0,
padding: '1rem',
borderRadius: 0,
backgroundColor: '#1c1c1c',
}}
wrapLines={true}
wrapLongLines={true}
showLineNumbers={language !== '' && content.split('\n').length > 1}
useInlineStyles={true}
PreTag="div"
>
{content}
</SyntaxHighlighter>
</div>
);
};
const MessageBox = ({
message,
messageIndex,
history,
loading,
isLast,
rewrite,
sendMessage,
}: {
message: Message;
messageIndex: number;
history: Message[];
loading: boolean;
isLast: boolean;
rewrite: (messageId: string) => void;
sendMessage: (
message: string,
options?: {
messageId?: string;
rewriteIndex?: number;
suggestions?: string[];
},
) => void;
}) => {
return (
<div>
{message.role === 'user' && (
<div
className={cn(
'w-full',
messageIndex === 0 ? 'pt-16' : 'pt-8',
'break-words',
)}
>
<h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
{message.content}
</h2>
</div>
)}
{message.role === 'assistant' && (
<MessageTabs
query={history[messageIndex - 1].content}
chatHistory={history.slice(0, messageIndex - 1)}
messageId={message.messageId}
message={message}
isLast={isLast}
loading={loading}
rewrite={rewrite}
sendMessage={sendMessage}
/>
)}
</div>
);
};
export default MessageBox;

View File

@@ -1,305 +0,0 @@
import { useEffect, useState } from 'react';
import { Cpu, ChevronDown, ChevronRight } from 'lucide-react';
import { cn } from '@/lib/utils';
import {
Popover,
PopoverButton,
PopoverPanel,
Transition,
} from '@headlessui/react';
import { Fragment } from 'react';
interface ModelOption {
provider: string;
model: string;
displayName: string;
}
interface ProviderModelMap {
[provider: string]: {
displayName: string;
models: ModelOption[];
};
}
const ModelSelector = ({
selectedModel,
setSelectedModel,
}: {
selectedModel: { provider: string; model: string } | null;
setSelectedModel: (model: { provider: string; model: string }) => void;
}) => {
const [providerModels, setProviderModels] = useState<ProviderModelMap>({});
const [providersList, setProvidersList] = useState<string[]>([]);
const [loading, setLoading] = useState(true);
const [selectedModelDisplay, setSelectedModelDisplay] = useState<string>('');
const [selectedProviderDisplay, setSelectedProviderDisplay] =
useState<string>('');
const [expandedProviders, setExpandedProviders] = useState<
Record<string, boolean>
>({});
useEffect(() => {
const fetchModels = async () => {
try {
const response = await fetch('/api/models', {
headers: {
'Content-Type': 'application/json',
},
});
if (!response.ok) {
throw new Error(`Failed to fetch models: ${response.status}`);
}
const data = await response.json();
const providersData: ProviderModelMap = {};
// Organize models by provider
Object.entries(data.chatModelProviders).forEach(
([provider, models]: [string, any]) => {
const providerDisplayName =
provider.charAt(0).toUpperCase() + provider.slice(1);
providersData[provider] = {
displayName: providerDisplayName,
models: [],
};
Object.entries(models).forEach(
([modelKey, modelData]: [string, any]) => {
providersData[provider].models.push({
provider,
model: modelKey,
displayName: modelData.displayName || modelKey,
});
},
);
},
);
// Filter out providers with no models
Object.keys(providersData).forEach((provider) => {
if (providersData[provider].models.length === 0) {
delete providersData[provider];
}
});
// Sort providers by name (only those that have models)
const sortedProviders = Object.keys(providersData).sort();
setProvidersList(sortedProviders);
// Initialize expanded state for all providers
const initialExpandedState: Record<string, boolean> = {};
sortedProviders.forEach((provider) => {
initialExpandedState[provider] = selectedModel?.provider === provider;
});
// Expand the first provider if none is selected
if (sortedProviders.length > 0 && !selectedModel) {
initialExpandedState[sortedProviders[0]] = true;
}
setExpandedProviders(initialExpandedState);
setProviderModels(providersData);
// Find the current model in our options to display its name
if (selectedModel) {
const provider = providersData[selectedModel.provider];
if (provider) {
const currentModel = provider.models.find(
(option) => option.model === selectedModel.model,
);
if (currentModel) {
setSelectedModelDisplay(currentModel.displayName);
setSelectedProviderDisplay(provider.displayName);
}
}
}
setLoading(false);
} catch (error) {
console.error('Error fetching models:', error);
setLoading(false);
}
};
fetchModels();
}, [selectedModel, setSelectedModel]);
const toggleProviderExpanded = (provider: string) => {
setExpandedProviders((prev) => ({
...prev,
[provider]: !prev[provider],
}));
};
const handleSelectModel = (option: ModelOption) => {
setSelectedModel({
provider: option.provider,
model: option.model,
});
setSelectedModelDisplay(option.displayName);
setSelectedProviderDisplay(
providerModels[option.provider]?.displayName || option.provider,
);
// Save to localStorage for persistence
localStorage.setItem('chatModelProvider', option.provider);
localStorage.setItem('chatModel', option.model);
};
const getDisplayText = () => {
if (loading) return 'Loading...';
if (!selectedModelDisplay) return 'Select model';
return `${selectedModelDisplay} (${selectedProviderDisplay})`;
};
return (
<Popover className="relative">
{({ open }) => (
<>
<div className="relative">
<PopoverButton className="group flex items-center justify-center text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary active:scale-95 transition duration-200 hover:text-black dark:hover:text-white">
<Cpu size={18} />
<span className="mx-2 text-xs font-medium overflow-hidden text-ellipsis whitespace-nowrap max-w-44 hidden lg:block">
{getDisplayText()}
</span>
<ChevronDown
size={16}
className={cn(
'transition-transform',
open ? 'rotate-180' : 'rotate-0',
)}
/>
</PopoverButton>
</div>
<Transition
as={Fragment}
enter="transition ease-out duration-200"
enterFrom="opacity-0 translate-y-1"
enterTo="opacity-100 translate-y-0"
leave="transition ease-in duration-150"
leaveFrom="opacity-100 translate-y-0"
leaveTo="opacity-0 translate-y-1"
>
<PopoverPanel className="absolute z-10 w-72 transform bottom-full mb-2">
<div className="overflow-hidden rounded-lg shadow-lg ring-1 ring-black/5 dark:ring-white/5 bg-white dark:bg-dark-secondary divide-y divide-light-200 dark:divide-dark-200">
<div className="px-4 py-3">
<h3 className="text-sm font-medium text-black/90 dark:text-white/90">
Select Model
</h3>
<p className="text-xs text-black/60 dark:text-white/60 mt-1">
Choose a provider and model for your conversation
</p>
</div>
<div className="max-h-72 overflow-y-auto">
{loading ? (
<div className="px-4 py-3 text-sm text-black/70 dark:text-white/70">
Loading available models...
</div>
) : providersList.length === 0 ? (
<div className="px-4 py-3 text-sm text-black/70 dark:text-white/70">
No models available
</div>
) : (
<div className="py-1">
{providersList.map((providerKey) => {
const provider = providerModels[providerKey];
const isExpanded = expandedProviders[providerKey];
return (
<div
key={providerKey}
className="border-t border-light-200 dark:border-dark-200 first:border-t-0"
>
{/* Provider header */}
<button
className={cn(
'w-full flex items-center justify-between px-4 py-2 text-sm text-left',
'hover:bg-light-100 dark:hover:bg-dark-100',
selectedModel?.provider === providerKey
? 'bg-light-50 dark:bg-dark-50'
: '',
)}
onClick={() =>
toggleProviderExpanded(providerKey)
}
>
<div className="font-medium flex items-center">
<Cpu
size={14}
className="mr-2 text-black/70 dark:text-white/70"
/>
{provider.displayName}
{selectedModel?.provider === providerKey && (
<span className="ml-2 text-xs text-[#24A0ED]">
(active)
</span>
)}
</div>
<ChevronRight
size={14}
className={cn(
'transition-transform',
isExpanded ? 'rotate-90' : '',
)}
/>
</button>
{/* Models list */}
{isExpanded && (
<div className="pl-6">
{provider.models.map((modelOption) => (
<PopoverButton
key={`${modelOption.provider}-${modelOption.model}`}
className={cn(
'w-full text-left px-4 py-2 text-sm flex items-center',
selectedModel?.provider ===
modelOption.provider &&
selectedModel?.model ===
modelOption.model
? 'bg-light-100 dark:bg-dark-100 text-black dark:text-white'
: 'text-black/70 dark:text-white/70 hover:bg-light-100 dark:hover:bg-dark-100',
)}
onClick={() =>
handleSelectModel(modelOption)
}
>
<div className="flex flex-col flex-1">
<span className="font-medium">
{modelOption.displayName}
</span>
</div>
{/* Active indicator */}
{selectedModel?.provider ===
modelOption.provider &&
selectedModel?.model ===
modelOption.model && (
<div className="ml-auto bg-[#24A0ED] text-white text-xs px-1.5 py-0.5 rounded">
Active
</div>
)}
</PopoverButton>
))}
</div>
)}
</div>
);
})}
</div>
)}
</div>
</div>
</PopoverPanel>
</Transition>
</>
)}
</Popover>
);
};
export default ModelSelector;
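For reference, a sketch of the /api/models payload that ModelSelector (and checkConfig in ChatWindow) expects. The nesting mirrors the parsing code above; the provider and model names are illustrative.

// Illustrative response shape; only displayName is read per model here.
const exampleModelsResponse = {
  chatModelProviders: {
    openai: {
      'gpt-4o-mini': { displayName: 'GPT-4o Mini' },
    },
    ollama: {
      'llama3:latest': { displayName: 'Llama 3' },
    },
  },
  embeddingModelProviders: {
    openai: {
      'text-embedding-3-small': { displayName: 'Text Embedding 3 Small' },
    },
  },
};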

View File

@@ -1,48 +0,0 @@
/* eslint-disable @next/next/no-img-element */
import { Document } from '@langchain/core/documents';
import { File } from 'lucide-react';
const MessageSources = ({ sources }: { sources: Document[] }) => {
return (
<div className="grid grid-cols-2 lg:grid-cols-4 gap-2">
{sources.map((source, i) => (
<a
className="bg-light-100 hover:bg-light-200 dark:bg-dark-100 dark:hover:bg-dark-200 transition duration-200 rounded-lg p-3 flex flex-col space-y-2 font-medium"
key={i}
href={source.metadata.url}
target="_blank"
>
<p className="dark:text-white text-xs overflow-hidden whitespace-nowrap text-ellipsis">
{source.metadata.title}
</p>
<div className="flex flex-row items-center justify-between">
<div className="flex flex-row items-center space-x-1">
{source.metadata.url === 'File' ? (
<div className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full">
<File size={12} className="text-white/70" />
</div>
) : (
<img
src={`https://s2.googleusercontent.com/s2/favicons?domain_url=${source.metadata.url}`}
width={16}
height={16}
alt="favicon"
className="rounded-lg h-4 w-4"
/>
)}
<p className="text-xs text-black/50 dark:text-white/50 overflow-hidden whitespace-nowrap text-ellipsis">
{source.metadata.url.replace(/.+\/\/|www.|\..+/g, '')}
</p>
</div>
<div className="flex flex-row items-center space-x-1 text-black/50 dark:text-white/50 text-xs">
<div className="bg-black/50 dark:bg-white/50 h-[4px] w-[4px] rounded-full" />
<span>{i + 1}</span>
</div>
</div>
</a>
))}
</div>
);
};
export default MessageSources;

View File

@@ -1,535 +0,0 @@
/* eslint-disable @next/next/no-img-element */
'use client';
import { getSuggestions } from '@/lib/actions';
import { cn } from '@/lib/utils';
import {
BookCopy,
CheckCheck,
Copy as CopyIcon,
Disc3,
ImagesIcon,
Layers3,
Plus,
Sparkles,
StopCircle,
VideoIcon,
Volume2,
} from 'lucide-react';
import Markdown, { MarkdownToJSX } from 'markdown-to-jsx';
import { useEffect, useState } from 'react';
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
import { oneDark } from 'react-syntax-highlighter/dist/cjs/styles/prism';
import { useSpeech } from 'react-text-to-speech';
import { Message } from './ChatWindow';
import Copy from './MessageActions/Copy';
import ModelInfoButton from './MessageActions/ModelInfo';
import Rewrite from './MessageActions/Rewrite';
import MessageSources from './MessageSources';
import SearchImages from './SearchImages';
import SearchVideos from './SearchVideos';
import ThinkBox from './ThinkBox';
const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
return <ThinkBox content={children as string} />;
};
const CodeBlock = ({
className,
children,
}: {
className?: string;
children: React.ReactNode;
}) => {
// Extract language from className (format could be "language-javascript" or "lang-javascript")
let language = '';
if (className) {
if (className.startsWith('language-')) {
language = className.replace('language-', '');
} else if (className.startsWith('lang-')) {
language = className.replace('lang-', '');
}
}
const content = children as string;
const [isCopied, setIsCopied] = useState(false);
const handleCopyCode = () => {
navigator.clipboard.writeText(content);
setIsCopied(true);
setTimeout(() => setIsCopied(false), 2000);
};
return (
<div className="rounded-md overflow-hidden my-4 relative group border border-dark-secondary">
<div className="flex justify-between items-center px-4 py-2 bg-dark-200 border-b border-dark-secondary text-xs text-white/70 font-mono">
<span>{language}</span>
<button
onClick={handleCopyCode}
className="p-1 rounded-md hover:bg-dark-secondary transition duration-200"
aria-label="Copy code to clipboard"
>
{isCopied ? (
<CheckCheck size={14} className="text-green-500" />
) : (
<CopyIcon size={14} className="text-white/70" />
)}
</button>
</div>
<SyntaxHighlighter
language={language || 'text'}
style={oneDark}
customStyle={{
margin: 0,
padding: '1rem',
borderRadius: 0,
backgroundColor: '#1c1c1c',
}}
wrapLines={true}
wrapLongLines={true}
showLineNumbers={language !== '' && content.split('\n').length > 1}
useInlineStyles={true}
PreTag="div"
>
{content}
</SyntaxHighlighter>
</div>
);
};
type TabType = 'text' | 'sources' | 'images' | 'videos';
interface SearchTabsProps {
chatHistory: Message[];
query: string;
messageId: string;
message: Message;
isLast: boolean;
loading: boolean;
rewrite: (messageId: string) => void;
sendMessage: (
message: string,
options?: {
messageId?: string;
rewriteIndex?: number;
suggestions?: string[];
},
) => void;
}
const MessageTabs = ({
chatHistory,
query,
messageId,
message,
isLast,
loading,
rewrite,
sendMessage,
}: SearchTabsProps) => {
const [activeTab, setActiveTab] = useState<TabType>('text');
const [imageCount, setImageCount] = useState(0);
const [videoCount, setVideoCount] = useState(0);
const [parsedMessage, setParsedMessage] = useState(message.content);
const [speechMessage, setSpeechMessage] = useState(message.content);
const [loadingSuggestions, setLoadingSuggestions] = useState(false);
const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
// Callback functions to update counts
const updateImageCount = (count: number) => {
setImageCount(count);
};
const updateVideoCount = (count: number) => {
setVideoCount(count);
};
// Load suggestions handling
const handleLoadSuggestions = async () => {
if (
loadingSuggestions ||
(message?.suggestions && message.suggestions.length > 0)
)
return;
setLoadingSuggestions(true);
try {
const suggestions = await getSuggestions([...chatHistory, message]);
// Update the message.suggestions property through parent component
sendMessage('', { messageId: message.messageId, suggestions });
} catch (error) {
console.error('Error loading suggestions:', error);
} finally {
setLoadingSuggestions(false);
}
};
// Process message content
useEffect(() => {
const citationRegex = /\[([^\]]+)\]/g;
const regex = /\[(\d+)\]/g;
let processedMessage = message.content;
if (message.role === 'assistant' && message.content.includes('<think>')) {
const openThinkTag = processedMessage.match(/<think>/g)?.length || 0;
const closeThinkTag = processedMessage.match(/<\/think>/g)?.length || 0;
if (openThinkTag > closeThinkTag) {
processedMessage += '</think> <a> </a>'; // The extra <a> </a> is to prevent the think component from looking bad
}
}
if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length > 0
) {
setParsedMessage(
processedMessage.replace(
citationRegex,
(_, capturedContent: string) => {
const numbers = capturedContent
.split(',')
.map((numStr) => numStr.trim());
const linksHtml = numbers
.map((numStr) => {
const number = parseInt(numStr);
if (isNaN(number) || number <= 0) {
return `[${numStr}]`;
}
const source = message.sources?.[number - 1];
const url = source?.metadata?.url;
if (url) {
return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
} else {
return `[${numStr}]`;
}
})
.join('');
return linksHtml;
},
),
);
setSpeechMessage(message.content.replace(regex, ''));
return;
}
setSpeechMessage(message.content.replace(regex, ''));
setParsedMessage(processedMessage);
}, [message.content, message.sources, message.role]);
// Auto-suggest effect (similar to MessageBox)
useEffect(() => {
const autoSuggestions = localStorage.getItem('autoSuggestions');
if (
isLast &&
message.role === 'assistant' &&
!loading &&
autoSuggestions === 'true'
) {
handleLoadSuggestions();
}
}, [isLast, loading, message.role]);
// Markdown formatting options
const markdownOverrides: MarkdownToJSX.Options = {
overrides: {
think: {
component: ThinkTagProcessor,
},
code: {
component: ({ className, children }) => {
// Check if it's an inline code block or a fenced code block
if (className) {
// This is a fenced code block (```code```)
return <CodeBlock className={className}>{children}</CodeBlock>;
}
// This is an inline code block (`code`)
return (
<code className="px-1.5 py-0.5 rounded bg-dark-secondary text-white font-mono text-sm">
{children}
</code>
);
},
},
pre: {
component: ({ children }) => children,
},
},
};
return (
<div className="flex flex-col w-full">
{/* Tabs */}
<div className="flex border-b border-light-200 dark:border-dark-200 overflow-x-auto no-scrollbar sticky top-0 bg-light-primary dark:bg-dark-primary z-10 -mx-4 px-4 mb-2 shadow-sm">
<button
onClick={() => setActiveTab('text')}
className={cn(
'flex items-center px-4 py-3 text-sm font-medium transition-all duration-200 relative',
activeTab === 'text'
? 'border-b-2 border-[#24A0ED] text-[#24A0ED] bg-light-100 dark:bg-dark-100'
: 'text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white hover:bg-light-100 dark:hover:bg-dark-100',
)}
aria-selected={activeTab === 'text'}
role="tab"
>
<Disc3 size={16} className="mr-2" />
<span className="whitespace-nowrap">Answer</span>
</button>
{message.sources && message.sources.length > 0 && (
<button
onClick={() => setActiveTab('sources')}
className={cn(
'flex items-center space-x-2 px-4 py-3 text-sm font-medium transition-all duration-200 relative',
activeTab === 'sources'
? 'border-b-2 border-[#24A0ED] text-[#24A0ED] bg-light-100 dark:bg-dark-100'
: 'text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white hover:bg-light-100 dark:hover:bg-dark-100',
)}
aria-selected={activeTab === 'sources'}
role="tab"
>
<BookCopy size={16} />
<span className="whitespace-nowrap">Sources</span>
<span
className={cn(
'ml-1.5 px-1.5 py-0.5 text-xs rounded-full',
activeTab === 'sources'
? 'bg-[#24A0ED]/20 text-[#24A0ED]'
: 'bg-light-200 dark:bg-dark-200 text-black/70 dark:text-white/70',
)}
>
{message.sources.length}
</span>
</button>
)}
<button
onClick={() => setActiveTab('images')}
className={cn(
'flex items-center space-x-2 px-4 py-3 text-sm font-medium transition-all duration-200 relative',
activeTab === 'images'
? 'border-b-2 border-[#24A0ED] text-[#24A0ED] bg-light-100 dark:bg-dark-100'
: 'text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white hover:bg-light-100 dark:hover:bg-dark-100',
)}
aria-selected={activeTab === 'images'}
role="tab"
>
<ImagesIcon size={16} />
<span className="whitespace-nowrap">Images</span>
{imageCount > 0 && (
<span
className={cn(
'ml-1.5 px-1.5 py-0.5 text-xs rounded-full',
activeTab === 'images'
? 'bg-[#24A0ED]/20 text-[#24A0ED]'
: 'bg-light-200 dark:bg-dark-200 text-black/70 dark:text-white/70',
)}
>
{imageCount}
</span>
)}
</button>
<button
onClick={() => setActiveTab('videos')}
className={cn(
'flex items-center space-x-2 px-4 py-3 text-sm font-medium transition-all duration-200 relative',
activeTab === 'videos'
? 'border-b-2 border-[#24A0ED] text-[#24A0ED] bg-light-100 dark:bg-dark-100'
: 'text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white hover:bg-light-100 dark:hover:bg-dark-100',
)}
aria-selected={activeTab === 'videos'}
role="tab"
>
<VideoIcon size={16} />
<span className="whitespace-nowrap">Videos</span>
{videoCount > 0 && (
<span
className={cn(
'ml-1.5 px-1.5 py-0.5 text-xs rounded-full',
activeTab === 'videos'
? 'bg-[#24A0ED]/20 text-[#24A0ED]'
: 'bg-light-200 dark:bg-dark-200 text-black/70 dark:text-white/70',
)}
>
{videoCount}
</span>
)}
</button>
</div>
{/* Tab content */}
<div
className="min-h-[150px] transition-all duration-200 ease-in-out"
role="tabpanel"
>
{/* Answer Tab */}
{activeTab === 'text' && (
<div className="flex flex-col space-y-4 animate-fadeIn">
<Markdown
className={cn(
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'prose-code:bg-transparent prose-code:p-0 prose-code:text-inherit prose-code:font-normal prose-code:before:content-none prose-code:after:content-none',
'prose-pre:bg-transparent prose-pre:border-0 prose-pre:m-0 prose-pre:p-0',
'max-w-none break-words px-4 text-white',
)}
options={markdownOverrides}
>
{parsedMessage}
</Markdown>
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white px-4 py-4">
<div className="flex flex-row items-center space-x-1">
<Rewrite rewrite={rewrite} messageId={message.messageId} />
{message.modelStats && (
<ModelInfoButton modelStats={message.modelStats} />
)}
</div>
<div className="flex flex-row items-center space-x-1">
<Copy initialMessage={message.content} message={message} />
<button
onClick={() => {
if (speechStatus === 'started') {
stop();
} else {
start();
}
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{speechStatus === 'started' ? (
<StopCircle size={18} />
) : (
<Volume2 size={18} />
)}
</button>
</div>
</div>
)}
{isLast && message.role === 'assistant' && !loading && (
<>
<div className="border-t border-light-secondary dark:border-dark-secondary px-4 pt-4 mt-4">
<div className="flex flex-row items-center space-x-2 mb-3">
<Layers3 size={20} />
<h3 className="text-xl font-medium">Related</h3>
{(!message.suggestions ||
message.suggestions.length === 0) && (
<button
onClick={handleLoadSuggestions}
disabled={loadingSuggestions}
className="px-4 py-2 flex flex-row items-center justify-center space-x-2 rounded-lg bg-light-secondary dark:bg-dark-secondary hover:bg-light-200 dark:hover:bg-dark-200 transition duration-200 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white"
>
{loadingSuggestions ? (
<div className="w-4 h-4 border-2 border-t-transparent border-gray-400 dark:border-gray-500 rounded-full animate-spin" />
) : (
<Sparkles size={16} />
)}
<span>
{loadingSuggestions
? 'Loading suggestions...'
: 'Load suggestions'}
</span>
</button>
)}
</div>
{message.suggestions && message.suggestions.length > 0 && (
<div className="flex flex-col space-y-3 mt-2">
{message.suggestions.map((suggestion, i) => (
<div
className="flex flex-col space-y-3 text-sm"
key={i}
>
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
<div
onClick={() => {
sendMessage(suggestion);
}}
className="cursor-pointer flex flex-row justify-between font-medium space-x-2 items-center"
>
<p className="transition duration-200 hover:text-[#24A0ED]">
{suggestion}
</p>
<Plus
size={20}
className="text-[#24A0ED] flex-shrink-0"
/>
</div>
</div>
))}
</div>
)}
</div>
</>
)}
</div>
)}
{/* Sources Tab */}
{activeTab === 'sources' &&
message.sources &&
message.sources.length > 0 && (
<div className="p-4 animate-fadeIn">
{message.searchQuery && (
<div className="mb-4 text-sm bg-light-secondary dark:bg-dark-secondary rounded-lg p-3">
<span className="font-medium text-black/70 dark:text-white/70">
Search query:
</span>{' '}
{message.searchUrl ? (
<a
href={message.searchUrl}
target="_blank"
rel="noopener noreferrer"
className="dark:text-white text-black hover:underline"
>
{message.searchQuery}
</a>
) : (
<span className="text-black dark:text-white">
{message.searchQuery}
</span>
)}
</div>
)}
<MessageSources sources={message.sources} />
</div>
)}
{/* Images Tab */}
{activeTab === 'images' && (
<div className="p-3 animate-fadeIn">
<SearchImages
query={query}
chatHistory={chatHistory}
messageId={messageId}
onImagesLoaded={updateImageCount}
/>
</div>
)}
{/* Videos Tab */}
{activeTab === 'videos' && (
<div className="p-3 animate-fadeIn">
<SearchVideos
query={query}
chatHistory={chatHistory}
messageId={messageId}
onVideosLoaded={updateVideoCount}
/>
</div>
)}
</div>
</div>
);
};
export default MessageTabs;


@@ -1,139 +0,0 @@
/* eslint-disable @next/next/no-img-element */
import { useEffect, useRef, useState } from 'react';
import Lightbox from 'yet-another-react-lightbox';
import 'yet-another-react-lightbox/styles.css';
import { Message } from './ChatWindow';
type Image = {
url: string;
img_src: string;
title: string;
};
const SearchImages = ({
query,
chatHistory,
messageId,
onImagesLoaded,
}: {
query: string;
chatHistory: Message[];
messageId: string;
onImagesLoaded?: (count: number) => void;
}) => {
const [images, setImages] = useState<Image[] | null>(null);
const [loading, setLoading] = useState(true);
const [open, setOpen] = useState(false);
const [slides, setSlides] = useState<any[]>([]);
const hasLoadedRef = useRef(false);
useEffect(() => {
// Skip fetching if images are already loaded for this message
if (hasLoadedRef.current) {
return;
}
const fetchImages = async () => {
setLoading(true);
const chatModelProvider = localStorage.getItem('chatModelProvider');
const chatModel = localStorage.getItem('chatModel');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
try {
const res = await fetch(`/api/images`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
...(chatModelProvider === 'ollama' && {
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
}),
});
const data = await res.json();
const images = data.images ?? [];
setImages(images);
setSlides(
images.map((image: Image) => {
return {
src: image.img_src,
};
}),
);
if (onImagesLoaded && images.length > 0) {
onImagesLoaded(images.length);
}
// Mark as loaded to prevent refetching
hasLoadedRef.current = true;
} catch (error) {
console.error('Error fetching images:', error);
} finally {
setLoading(false);
}
};
fetchImages();
// Reset the loading state when component unmounts
return () => {
hasLoadedRef.current = false;
};
}, [query, messageId]);
return (
<>
{loading && (
<div className="grid grid-cols-2 gap-2">
{[...Array(4)].map((_, i) => (
<div
key={i}
className="bg-light-secondary dark:bg-dark-secondary h-32 w-full rounded-lg animate-pulse aspect-video object-cover"
/>
))}
</div>
)}
{images !== null && images.length > 0 && (
<>
<div className="grid grid-cols-2 gap-2">
{images.map((image, i) => (
<img
onClick={() => {
setOpen(true);
setSlides([
slides[i],
...slides.slice(0, i),
...slides.slice(i + 1),
]);
}}
key={i}
src={image.img_src}
alt={image.title}
className="h-full w-full aspect-video object-cover rounded-lg transition duration-200 active:scale-95 hover:scale-[1.02] cursor-zoom-in"
/>
))}
</div>
<Lightbox open={open} close={() => setOpen(false)} slides={slides} />
</>
)}
</>
);
};
export default SearchImages;


@@ -1,203 +0,0 @@
/* eslint-disable @next/next/no-img-element */
import { PlayCircle } from 'lucide-react';
import { useEffect, useRef, useState } from 'react';
import Lightbox, { GenericSlide, VideoSlide } from 'yet-another-react-lightbox';
import 'yet-another-react-lightbox/styles.css';
import { Message } from './ChatWindow';
type Video = {
url: string;
img_src: string;
title: string;
iframe_src: string;
};
declare module 'yet-another-react-lightbox' {
export interface VideoSlide extends GenericSlide {
type: 'video-slide';
src: string;
iframe_src: string;
}
interface SlideTypes {
'video-slide': VideoSlide;
}
}
const Searchvideos = ({
query,
chatHistory,
messageId,
onVideosLoaded,
}: {
query: string;
chatHistory: Message[];
messageId: string;
onVideosLoaded?: (count: number) => void;
}) => {
const [videos, setVideos] = useState<Video[] | null>(null);
const [loading, setLoading] = useState(true);
const [open, setOpen] = useState(false);
const [slides, setSlides] = useState<VideoSlide[]>([]);
const [currentIndex, setCurrentIndex] = useState(0);
const videoRefs = useRef<(HTMLIFrameElement | null)[]>([]);
const hasLoadedRef = useRef(false);
useEffect(() => {
// Skip fetching if videos are already loaded for this message
if (hasLoadedRef.current) {
return;
}
const fetchVideos = async () => {
setLoading(true);
const chatModelProvider = localStorage.getItem('chatModelProvider');
const chatModel = localStorage.getItem('chatModel');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
try {
const res = await fetch(`/api/videos`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
...(chatModelProvider === 'ollama' && {
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
}),
});
const data = await res.json();
const videos = data.videos ?? [];
setVideos(videos);
setSlides(
videos.map((video: Video) => {
return {
type: 'video-slide',
iframe_src: video.iframe_src,
src: video.img_src,
};
}),
);
if (onVideosLoaded && videos.length > 0) {
onVideosLoaded(videos.length);
}
// Mark as loaded to prevent refetching
hasLoadedRef.current = true;
} catch (error) {
console.error('Error fetching videos:', error);
} finally {
setLoading(false);
}
};
fetchVideos();
// Reset the loading state when component unmounts
return () => {
hasLoadedRef.current = false;
};
}, [query, messageId]);
return (
<>
{loading && (
<div className="grid grid-cols-2 gap-2">
{[...Array(4)].map((_, i) => (
<div
key={i}
className="bg-light-secondary dark:bg-dark-secondary h-32 w-full rounded-lg animate-pulse aspect-video object-cover"
/>
))}
</div>
)}
{videos !== null && videos.length > 0 && (
<>
<div className="grid grid-cols-2 gap-2">
{videos.map((video, i) => (
<div
onClick={() => {
setOpen(true);
setSlides([
slides[i],
...slides.slice(0, i),
...slides.slice(i + 1),
]);
}}
className="relative transition duration-200 active:scale-95 hover:scale-[1.02] cursor-pointer"
key={i}
>
<img
src={video.img_src}
alt={video.title}
className="relative h-full w-full aspect-video object-cover rounded-lg"
/>
<div className="absolute bg-white/70 dark:bg-black/70 text-black/70 dark:text-white/70 px-2 py-1 flex flex-row items-center space-x-1 bottom-1 right-1 rounded-md">
<PlayCircle size={15} />
<p className="text-xs">Video</p>
</div>
</div>
))}
</div>
<Lightbox
open={open}
close={() => setOpen(false)}
slides={slides}
index={currentIndex}
on={{
view: ({ index }) => {
const previousIframe = videoRefs.current[currentIndex];
if (previousIframe?.contentWindow) {
previousIframe.contentWindow.postMessage(
'{"event":"command","func":"pauseVideo","args":""}',
'*',
);
}
setCurrentIndex(index);
},
}}
render={{
slide: ({ slide }) => {
const index = slides.findIndex((s) => s === slide);
return slide.type === 'video-slide' ? (
<div className="h-full w-full flex flex-row items-center justify-center">
<iframe
src={`${slide.iframe_src}${slide.iframe_src.includes('?') ? '&' : '?'}enablejsapi=1`}
ref={(el) => {
if (el) {
videoRefs.current[index] = el;
}
}}
className="aspect-video max-h-[95vh] w-[95vw] rounded-2xl md:w-[80vw]"
allowFullScreen
allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
/>
</div>
) : null;
},
}}
/>
</>
)}
</>
);
};
export default Searchvideos;


@@ -1,99 +0,0 @@
'use client';
import { cn } from '@/lib/utils';
import { BookOpenText, Home, Search, SquarePen, Settings } from 'lucide-react';
import Link from 'next/link';
import { useSelectedLayoutSegments } from 'next/navigation';
import React, { useState, type ReactNode } from 'react';
import Layout from './Layout';
const VerticalIconContainer = ({ children }: { children: ReactNode }) => {
return (
<div className="flex flex-col items-center gap-y-3 w-full">{children}</div>
);
};
const Sidebar = ({ children }: { children: React.ReactNode }) => {
const segments = useSelectedLayoutSegments();
const navLinks = [
{
icon: Home,
href: '/',
active: segments.length === 0 || segments.includes('c'),
label: 'Home',
},
{
icon: Search,
href: '/discover',
active: segments.includes('discover'),
label: 'Discover',
},
{
icon: BookOpenText,
href: '/library',
active: segments.includes('library'),
label: 'Library',
},
];
return (
<div>
<div className="hidden lg:fixed lg:inset-y-0 lg:z-50 lg:flex lg:w-20 lg:flex-col">
<div className="flex grow flex-col items-center justify-between gap-y-5 overflow-y-auto bg-light-secondary dark:bg-dark-secondary px-2 py-8">
<a href="/">
<SquarePen className="cursor-pointer" />
</a>
<VerticalIconContainer>
{navLinks.map((link, i) => (
<Link
key={i}
href={link.href}
className={cn(
'relative flex flex-row items-center justify-center cursor-pointer hover:bg-black/10 dark:hover:bg-white/10 duration-150 transition w-full py-2 rounded-lg',
link.active
? 'text-black dark:text-white'
: 'text-black/70 dark:text-white/70',
)}
>
<link.icon />
{link.active && (
<div className="absolute right-0 -mr-2 h-full w-1 rounded-l-lg bg-black dark:bg-white" />
)}
</Link>
))}
</VerticalIconContainer>
<Link href="/settings">
<Settings className="cursor-pointer" />
</Link>
</div>
</div>
<div className="fixed bottom-0 w-full z-50 flex flex-row items-center gap-x-6 bg-light-primary dark:bg-dark-primary px-4 py-4 shadow-sm lg:hidden">
{navLinks.map((link, i) => (
<Link
href={link.href}
key={i}
className={cn(
'relative flex flex-col items-center space-y-1 text-center w-full',
link.active
? 'text-black dark:text-white'
: 'text-black dark:text-white/70',
)}
>
{link.active && (
<div className="absolute top-0 -mt-4 h-1 w-full rounded-b-lg bg-black dark:bg-white" />
)}
<link.icon />
<p className="text-xs">{link.label}</p>
</Link>
))}
</div>
<Layout>{children}</Layout>
</div>
);
};
export default Sidebar;


@@ -1,43 +0,0 @@
'use client';
import { useState } from 'react';
import { cn } from '@/lib/utils';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
content: string;
}
const ThinkBox = ({ content }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(false);
return (
<div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
<button
onClick={() => setIsExpanded(!isExpanded)}
className="w-full flex items-center justify-between px-4 py-1 text-black/90 dark:text-white/90 hover:bg-light-200 dark:hover:bg-dark-200 transition duration-200"
>
<div className="flex items-center space-x-2">
<BrainCircuit
size={20}
className="text-[#9C27B0] dark:text-[#CE93D8]"
/>
<p className="font-medium text-sm">Thinking Process</p>
</div>
{isExpanded ? (
<ChevronUp size={18} className="text-black/70 dark:text-white/70" />
) : (
<ChevronDown size={18} className="text-black/70 dark:text-white/70" />
)}
</button>
{isExpanded && (
<div className="px-4 py-3 text-black/80 dark:text-white/80 text-sm border-t border-light-200 dark:border-dark-200 bg-light-100/50 dark:bg-dark-100/50 whitespace-pre-wrap">
{content}
</div>
)}
</div>
);
};
export default ThinkBox;


@@ -1,21 +1,18 @@
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';
// Use dynamic imports for Node.js modules to prevent client-side errors
let fs: any;
let path: any;
if (typeof window === 'undefined') {
// We're on the server
fs = require('fs');
path = require('path');
}
const configFileName = 'config.toml';
interface Config {
GENERAL: {
PORT: number;
SIMILARITY_MEASURE: string;
CONFIG_PASSWORD: string;
DISCOVER_ENABLED: boolean;
LIBRARY_ENABLED: boolean;
COPILOT_ENABLED: boolean;
KEEP_ALIVE: string;
BASE_URL?: string;
};
MODELS: {
OPENAI: {
@@ -33,12 +30,6 @@ interface Config {
OLLAMA: {
API_URL: string;
};
DEEPSEEK: {
API_KEY: string;
};
LM_STUDIO: {
API_URL: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
@@ -54,24 +45,25 @@ type RecursivePartial<T> = {
[P in keyof T]?: RecursivePartial<T[P]>;
};
const loadConfig = () => {
// Server-side only
if (typeof window === 'undefined') {
return toml.parse(
fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
) as any as Config;
}
const loadConfig = () =>
toml.parse(
fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
) as any as Config;
// Client-side fallback - settings will be loaded via API
return {} as Config;
};
export const getPort = () => loadConfig().GENERAL.PORT;
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
export const getConfigPassword = () => loadConfig().GENERAL.CONFIG_PASSWORD;
export const getBaseUrl = () => loadConfig().GENERAL.BASE_URL;
export const isDiscoverEnabled = () => loadConfig().GENERAL.DISCOVER_ENABLED;
export const isLibraryEnabled = () => loadConfig().GENERAL.LIBRARY_ENABLED;
export const isCopilotEnabled = () => loadConfig().GENERAL.COPILOT_ENABLED;
export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
@@ -86,8 +78,6 @@ export const getSearxngApiEndpoint = () =>
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@@ -97,9 +87,6 @@ export const getCustomOpenaiApiUrl = () =>
export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
export const getLMStudioApiEndpoint = () =>
loadConfig().MODELS.LM_STUDIO.API_URL;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
@@ -132,13 +119,11 @@ const mergeConfigs = (current: any, update: any): any => {
};
export const updateConfig = (config: RecursivePartial<Config>) => {
// Server-side only
if (typeof window === 'undefined') {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(path.join(process.cwd(), `${configFileName}`)),
toml.stringify(mergedConfig),
);
}
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(__dirname, `../${configFileName}`),
toml.stringify(mergedConfig),
);
};


@@ -1,9 +1,8 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';
import path from 'path';
const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
const sqlite = new Database('data/db.sqlite');
const db = drizzle(sqlite, {
schema: schema,
});


@@ -28,7 +28,7 @@ export class HuggingFaceTransformersEmbeddings
timeout?: number;
private pipelinePromise: Promise<any> | undefined;
private pipelinePromise: Promise<any>;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});


@@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {
constructor(args?: LineOutputParserArgs) {
super();
this.key = args?.key ?? this.key;
this.key = args.key ?? this.key;
}
static lc_name() {
@@ -21,10 +21,6 @@ class LineOutputParser extends BaseOutputParser<string> {
async parse(text: string): Promise<string> {
text = text.trim() || '';
// First, remove all <think>...</think> blocks to avoid parsing tags inside thinking content
// This might be a little aggressive. Prompt massaging might be all we need, but this is a guarantee and should rarely mess anything up.
text = this.removeThinkingBlocks(text);
const regex = /^(\s*(-|\*|\d+\.\s|\d+\)\s|\u2022)\s*)+/;
const startKeyIndex = text.indexOf(`<${this.key}>`);
const endKeyIndex = text.indexOf(`</${this.key}>`);
@@ -44,17 +40,6 @@ class LineOutputParser extends BaseOutputParser<string> {
return line;
}
/**
* Removes all content within <think>...</think> blocks
* @param text The input text containing thinking blocks
* @returns The text with all thinking blocks removed
*/
private removeThinkingBlocks(text: string): string {
// Use regex to identify and remove all <think>...</think> blocks
// Using the 's' flag to make dot match newlines
return text.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
}
getFormatInstructions(): string {
throw new Error('Not implemented.');
}


@@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
constructor(args?: LineListOutputParserArgs) {
super();
this.key = args?.key ?? this.key;
this.key = args.key ?? this.key;
}
static lc_name() {
@@ -21,10 +21,6 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
async parse(text: string): Promise<string[]> {
text = text.trim() || '';
// First, remove all <think>...</think> blocks to avoid parsing tags inside thinking content
// This might be a little aggressive. Prompt massaging might be all we need, but this is a guarantee and should rarely mess anything up.
text = this.removeThinkingBlocks(text);
const regex = /^(\s*(-|\*|\d+\.\s|\d+\)\s|\u2022)\s*)+/;
const startKeyIndex = text.indexOf(`<${this.key}>`);
const endKeyIndex = text.indexOf(`</${this.key}>`);
@@ -46,17 +42,6 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
return lines;
}
/**
* Removes all content within <think>...</think> blocks
* @param text The input text containing thinking blocks
* @returns The text with all thinking blocks removed
*/
private removeThinkingBlocks(text: string): string {
// Use regex to identify and remove all <think>...</think> blocks
// Using [\s\S] pattern to match all characters including newlines
return text.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
}
getFormatInstructions(): string {
throw new Error('Not implemented.');
}


@@ -1,19 +0,0 @@
export const chatPrompt = `
You are Perplexica, an AI model who is expert at having creative conversations with users. You are currently set on focus mode 'Chat', which means you will engage in a truly creative conversation without searching the web or citing sources.
In Chat mode, you should be:
- Creative and engaging in your responses
- Helpful and informative based on your internal knowledge
- Conversational and natural in your tone
- Willing to explore ideas, hypothetical scenarios, and creative topics
Since you are in Chat mode, you would not perform web searches or cite sources. If the user asks a question that would benefit from web search or specific data, you can suggest they switch to a different focus mode like 'All Mode' for general web search or another specialized mode.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
<context>
{context}
</context>
`;


@@ -1,43 +1,6 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
export const PROVIDER_INFO = {
key: 'anthropic',
displayName: 'Anthropic',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
import { getAnthropicApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadAnthropicChatModels = async () => {
const anthropicApiKey = getAnthropicApiKey();
@@ -45,22 +8,52 @@ export const loadAnthropicChatModels = async () => {
if (!anthropicApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
anthropicChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
const chatModels = {
'claude-3-5-sonnet-20241022': {
displayName: 'Claude 3.5 Sonnet',
model: new ChatAnthropic({
apiKey: anthropicApiKey,
modelName: model.key,
temperature: 0.7,
}) as unknown as BaseChatModel,
};
});
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-sonnet-20241022',
}),
},
'claude-3-5-haiku-20241022': {
displayName: 'Claude 3.5 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-haiku-20241022',
}),
},
'claude-3-opus-20240229': {
displayName: 'Claude 3 Opus',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-opus-20240229',
}),
},
'claude-3-sonnet-20240229': {
displayName: 'Claude 3 Sonnet',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-sonnet-20240229',
}),
},
'claude-3-haiku-20240307': {
displayName: 'Claude 3 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-haiku-20240307',
}),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading Anthropic models: ${err}`);
logger.error(`Error loading Anthropic models: ${err}`);
return {};
}
};


@@ -1,49 +0,0 @@
import { ChatOpenAI } from '@langchain/openai';
import { getDeepseekApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
export const PROVIDER_INFO = {
key: 'deepseek',
displayName: 'Deepseek AI',
};
const deepseekChatModels: Record<string, string>[] = [
{
displayName: 'Deepseek Chat (Deepseek V3)',
key: 'deepseek-chat',
},
{
displayName: 'Deepseek Reasoner (Deepseek R1)',
key: 'deepseek-reasoner',
},
];
export const loadDeepseekChatModels = async () => {
const deepseekApiKey = getDeepseekApiKey();
if (!deepseekApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
deepseekChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: deepseekApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.deepseek.com',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
console.error(`Error loading Deepseek models: ${err}`);
return {};
}
};


@@ -2,57 +2,8 @@ import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'gemini',
displayName: 'Google Gemini',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.5 Pro Experimental',
key: 'gemini-2.5-pro-exp-03-25',
},
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Flash Thinking Experimental',
key: 'gemini-2.0-flash-thinking-exp-01-21',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 004',
key: 'models/text-embedding-004',
},
{
displayName: 'Embedding 001',
key: 'models/embedding-001',
},
];
import { getGeminiApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey();
@@ -60,47 +11,75 @@ export const loadGeminiChatModels = async () => {
if (!geminiApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
geminiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
const chatModels = {
'gemini-1.5-flash': {
displayName: 'Gemini 1.5 Flash',
model: new ChatGoogleGenerativeAI({
apiKey: geminiApiKey,
modelName: model.key,
modelName: 'gemini-1.5-flash',
temperature: 0.7,
}) as unknown as BaseChatModel,
};
});
apiKey: geminiApiKey,
}),
},
'gemini-1.5-flash-8b': {
displayName: 'Gemini 1.5 Flash 8B',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash-8b',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-pro': {
displayName: 'Gemini 1.5 Pro',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-pro',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-exp': {
displayName: 'Gemini 2.0 Flash Exp',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-exp',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-thinking-exp-01-21': {
displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-thinking-exp-01-21',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading Gemini models: ${err}`);
logger.error(`Error loading Gemini models: ${err}`);
return {};
}
};
export const loadGeminiEmbeddingModels = async () => {
export const loadGeminiEmbeddingsModels = async () => {
const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {};
try {
const embeddingModels: Record<string, EmbeddingModel> = {};
geminiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
const embeddingModels = {
'text-embedding-004': {
displayName: 'Text Embedding',
model: new GoogleGenerativeAIEmbeddings({
apiKey: geminiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
});
modelName: 'text-embedding-004',
}),
},
};
return embeddingModels;
} catch (err) {
console.error(`Error loading Gemini embeddings models: ${err}`);
logger.error(`Error loading Gemini embeddings model: ${err}`);
return {};
}
};


@@ -1,91 +1,6 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
export const PROVIDER_INFO = {
key: 'groq',
displayName: 'Groq',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
/* {
displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
}, */
{
displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
key: 'meta-llama/llama-4-scout-17b-16e-instruct',
},
];
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey();
@@ -93,25 +8,129 @@ export const loadGroqChatModels = async () => {
if (!groqApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
groqChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
const chatModels = {
'llama-3.3-70b-versatile': {
displayName: 'Llama 3.3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.3-70b-versatile',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
}) as unknown as BaseChatModel,
};
});
),
},
'llama-3.2-3b-preview': {
displayName: 'Llama 3.2 3B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-3b-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-11b-vision-preview': {
displayName: 'Llama 3.2 11B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-11b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-90b-vision-preview': {
displayName: 'Llama 3.2 90B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-90b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.1-8b-instant': {
displayName: 'Llama 3.1 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.1-8b-instant',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-8b-8192': {
displayName: 'LLaMA3 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-70b-8192': {
displayName: 'LLaMA3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'mixtral-8x7b-32768': {
displayName: 'Mixtral 8x7B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'gemma2-9b-it': {
displayName: 'Gemma2 9B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma2-9b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading Groq models: ${err}`);
logger.error(`Error loading Groq models: ${err}`);
return {};
}
};


@@ -1,109 +1,38 @@
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import {
loadOpenAIChatModels,
loadOpenAIEmbeddingModels,
PROVIDER_INFO as OpenAIInfo,
PROVIDER_INFO,
} from './openai';
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
} from '../../config';
import { ChatOpenAI } from '@langchain/openai';
import {
loadOllamaChatModels,
loadOllamaEmbeddingModels,
PROVIDER_INFO as OllamaInfo,
} from './ollama';
import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
import {
loadAnthropicChatModels,
PROVIDER_INFO as AnthropicInfo,
} from './anthropic';
import {
loadGeminiChatModels,
loadGeminiEmbeddingModels,
PROVIDER_INFO as GeminiInfo,
} from './gemini';
import {
loadTransformersEmbeddingsModels,
PROVIDER_INFO as TransformersInfo,
} from './transformers';
import {
loadDeepseekChatModels,
PROVIDER_INFO as DeepseekInfo,
} from './deepseek';
import {
loadLMStudioChatModels,
loadLMStudioEmbeddingsModels,
PROVIDER_INFO as LMStudioInfo,
} from './lmstudio';
export const PROVIDER_METADATA = {
openai: OpenAIInfo,
ollama: OllamaInfo,
groq: GroqInfo,
anthropic: AnthropicInfo,
gemini: GeminiInfo,
transformers: TransformersInfo,
deepseek: DeepseekInfo,
lmstudio: LMStudioInfo,
custom_openai: {
key: 'custom_openai',
displayName: 'Custom OpenAI',
},
};
export interface ChatModel {
displayName: string;
model: BaseChatModel;
}
export interface EmbeddingModel {
displayName: string;
model: Embeddings;
}
export const chatModelProviders: Record<
string,
() => Promise<Record<string, ChatModel>>
> = {
const chatModelProviders = {
openai: loadOpenAIChatModels,
ollama: loadOllamaChatModels,
groq: loadGroqChatModels,
ollama: loadOllamaChatModels,
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
deepseek: loadDeepseekChatModels,
lmstudio: loadLMStudioChatModels,
};
export const embeddingModelProviders: Record<
string,
() => Promise<Record<string, EmbeddingModel>>
> = {
openai: loadOpenAIEmbeddingModels,
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels,
lmstudio: loadLMStudioEmbeddingsModels,
const embeddingModelProviders = {
openai: loadOpenAIEmbeddingsModels,
local: loadTransformersEmbeddingsModels,
ollama: loadOllamaEmbeddingsModels,
gemini: loadGeminiEmbeddingsModels,
};
export const getAvailableChatModelProviders = async () => {
const models: Record<string, Record<string, ChatModel>> = {};
const models = {};
for (const provider in chatModelProviders) {
const providerModels = await chatModelProviders[provider]();
if (Object.keys(providerModels).length > 0) {
// Sort models alphabetically by their keys
const sortedModels: Record<string, ChatModel> = {};
Object.keys(providerModels)
.sort()
.forEach((key) => {
sortedModels[key] = providerModels[key];
});
models[provider] = sortedModels;
models[provider] = providerModels;
}
}
@@ -123,7 +52,7 @@ export const getAvailableChatModelProviders = async () => {
configuration: {
baseURL: customOpenAiApiUrl,
},
}) as unknown as BaseChatModel,
}),
},
}
: {}),
@@ -133,19 +62,12 @@ export const getAvailableChatModelProviders = async () => {
};
export const getAvailableEmbeddingModelProviders = async () => {
const models: Record<string, Record<string, EmbeddingModel>> = {};
const models = {};
for (const provider in embeddingModelProviders) {
const providerModels = await embeddingModelProviders[provider]();
if (Object.keys(providerModels).length > 0) {
// Sort embedding models alphabetically by their keys
const sortedModels: Record<string, EmbeddingModel> = {};
Object.keys(providerModels)
.sort()
.forEach((key) => {
sortedModels[key] = providerModels[key];
});
models[provider] = sortedModels;
models[provider] = providerModels;
}
}


@@ -1,100 +0,0 @@
import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
import axios from 'axios';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'lmstudio',
displayName: 'LM Studio',
};
import { ChatOpenAI } from '@langchain/openai';
import { OpenAIEmbeddings } from '@langchain/openai';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
interface LMStudioModel {
id: string;
name?: string;
}
const ensureV1Endpoint = (endpoint: string): string =>
endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
try {
await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
return true;
} catch {
return false;
}
};
export const loadLMStudioChatModels = async () => {
const endpoint = getLMStudioApiEndpoint();
if (!endpoint) return {};
if (!(await checkServerAvailability(endpoint))) return {};
try {
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
const chatModels: Record<string, ChatModel> = {};
response.data.data.forEach((model: LMStudioModel) => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
modelName: model.id,
temperature: 0.7,
streaming: true,
maxRetries: 3,
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
console.error(`Error loading LM Studio models: ${err}`);
return {};
}
};
export const loadLMStudioEmbeddingsModels = async () => {
const endpoint = getLMStudioApiEndpoint();
if (!endpoint) return {};
if (!(await checkServerAvailability(endpoint))) return {};
try {
const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
headers: { 'Content-Type': 'application/json' },
});
const embeddingsModels: Record<string, EmbeddingModel> = {};
response.data.data.forEach((model: LMStudioModel) => {
embeddingsModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
modelName: model.id,
}) as unknown as Embeddings,
};
});
return embeddingsModels;
} catch (err) {
console.error(`Error loading LM Studio embeddings model: ${err}`);
return {};
}
};

View File

@@ -1,78 +1,74 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'ollama',
displayName: 'Ollama',
};
import { ChatOllama } from '@langchain/ollama';
import { OllamaEmbeddings } from '@langchain/ollama';
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
const ollamaEndpoint = getOllamaApiEndpoint();
const keepAlive = getKeepAlive();
if (!ollamaApiEndpoint) return {};
if (!ollamaEndpoint) return {};
try {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models } = res.data;
const { models: ollamaModels } = response.data;
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.model] = {
const chatModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
displayName: model.name,
model: new ChatOllama({
baseUrl: ollamaApiEndpoint,
baseUrl: ollamaEndpoint,
model: model.model,
temperature: 0.7,
keepAlive: getKeepAlive(),
keepAlive: keepAlive,
}),
};
});
return acc;
}, {});
return chatModels;
} catch (err) {
console.error(`Error loading Ollama models: ${err}`);
logger.error(`Error loading Ollama models: ${err}`);
return {};
}
};
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
export const loadOllamaEmbeddingsModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
if (!ollamaApiEndpoint) return {};
if (!ollamaEndpoint) return {};
try {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models } = res.data;
const { models: ollamaModels } = response.data;
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model: any) => {
embeddingModels[model.model] = {
const embeddingsModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
displayName: model.name,
model: new OllamaEmbeddings({
baseUrl: ollamaApiEndpoint,
baseUrl: ollamaEndpoint,
model: model.model,
}),
};
});
return embeddingModels;
return acc;
}, {});
return embeddingsModels;
} catch (err) {
console.error(`Error loading Ollama embeddings models: ${err}`);
logger.error(`Error loading Ollama embeddings model: ${err}`);
return {};
}
};


@@ -1,107 +1,89 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'openai',
displayName: 'OpenAI',
};
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
{
displayName: 'GPT 4.1 nano',
key: 'gpt-4.1-nano',
},
{
displayName: 'GPT 4.1 mini',
key: 'gpt-4.1-mini',
},
{
displayName: 'GPT 4.1',
key: 'gpt-4.1',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';
export const loadOpenAIChatModels = async () => {
const openaiApiKey = getOpenaiApiKey();
const openAIApiKey = getOpenaiApiKey();
if (!openaiApiKey) return {};
if (!openAIApiKey) return {};
try {
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
const chatModels = {
'gpt-3.5-turbo': {
displayName: 'GPT-3.5 Turbo',
model: new ChatOpenAI({
openAIApiKey: openaiApiKey,
modelName: model.key,
openAIApiKey,
modelName: 'gpt-3.5-turbo',
temperature: 0.7,
}) as unknown as BaseChatModel,
};
});
}),
},
'gpt-4': {
displayName: 'GPT-4',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
},
'gpt-4-turbo': {
displayName: 'GPT-4 turbo',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
},
'gpt-4o': {
displayName: 'GPT-4 omni',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o',
temperature: 0.7,
}),
},
'gpt-4o-mini': {
displayName: 'GPT-4 omni mini',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o-mini',
temperature: 0.7,
}),
},
};
return chatModels;
} catch (err) {
console.error(`Error loading OpenAI models: ${err}`);
logger.error(`Error loading OpenAI models: ${err}`);
return {};
}
};
export const loadOpenAIEmbeddingModels = async () => {
const openaiApiKey = getOpenaiApiKey();
export const loadOpenAIEmbeddingsModels = async () => {
const openAIApiKey = getOpenaiApiKey();
if (!openaiApiKey) return {};
if (!openAIApiKey) return {};
try {
const embeddingModels: Record<string, EmbeddingModel> = {};
openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
const embeddingModels = {
'text-embedding-3-small': {
displayName: 'Text Embedding 3 Small',
model: new OpenAIEmbeddings({
openAIApiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
});
openAIApiKey,
modelName: 'text-embedding-3-small',
}),
},
'text-embedding-3-large': {
displayName: 'Text Embedding 3 Large',
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
},
};
return embeddingModels;
} catch (err) {
console.error(`Error loading OpenAI embeddings models: ${err}`);
logger.error(`Error loading OpenAI embeddings model: ${err}`);
return {};
}
};


@@ -1,10 +1,6 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const PROVIDER_INFO = {
key: 'transformers',
displayName: 'Hugging Face',
};
export const loadTransformersEmbeddingsModels = async () => {
try {
const embeddingModels = {
@@ -30,7 +26,7 @@ export const loadTransformersEmbeddingsModels = async () => {
return embeddingModels;
} catch (err) {
console.error(`Error loading Transformers embeddings model: ${err}`);
logger.error(`Error loading Transformers embeddings model: ${err}`);
return {};
}
};


@@ -1,68 +0,0 @@
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import prompts from '../prompts';
export const searchHandlers: Record<string, MetaSearchAgent> = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
localResearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.localResearchPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
chat: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.chatPrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};

View File

@@ -1,5 +1,5 @@
import axios from 'axios';
import { getSearxngApiEndpoint } from './config';
import { getSearxngApiEndpoint } from '../config';
interface SearxngSearchOptions {
categories?: string[];
@@ -19,12 +19,6 @@ interface SearxngSearchResult {
iframe_src?: string;
}
interface SearxngResponse {
results: SearxngSearchResult[];
suggestions: string[];
searchUrl: string;
}
export const searchSearxng = async (
query: string,
opts?: SearxngSearchOptions,
@@ -36,12 +30,11 @@ export const searchSearxng = async (
if (opts) {
Object.keys(opts).forEach((key) => {
const value = opts[key as keyof SearxngSearchOptions];
if (Array.isArray(value)) {
url.searchParams.append(key, value.join(','));
if (Array.isArray(opts[key])) {
url.searchParams.append(key, opts[key].join(','));
return;
}
url.searchParams.append(key, value as string);
url.searchParams.append(key, opts[key]);
});
}
@@ -50,16 +43,5 @@ export const searchSearxng = async (
const results: SearxngSearchResult[] = res.data.results;
const suggestions: string[] = res.data.suggestions;
// Create a URL for viewing the search results in the SearXNG web interface
const searchUrl = new URL(searxngURL);
searchUrl.pathname = '/search';
searchUrl.searchParams.append('q', query);
if (opts?.engines?.length) {
searchUrl.searchParams.append('engines', opts.engines.join(','));
}
if (opts?.language) {
searchUrl.searchParams.append('language', opts.language);
}
return { results, suggestions, searchUrl: searchUrl.toString() };
return { results, suggestions };
};
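Note (illustrative, not part of the diff): a minimal usage sketch of searchSearxng based on the options interface shown above. The query and engine values are placeholders; on master the result also carries searchUrl, while on this branch only results and suggestions are returned.

// Illustrative only: calling searchSearxng with a few of the documented options.
const { results, suggestions } = await searchSearxng('perplexica github', {
  engines: ['google'], // placeholder engine name
  language: 'en',
  pageno: 1,
});
console.log(results.length, suggestions);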

View File

@@ -1,5 +0,0 @@
declare function computeDot(vectorA: number[], vectorB: number[]): number;
declare module 'compute-dot' {
export default computeDot;
}

View File

@@ -51,10 +51,6 @@ export const academicSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@@ -11,8 +11,7 @@ import {
wolframAlphaSearchResponsePrompt,
wolframAlphaSearchRetrieverPrompt,
} from './wolframAlpha';
import { localResearchPrompt } from './localResearch';
import { chatPrompt } from './chat';
import { writingAssistantPrompt } from './writingAssistant';
import {
youtubeSearchResponsePrompt,
youtubeSearchRetrieverPrompt,
@@ -27,8 +26,7 @@ export default {
redditSearchRetrieverPrompt,
wolframAlphaSearchResponsePrompt,
wolframAlphaSearchRetrieverPrompt,
localResearchPrompt,
chatPrompt,
writingAssistantPrompt,
youtubeSearchResponsePrompt,
youtubeSearchRetrieverPrompt,
};

View File

@@ -51,10 +51,6 @@ export const redditSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@@ -1,9 +1,8 @@
export const webSearchRetrieverPrompt = `
You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it. You should condense the question to its essence and remove any unnecessary details. You should also make sure that the question is clear and easy to understand. You should not add any new information or change the meaning of the question. You should also make sure that the question is grammatically correct and free of spelling errors.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).

You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL), you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
If you are a thinking or reasoning AI, you should avoid using \`<question>\` and \`</question>\` tags in your thinking. Those tags should only be used in the final output. You should also avoid using \`<links>\` and \`</links>\` tags in your thinking. Those tags should only be used in the final output.
There are several examples attached for your reference inside the below \`examples\` XML block
@@ -50,21 +49,6 @@ summarize
https://example.com
</links>
\`
6. Follow-up question: Get the current F1 constructor standings and return the results in a table
Rephrased question: \`
<question>
Current F1 constructor standings
</question>
\`
7. Follow-up question: What are the top 10 restaurants in New York? Show the results in a table and include a short description of each restaurant.
Rephrased question: \`
<question>
Top 10 restaurants in New York
</question>
\`
</examples>
Anything below is the part of the actual conversation and you need to use conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
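Note (illustrative, not part of the diff): a hypothetical sketch of how the \`<question>\` and \`<links>\` blocks this prompt asks for could be pulled out of a model response. This regex helper is not the repository's parser (the codebase routes this through LineOutputParser / LineListOutputParser elsewhere in the diff); it only demonstrates the expected output format.

// Hypothetical helper, for illustration only.
const extractBlock = (text: string, tag: string): string | undefined =>
  text.match(new RegExp(`<${tag}>([\\s\\S]*?)</${tag}>`))?.[1]?.trim();

const response =
  '<question>\nsummarize\n</question>\n<links>\nhttps://example.com\n</links>';
const question = extractBlock(response, 'question'); // 'summarize'
const links = extractBlock(response, 'links')?.split('\n').filter(Boolean) ?? [];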
@@ -108,10 +92,6 @@ export const webSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@@ -51,10 +51,6 @@ export const wolframAlphaSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@@ -1,16 +1,12 @@
export const localResearchPrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Local Research', this means you will be helping the user research and interact with local files with citations.
Since you are in local research mode, you would not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest them to switch to a different focus mode.
export const writingAssistantPrompt = `
You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Writing Assistant', this means you will be helping the user write a response to a given query.
Since you are a writing assistant, you would not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest them to switch to a different focus mode.
You will be shared a context that can contain information from files user has uploaded to get answers from. You will have to generate answers upon that.
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
<context>
{context}
</context>

View File

@@ -51,10 +51,6 @@ export const youtubeSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Youtube', this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

66
src/routes/chats.ts Normal file
View File

@@ -0,0 +1,66 @@
import express from 'express';
import logger from '../utils/logger';
import db from '../db/index';
import { eq } from 'drizzle-orm';
import { chats, messages } from '../db/schema';
const router = express.Router();
router.get('/', async (_, res) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return res.status(200).json({ chats: chats });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chats: ${err.message}`);
}
});
router.get('/:id', async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, req.params.id),
});
return res.status(200).json({ chat: chatExists, messages: chatMessages });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chat: ${err.message}`);
}
});
router.delete(`/:id`, async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
await db
.delete(messages)
.where(eq(messages.chatId, req.params.id))
.execute();
return res.status(200).json({ message: 'Chat deleted successfully' });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in deleting chat: ${err.message}`);
}
});
export default router;

139
src/routes/config.ts Normal file
View File

@@ -0,0 +1,139 @@
import express from 'express';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import {
getGroqApiKey,
getOllamaApiEndpoint,
getAnthropicApiKey,
getGeminiApiKey,
getOpenaiApiKey,
updateConfig,
getConfigPassword,
isLibraryEnabled,
isCopilotEnabled,
isDiscoverEnabled,
getCustomOpenaiApiUrl,
getCustomOpenaiApiKey,
getCustomOpenaiModelName,
} from '../config';
import logger from '../utils/logger';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const authHeader = req.headers['authorization']?.split(' ')[1];
const password = getConfigPassword();
if (authHeader !== password) {
res.status(401).json({ message: 'Unauthorized' });
return;
}
const config = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: chatModelProviders[provider][model].displayName,
};
});
}
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: embeddingModelProviders[provider][model].displayName,
};
});
}
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
res.status(200).json(config);
} catch (err: any) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error getting config: ${err.message}`);
}
});
router.post('/', async (req, res) => {
const authHeader = req.headers['authorization']?.split(' ')[1];
const password = getConfigPassword();
if (authHeader !== password) {
res.status(401).json({ message: 'Unauthorized' });
return;
}
const config = req.body;
const updatedConfig = {
GENERAL: {
DISCOVER_ENABLED: config.isDiscoverEnabled,
LIBRARY_ENABLED: config.isLibraryEnabled,
COPILOT_ENABLED: config.isCopilotEnabled,
},
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
};
updateConfig(updatedConfig);
res.status(200).json({ message: 'Config updated' });
});
router.get('/preferences', (_, res) => {
const preferences = {
isLibraryEnabled: isLibraryEnabled(),
isCopilotEnabled: isCopilotEnabled(),
isDiscoverEnabled: isDiscoverEnabled(),
};
res.status(200).json(preferences);
});
export default router;
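Note (illustrative, not part of the diff): a rough sketch of how a client would call this password-protected config endpoint. The handler above only compares the second token of the Authorization header against the configured password, so any scheme prefix works; the URL and password value are placeholders (the API host comes from ui/.env.example further down).

// Illustrative only: reading the config with the settings password.
const settingsPassword = 'placeholder-password';
const res = await fetch('http://localhost:3001/api/config', {
  headers: { Authorization: `Bearer ${settingsPassword}` },
});
if (res.status === 401) throw new Error('Wrong settings password');
const config = await res.json();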

48
src/routes/discover.ts Normal file
View File

@@ -0,0 +1,48 @@
import express from 'express';
import { searchSearxng } from '../lib/searxng';
import logger from '../utils/logger';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const data = (
await Promise.all([
searchSearxng('site:businessinsider.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:www.exchangewire.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:businessinsider.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:www.exchangewire.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com tech', {
engines: ['bing news'],
pageno: 1,
}),
])
)
.map((result) => result.results)
.flat()
.sort(() => Math.random() - 0.5);
return res.json({ blogs: data });
} catch (err: any) {
logger.error(`Error in discover route: ${err.message}`);
return res.status(500).json({ message: 'An error has occurred' });
}
});
export default router;

82
src/routes/images.ts Normal file
View File

@@ -0,0 +1,82 @@
import express from 'express';
import handleImageSearch from '../chains/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: ImageSearchBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const images = await handleImageSearch(
{ query: body.query, chat_history: chatHistory },
llm,
);
res.status(200).json({ images });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in image search: ${err.message}`);
}
});
export default router;

24
src/routes/index.ts Normal file
View File

@@ -0,0 +1,24 @@
import express from 'express';
import imagesRouter from './images';
import videosRouter from './videos';
import configRouter from './config';
import modelsRouter from './models';
import suggestionsRouter from './suggestions';
import chatsRouter from './chats';
import searchRouter from './search';
import discoverRouter from './discover';
import uploadsRouter from './uploads';
const router = express.Router();
router.use('/images', imagesRouter);
router.use('/videos', videosRouter);
router.use('/config', configRouter);
router.use('/models', modelsRouter);
router.use('/suggestions', suggestionsRouter);
router.use('/chats', chatsRouter);
router.use('/search', searchRouter);
router.use('/discover', discoverRouter);
router.use('/uploads', uploadsRouter);
export default router;

59
src/routes/models.ts Normal file
View File

@@ -0,0 +1,59 @@
import express from 'express';
import logger from '../utils/logger';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const [chatModelProvidersRaw, embeddingModelProvidersRaw] =
await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProviders = {};
const chatModelProvidersKeys = Object.keys(chatModelProvidersRaw);
chatModelProvidersKeys.forEach((provider) => {
chatModelProviders[provider] = {};
const models = Object.keys(chatModelProvidersRaw[provider]);
models.forEach((model) => {
chatModelProviders[provider][model] = {};
});
});
const embeddingModelProviders = {};
const embeddingModelProvidersKeys = Object.keys(embeddingModelProvidersRaw);
embeddingModelProvidersKeys.forEach((provider) => {
embeddingModelProviders[provider] = {};
const models = Object.keys(embeddingModelProvidersRaw[provider]);
models.forEach((model) => {
embeddingModelProviders[provider][model] = {};
});
});
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete chatModelProviders[provider][model].model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete embeddingModelProviders[provider][model].model;
});
});
res.status(200).json({ chatModelProviders, embeddingModelProviders });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(err.message);
}
});
export default router;

158
src/routes/search.ts Normal file
View File

@@ -0,0 +1,158 @@
import express from 'express';
import logger from '../utils/logger';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import { searchHandlers } from '../websocket/messageHandler';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '../search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface chatModel {
provider: string;
model: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
}
interface embeddingModel {
provider: string;
model: string;
}
interface ChatRequestBody {
optimizationMode: 'speed' | 'balanced';
focusMode: string;
chatModel?: chatModel;
embeddingModel?: embeddingModel;
query: string;
history: Array<[string, string]>;
}
router.post('/', async (req, res) => {
try {
const body: ChatRequestBody = req.body;
if (!body.focusMode || !body.query) {
return res.status(400).json({ message: 'Missing focus mode or query' });
}
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
const embeddingModel =
body.embeddingModel?.model ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.model || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
return res.status(400).json({ message: 'Invalid focus mode' });
}
const emitter = await searchHandler.searchAndAnswer(
body.query,
history,
llm,
embeddings,
body.optimizationMode,
[],
);
let message = '';
let sources = [];
emitter.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
});
emitter.on('end', () => {
res.status(200).json({ message, sources });
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
res.status(500).json({ message: parsedData.data });
});
} catch (err: any) {
logger.error(`Error in getting search results: ${err.message}`);
res.status(500).json({ message: 'An error has occurred.' });
}
});
export default router;
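Note (illustrative, not part of the diff): a minimal request body matching the ChatRequestBody shape accepted above. The query, history, and host are placeholders; chatModel and embeddingModel are optional and default to the first available provider and model.

// Illustrative only: calling the /api/search endpoint added in this diff.
const res = await fetch('http://localhost:3001/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    focusMode: 'webSearch',
    optimizationMode: 'balanced',
    query: 'What is Perplexica?',
    history: [
      ['human', 'Hi'],
      ['assistant', 'Hello! How can I help you?'],
    ],
  }),
});
const { message, sources } = await res.json();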

81
src/routes/suggestions.ts Normal file
View File

@@ -0,0 +1,81 @@
import express from 'express';
import generateSuggestions from '../chains/suggestionGeneratorAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsBody {
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: SuggestionsBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const suggestions = await generateSuggestions(
{ chat_history: chatHistory },
llm,
);
res.status(200).json({ suggestions: suggestions });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in generating suggestions: ${err.message}`);
}
});
export default router;

151
src/routes/uploads.ts Normal file
View File

@@ -0,0 +1,151 @@
import express from 'express';
import logger from '../utils/logger';
import multer from 'multer';
import path from 'path';
import crypto from 'crypto';
import fs from 'fs';
import { Embeddings } from '@langchain/core/embeddings';
import { getAvailableEmbeddingModelProviders } from '../lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
const router = express.Router();
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
const storage = multer.diskStorage({
destination: (req, file, cb) => {
cb(null, path.join(process.cwd(), './uploads'));
},
filename: (req, file, cb) => {
const splitedFileName = file.originalname.split('.');
const fileExtension = splitedFileName[splitedFileName.length - 1];
if (!['pdf', 'docx', 'txt'].includes(fileExtension)) {
return cb(new Error('File type is not supported'), '');
}
cb(null, `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`);
},
});
const upload = multer({ storage });
router.post(
'/',
upload.fields([
{ name: 'files' },
{ name: 'embedding_model', maxCount: 1 },
{ name: 'embedding_model_provider', maxCount: 1 },
]),
async (req, res) => {
try {
const { embedding_model, embedding_model_provider } = req.body;
if (!embedding_model || !embedding_model_provider) {
res
.status(400)
.json({ message: 'Missing embedding model or provider' });
return;
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel: Embeddings =
embedding_model ?? Object.keys(embeddingModels[provider])[0];
let embeddingsModel: Embeddings | undefined;
if (
embeddingModels[provider] &&
embeddingModels[provider][embeddingModel]
) {
embeddingsModel = embeddingModels[provider][embeddingModel].model as
| Embeddings
| undefined;
}
if (!embeddingsModel) {
res.status(400).json({ message: 'Invalid LLM model selected' });
return;
}
const files = req.files['files'] as Express.Multer.File[];
if (!files || files.length === 0) {
res.status(400).json({ message: 'No files uploaded' });
return;
}
await Promise.all(
files.map(async (file) => {
let docs: Document[] = [];
if (file.mimetype === 'application/pdf') {
const loader = new PDFLoader(file.path);
docs = await loader.load();
} else if (
file.mimetype ===
'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
) {
const loader = new DocxLoader(file.path);
docs = await loader.load();
} else if (file.mimetype === 'text/plain') {
const text = fs.readFileSync(file.path, 'utf-8');
docs = [
new Document({
pageContent: text,
metadata: {
title: file.originalname,
},
}),
];
}
const splitted = await splitter.splitDocuments(docs);
const json = JSON.stringify({
title: file.originalname,
contents: splitted.map((doc) => doc.pageContent),
});
const pathToSave = file.path.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(pathToSave, json);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsJSON = JSON.stringify({
title: file.originalname,
embeddings: embeddings,
});
const pathToSaveEmbeddings = file.path.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(pathToSaveEmbeddings, embeddingsJSON);
}),
);
res.status(200).json({
files: files.map((file) => {
return {
fileName: file.originalname,
fileExtension: file.filename.split('.').pop(),
fileId: file.filename.replace(/\.\w+$/, ''),
};
}),
});
} catch (err: any) {
logger.error(`Error in uploading file results: ${err.message}`);
res.status(500).json({ message: 'An error has occurred.' });
}
},
);
export default router;
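Note (illustrative, not part of the diff): the two sidecar files written by the upload handler above end up with roughly these shapes; the file name, chunk text, and vector values are placeholders.

// Illustrative only: <fileId>-extracted.json
const extracted = {
  title: 'paper.pdf',
  contents: ['first ~500-character chunk...', 'second chunk...'],
};

// Illustrative only: <fileId>-embeddings.json (one vector per chunk)
const embeddingsSidecar = {
  title: 'paper.pdf',
  embeddings: [
    [0.012, -0.203 /* ... */],
    [0.087, 0.114 /* ... */],
  ],
};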

82
src/routes/videos.ts Normal file
View File

@@ -0,0 +1,82 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../chains/videoSearchAgent';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: VideoSearchBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const videos = await handleVideoSearch(
{ chat_history: chatHistory, query: body.query },
llm,
);
res.status(200).json({ videos });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in video search: ${err.message}`);
}
});
export default router;

View File

@@ -13,17 +13,18 @@ import {
} from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document';
import { searchSearxng } from '../searxng';
import path from 'node:path';
import fs from 'node:fs';
import { searchSearxng } from '../lib/searxng';
import path from 'path';
import fs from 'fs';
import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { IterableReadableStream } from '@langchain/core/utils/stream';
export interface MetaSearchAgentType {
searchAndAnswer: (
@@ -33,7 +34,6 @@ export interface MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) => Promise<eventEmitter>;
}
@@ -55,8 +55,6 @@ type BasicChainInput = {
class MetaSearchAgent implements MetaSearchAgentType {
private config: Config;
private strParser = new StringOutputParser();
private searchQuery?: string;
private searxngUrl?: string;
constructor(config: Config) {
this.config = config;
@@ -82,7 +80,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
let question = this.config.summarizer
? await questionOutputParser.parse(input)
: input;
console.log('question', question);
if (question === 'not_needed') {
return { query: '', docs: [] };
@@ -93,7 +90,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
question = 'summarize';
}
let docs: Document[] = [];
let docs = [];
const linkDocs = await getDocumentsFromLinks({ links });
@@ -206,17 +203,12 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: docs };
} else {
question = question.replace(/<think>.*?<\/think>/g, '');
const searxngResult = await searchSearxng(question, {
const res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
// Store the SearXNG URL for later use in emitting to the client
this.searxngUrl = searxngResult.searchUrl;
const documents = searxngResult.results.map(
const documents = res.results.map(
(result) =>
new Document({
pageContent:
@@ -232,7 +224,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
}),
);
return { query: question, docs: documents, searchQuery: question };
return { query: question, docs: documents };
}
}),
]);
@@ -243,11 +235,9 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds: string[],
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
systemInstructions: string,
) {
return RunnableSequence.from([
RunnableMap.from({
systemInstructions: () => systemInstructions,
query: (input: BasicChainInput) => input.query,
chat_history: (input: BasicChainInput) => input.chat_history,
date: () => new Date().toISOString(),
@@ -270,11 +260,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
query = searchRetrieverResult.query;
docs = searchRetrieverResult.docs;
// Store the search query in the context for emitting to the client
if (searchRetrieverResult.searchQuery) {
this.searchQuery = searchRetrieverResult.searchQuery;
}
}
const sortedDocs = await this.rerankDocs(
@@ -326,7 +311,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));
const fileSimilaritySearchObject = content.contents.map(
(c: string, i: number) => {
(c: string, i) => {
return {
fileName: content.title,
content: c,
@@ -429,8 +414,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
return sortedDocs;
}
return [];
}
private processDocs(docs: Document[]) {
@@ -443,32 +426,19 @@ class MetaSearchAgent implements MetaSearchAgentType {
}
private async handleStream(
stream: AsyncGenerator<StreamEvent, any, any>,
stream: IterableReadableStream<StreamEvent>,
emitter: eventEmitter,
llm: BaseChatModel,
) {
for await (const event of stream) {
if (
event.event === 'on_chain_end' &&
event.name === 'FinalSourceRetriever'
) {
const sourcesData = event.data.output;
if (this.searchQuery) {
emitter.emit(
'data',
JSON.stringify({
type: 'sources',
data: sourcesData,
searchQuery: this.searchQuery,
searchUrl: this.searxngUrl,
}),
);
} else {
emitter.emit(
'data',
JSON.stringify({ type: 'sources', data: sourcesData }),
);
}
``;
emitter.emit(
'data',
JSON.stringify({ type: 'sources', data: event.data.output }),
);
}
if (
event.event === 'on_chain_stream' &&
@@ -483,50 +453,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
event.event === 'on_chain_end' &&
event.name === 'FinalResponseGenerator'
) {
// Get model name safely with better detection
let modelName = 'Unknown';
try {
// @ts-ignore - Different LLM implementations have different properties
if (llm.modelName) {
// @ts-ignore
modelName = llm.modelName;
// @ts-ignore
} else if (llm._llm && llm._llm.modelName) {
// @ts-ignore
modelName = llm._llm.modelName;
// @ts-ignore
} else if (llm.model && llm.model.modelName) {
// @ts-ignore
modelName = llm.model.modelName;
} else if ('model' in llm) {
// @ts-ignore
const model = llm.model;
if (typeof model === 'string') {
modelName = model;
// @ts-ignore
} else if (model && model.modelName) {
// @ts-ignore
modelName = model.modelName;
}
} else if (llm.constructor && llm.constructor.name) {
// Last resort: use the class name
modelName = llm.constructor.name;
}
} catch (e) {
console.error('Failed to get model name:', e);
}
// Send model info before ending
emitter.emit(
'stats',
JSON.stringify({
type: 'modelStats',
data: {
modelName,
},
}),
);
emitter.emit('end');
}
}
@@ -539,7 +465,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) {
const emitter = new eventEmitter();
@@ -548,7 +473,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds,
embeddings,
optimizationMode,
systemInstructions,
);
const stream = answeringChain.streamEvents(
@@ -561,7 +485,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
},
);
this.handleStream(stream, emitter, llm);
this.handleStream(stream, emitter);
return emitter;
}

View File

@@ -6,7 +6,7 @@ const computeSimilarity = (x: number[], y: number[]): number => {
const similarityMeasure = getSimilarityMeasure();
if (similarityMeasure === 'cosine') {
return cosineSimilarity(x, y) as number;
return cosineSimilarity(x, y);
} else if (similarityMeasure === 'dot') {
return dot(x, y);
}

View File

@@ -3,6 +3,7 @@ import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
import logger from './logger';
export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splitter = new RecursiveCharacterTextSplitter();
@@ -64,7 +65,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splittedText = await splitter.splitText(parsedText);
const title = res.data
.toString('utf8')
.match(/<title.*>(.*?)<\/title>/)?.[1];
.match(/<title>(.*?)<\/title>/)?.[1];
const linkDocs = splittedText.map((text) => {
return new Document({
@@ -78,13 +79,12 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
docs.push(...linkDocs);
} catch (err) {
console.error(
'An error occurred while getting documents from links: ',
err,
logger.error(
`Error at generating documents from links: ${err.message}`,
);
docs.push(
new Document({
pageContent: `Failed to retrieve content from the link: ${err}`,
pageContent: `Failed to retrieve content from the link: ${err.message}`,
metadata: {
title: 'Failed to retrieve content',
url: link,

22
src/utils/logger.ts Normal file
View File

@@ -0,0 +1,22 @@
import winston from 'winston';
const logger = winston.createLogger({
level: 'info',
transports: [
new winston.transports.Console({
format: winston.format.combine(
winston.format.colorize(),
winston.format.simple(),
),
}),
new winston.transports.File({
filename: 'app.log',
format: winston.format.combine(
winston.format.timestamp(),
winston.format.json(),
),
}),
],
});
export default logger;

View File

@@ -0,0 +1,122 @@
import { WebSocket } from 'ws';
import { handleMessage } from './messageHandler';
import {
getAvailableEmbeddingModelProviders,
getAvailableChatModelProviders,
} from '../lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import type { IncomingMessage } from 'http';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
export const handleConnection = async (
ws: WebSocket,
request: IncomingMessage,
) => {
try {
const searchParams = new URL(request.url, `http://${request.headers.host}`)
.searchParams;
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
searchParams.get('chatModelProvider') ||
Object.keys(chatModelProviders)[0];
const chatModel =
searchParams.get('chatModel') ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
searchParams.get('embeddingModelProvider') ||
Object.keys(embeddingModelProviders)[0];
const embeddingModel =
searchParams.get('embeddingModel') ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel] &&
chatModelProvider != 'custom_openai'
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
} else if (chatModelProvider == 'custom_openai') {
const customOpenaiApiKey = getCustomOpenaiApiKey();
const customOpenaiApiUrl = getCustomOpenaiApiUrl();
const customOpenaiModelName = getCustomOpenaiModelName();
if (customOpenaiApiKey && customOpenaiApiUrl && customOpenaiModelName) {
llm = new ChatOpenAI({
modelName: customOpenaiModelName,
openAIApiKey: customOpenaiApiKey,
temperature: 0.7,
configuration: {
baseURL: customOpenaiApiUrl,
},
}) as unknown as BaseChatModel;
}
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid LLM or embeddings model selected, please refresh the page and try again.',
key: 'INVALID_MODEL_SELECTED',
}),
);
ws.close();
}
const interval = setInterval(() => {
if (ws.readyState === ws.OPEN) {
ws.send(
JSON.stringify({
type: 'signal',
data: 'open',
}),
);
clearInterval(interval);
}
}, 5);
ws.on(
'message',
async (message) =>
await handleMessage(message.toString(), ws, llm, embeddings),
);
ws.on('close', () => logger.debug('Connection closed'));
} catch (err) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Internal server error.',
key: 'INTERNAL_SERVER_ERROR',
}),
);
ws.close();
logger.error(err);
}
};
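Note (illustrative, not part of the diff): the client selects models by putting them in the WebSocket URL's query parameters, which the handler above reads with searchParams.get. The host matches ui/.env.example further down; the provider and model names are placeholders.

// Illustrative only: connecting with an explicit model selection.
const url = new URL('ws://localhost:3001');
url.searchParams.set('chatModelProvider', 'openai');
url.searchParams.set('chatModel', 'gpt-4o-mini'); // placeholder model name
url.searchParams.set('embeddingModelProvider', 'openai');
url.searchParams.set('embeddingModel', 'text-embedding-3-small');
const ws = new WebSocket(url.toString());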

8
src/websocket/index.ts Normal file
View File

@@ -0,0 +1,8 @@
import { initServer } from './websocketServer';
import http from 'http';
export const startWebSocketServer = (
server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
) => {
initServer(server);
};

View File

@@ -0,0 +1,281 @@
import { EventEmitter, WebSocket } from 'ws';
import { BaseMessage, AIMessage, HumanMessage } from '@langchain/core/messages';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import logger from '../utils/logger';
import db from '../db';
import { chats, messages as messagesSchema } from '../db/schema';
import { eq, gt, and } from 'drizzle-orm';
import crypto from 'crypto';
import { isLibraryEnabled } from '../config';
import { getFileDetails } from '../utils/files';
import MetaSearchAgent, {
MetaSearchAgentType,
} from '../search/metaSearchAgent';
import prompts from '../prompts';
type Message = {
messageId: string;
chatId: string;
content: string;
};
type WSMessage = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
type: string;
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
};
export const searchHandlers = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};
const handleEmitterEvents = (
emitter: EventEmitter,
ws: WebSocket,
messageId: string,
chatId: string,
) => {
let recievedMessage = '';
let sources = [];
const libraryEnabled = isLibraryEnabled();
emitter.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
ws.send(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: messageId,
}),
);
recievedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
ws.send(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: messageId,
}),
);
sources = parsedData.data;
}
});
emitter.on('end', () => {
ws.send(JSON.stringify({ type: 'messageEnd', messageId: messageId }));
if (libraryEnabled) {
db.insert(messagesSchema)
.values({
content: recievedMessage,
chatId: chatId,
messageId: messageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
}
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
ws.send(
JSON.stringify({
type: 'error',
data: parsedData.data,
key: 'CHAIN_ERROR',
}),
);
});
};
export const handleMessage = async (
message: string,
ws: WebSocket,
llm: BaseChatModel,
embeddings: Embeddings,
) => {
try {
const parsedWSMessage = JSON.parse(message) as WSMessage;
const parsedMessage = parsedWSMessage.message;
if (parsedWSMessage.files.length > 0) {
/* TODO: Implement uploads in other classes/single meta class system*/
parsedWSMessage.focusMode = 'webSearch';
}
const humanMessageId =
parsedMessage.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
if (!parsedMessage.content)
return ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid message format',
key: 'INVALID_FORMAT',
}),
);
const history: BaseMessage[] = parsedWSMessage.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
if (parsedWSMessage.type === 'message') {
const handler: MetaSearchAgentType =
searchHandlers[parsedWSMessage.focusMode];
const libraryEnabled = isLibraryEnabled();
if (handler) {
try {
const emitter = await handler.searchAndAnswer(
parsedMessage.content,
history,
llm,
embeddings,
parsedWSMessage.optimizationMode,
parsedWSMessage.files,
);
handleEmitterEvents(emitter, ws, aiMessageId, parsedMessage.chatId);
if (libraryEnabled) {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, parsedMessage.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: parsedMessage.chatId,
title: parsedMessage.content,
createdAt: new Date().toString(),
focusMode: parsedWSMessage.focusMode,
files: parsedWSMessage.files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: parsedMessage.content,
chatId: parsedMessage.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, parsedMessage.chatId),
),
)
.execute();
}
}
} catch (err) {
console.log(err);
}
} else {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid focus mode',
key: 'INVALID_FOCUS_MODE',
}),
);
}
}
} catch (err) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid message format',
key: 'INVALID_FORMAT',
}),
);
logger.error(`Failed to handle message: ${err}`);
}
};
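Note (illustrative, not part of the diff): the JSON a client sends over the socket follows the WSMessage type above. A minimal example, assuming an open WebSocket `ws`; the ids and content are placeholders.

// Illustrative only: a chat message as handleMessage expects it.
ws.send(
  JSON.stringify({
    type: 'message',
    focusMode: 'webSearch',
    optimizationMode: 'balanced',
    history: [
      ['human', 'Hi'],
      ['assistant', 'Hello!'],
    ],
    files: [],
    message: {
      messageId: 'a1b2c3d4e5f6aa',
      chatId: 'c0ffee00c0ffee',
      content: 'What are the latest developments in open-source LLMs?',
    },
  }),
);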

View File

@@ -0,0 +1,16 @@
import { WebSocketServer } from 'ws';
import { handleConnection } from './connectionManager';
import http from 'http';
import { getPort } from '../config';
import logger from '../utils/logger';
export const initServer = (
server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>,
) => {
const port = getPort();
const wss = new WebSocketServer({ server });
wss.on('connection', handleConnection);
logger.info(`WebSocket server started on port ${port}`);
};

View File

@@ -1,27 +1,18 @@
{
"compilerOptions": {
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"lib": ["ESNext"],
"module": "Node16",
"moduleResolution": "Node16",
"target": "ESNext",
"outDir": "dist",
"sourceMap": false,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [
{
"name": "next"
}
],
"paths": {
"@/*": ["./src/*"]
},
"target": "ES2017"
"experimentalDecorators": true,
"emitDecoratorMetadata": true,
"allowSyntheticDefaultImports": true,
"skipLibCheck": true,
"skipDefaultLibCheck": true
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
"exclude": ["node_modules"]
"include": ["src"],
"exclude": ["node_modules", "**/*.spec.ts"]
}

2
ui/.env.example Normal file
View File

@@ -0,0 +1,2 @@
NEXT_PUBLIC_WS_URL=ws://localhost:3001
NEXT_PUBLIC_API_URL=http://localhost:3001/api

34
ui/.gitignore vendored Normal file
View File

@@ -0,0 +1,34 @@
# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# local env files
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts

11
ui/.prettierrc.js Normal file
View File

@@ -0,0 +1,11 @@
/** @type {import("prettier").Config} */
const config = {
printWidth: 80,
trailingComma: 'all',
endOfLine: 'auto',
singleQuote: true,
tabWidth: 2,
};
module.exports = config;

View File

@@ -0,0 +1,7 @@
import ChatWindow from '@/components/ChatWindow';
const Page = ({ params }: { params: { chatId: string } }) => {
return <ChatWindow id={params.chatId} />;
};
export default Page;

View File

@@ -19,7 +19,7 @@ const Page = () => {
useEffect(() => {
const fetchData = async () => {
try {
const res = await fetch(`/api/discover`, {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

Some files were not shown because too many files have changed in this diff.