Compare commits


32 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| ItzCrazyKns | 88ae67065b | feat(config): add measurement unit | 2025-10-21 15:59:15 +05:30 |
| ItzCrazyKns | f35d12f94c | Update perplexica-screenshot.png | 2025-10-21 15:26:29 +05:30 |
| ItzCrazyKns | 3d17975d83 | feat(model-select): use values from localStorage | 2025-10-21 15:25:38 +05:30 |
| Kushagra Srivastava | 950717e0cf | Delete app.dockerfile | 2025-10-21 15:13:17 +05:30 |
| Kushagra Srivastava | 4f39b5746a | Merge pull request #906 from ItzCrazyKns/canary (Release v1.11.0) | 2025-10-21 15:07:55 +05:30 |
| ItzCrazyKns | a01fce4e64 | feat(package): bump version | 2025-10-21 15:03:47 +05:30 |
| ItzCrazyKns | 92ff47110d | feat(app): rename standalone to slim | 2025-10-21 15:03:15 +05:30 |
| ItzCrazyKns | 82efd35b55 | feat(setup-config): only allow finalization when chat model exists | 2025-10-21 14:24:44 +05:30 |
| ItzCrazyKns | 3d950bac07 | feat(app): update documentation | 2025-10-21 13:44:07 +05:30 |
| ItzCrazyKns | 77672003ff | feat(app): remove docker compose, build standalone images | 2025-10-21 13:43:55 +05:30 |
| ItzCrazyKns | e9bd2a8032 | feat(settingsButtonMobile): add size | 2025-10-21 12:30:34 +05:30 |
| ItzCrazyKns | 49fed3e228 | feat(setup-config): add model selection state | 2025-10-21 12:23:35 +05:30 |
| ItzCrazyKns | 7fb7fb9692 | feat(app): fix sizes & placement for smaller screens | 2025-10-21 12:23:18 +05:30 |
| ItzCrazyKns | ff37225253 | feat(models-section): allow selecting chat model | 2025-10-21 12:22:37 +05:30 |
| ItzCrazyKns | 3b745868b2 | feat(app): add mobile settings button | 2025-10-21 12:22:22 +05:30 |
| ItzCrazyKns | c945bf1fc3 | feat(settings): add textarea type, add systemInstructions | 2025-10-21 12:22:06 +05:30 |
| ItzCrazyKns | 672fc3c3a8 | feat(app): fix build errors | 2025-10-20 16:39:38 +05:30 |
| ItzCrazyKns | 67c2672f39 | feat(searxng): use fetch instead of axios | 2025-10-20 16:36:15 +05:30 |
| ItzCrazyKns | 334326744c | feat(app): use new packages, fix types | 2025-10-20 16:36:04 +05:30 |
| ItzCrazyKns | 042ce33cf4 | feat(providers): add rest of the providers | 2025-10-20 16:35:44 +05:30 |
| ItzCrazyKns | 22b9a48b26 | feat(config): use provider name without number on load from env | 2025-10-20 16:35:12 +05:30 |
| ItzCrazyKns | e024d46971 | feat(chat): fix typo | 2025-10-20 16:34:49 +05:30 |
| ItzCrazyKns | af36f15f3b | feat(package): update packages | 2025-10-20 16:33:56 +05:30 |
| ItzCrazyKns | 3d2d056f64 | Update Chat.tsx | 2025-10-19 22:47:45 +05:30 |
| ItzCrazyKns | d9ebf611ff | feat(hf-transformer): dynamically load library | 2025-10-19 21:06:52 +05:30 |
| ItzCrazyKns | eef6ebb924 | Update Section.tsx | 2025-10-19 18:33:40 +05:30 |
| ItzCrazyKns | 65975ba6fc | feat(providers): add transformers provider | 2025-10-19 18:32:18 +05:30 |
| ItzCrazyKns | 51629b2cca | feat(chat): auto scroll, stop scrolling when scrolled back | 2025-10-19 18:30:21 +05:30 |
| ItzCrazyKns | 7d71643f42 | feat(app): rename model selector, fix UI | 2025-10-19 18:29:32 +05:30 |
| ItzCrazyKns | 4564175822 | feat(settings): add embedding model selector | 2025-10-19 18:29:22 +05:30 |
| Kushagra Srivastava | 9d52d01f31 | Merge pull request #901 from ItzCrazyKns/feat/config-management-model-registry | 2025-10-19 13:58:20 +05:30 |
| ItzCrazyKns | 5abd42d46d | feat(package): remove ts-node | 2025-10-11 18:02:31 +05:30 |
41 changed files with 2371 additions and 761 deletions

perplexica-screenshot.png: binary image updated (2.1 MiB before and after; content not shown).

@@ -11,6 +11,13 @@ on:
jobs:
build-amd64:
runs-on: ubuntu-latest
strategy:
matrix:
variant:
- name: full
dockerfile: Dockerfile
- name: slim
dockerfile: Dockerfile.slim
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -31,47 +38,54 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push AMD64 Docker image
- name: Build and push AMD64 Docker image (master)
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:amd64 \
-t itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--push .
- name: Build and push AMD64 Canary Docker image
if: github.ref == 'refs/heads/canary' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:canary-amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:canary-amd64 \
-t itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--push .
- name: Build and push AMD64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
-t itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--push .
build-arm64:
runs-on: ubuntu-24.04-arm
strategy:
matrix:
variant:
- name: full
dockerfile: Dockerfile
- name: slim
dockerfile: Dockerfile.slim
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -92,48 +106,51 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push ARM64 Docker image
- name: Build and push ARM64 Docker image (master)
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:arm64 \
-t itzcrazykns1337/perplexica:${VARIANT}-arm64 \
--push .
- name: Build and push ARM64 Canary Docker image
if: github.ref == 'refs/heads/canary' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:canary-arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-canary-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:canary-arm64 \
-t itzcrazykns1337/perplexica:${VARIANT}-canary-arm64 \
--push .
- name: Build and push ARM64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
-t itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64 \
--push .
manifest:
needs: [build-amd64, build-arm64]
runs-on: ubuntu-latest
strategy:
matrix:
variant: [full, slim]
steps:
- name: Log in to DockerHub
uses: docker/login-action@v2
@@ -146,29 +163,55 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Create and push multi-arch manifest for main
- name: Create and push manifest for main
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
--amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
VARIANT=${{ matrix.variant }}
docker manifest create itzcrazykns1337/perplexica:${VARIANT}-latest \
--amend itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-arm64
docker manifest push itzcrazykns1337/perplexica:${VARIANT}-latest
- name: Create and push multi-arch manifest for canary
if [ "$VARIANT" = "full" ]; then
docker manifest create itzcrazykns1337/perplexica:latest \
--amend itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-arm64
docker manifest push itzcrazykns1337/perplexica:latest
docker manifest create itzcrazykns1337/perplexica:main \
--amend itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-arm64
docker manifest push itzcrazykns1337/perplexica:main
fi
- name: Create and push manifest for canary
if: github.ref == 'refs/heads/canary' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:canary \
--amend itzcrazykns1337/${IMAGE_NAME}:canary-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:canary-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:canary
VARIANT=${{ matrix.variant }}
docker manifest create itzcrazykns1337/perplexica:${VARIANT}-canary \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-arm64
docker manifest push itzcrazykns1337/perplexica:${VARIANT}-canary
- name: Create and push multi-arch manifest for releases
if [ "$VARIANT" = "full" ]; then
docker manifest create itzcrazykns1337/perplexica:canary \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-arm64
docker manifest push itzcrazykns1337/perplexica:canary
fi
- name: Create and push manifest for releases
if: github.event_name == 'release'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}
VARIANT=${{ matrix.variant }}
docker manifest create itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}
if [ "$VARIANT" = "full" ]; then
docker manifest create itzcrazykns1337/perplexica:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/perplexica:${{ env.RELEASE_VERSION }}
fi

Dockerfile (new file, 74 lines)

@@ -0,0 +1,74 @@
FROM node:24.5.0-slim AS builder
RUN apt-get update && apt-get install -y python3 python3-pip sqlite3 && rm -rf /var/lib/apt/lists/*
WORKDIR /home/perplexica
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile --network-timeout 600000
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
COPY drizzle ./drizzle
RUN mkdir -p /home/perplexica/data
RUN yarn build
FROM node:24.5.0-slim
RUN apt-get update && \
apt-get install -y \
python3 \
python3-pip \
python3-venv \
python3-dev \
sqlite3 \
git \
build-essential \
libxslt-dev \
zlib1g-dev \
libffi-dev \
libssl-dev \
uwsgi \
uwsgi-plugin-python3 \
curl \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /home/perplexica
COPY --from=builder /home/perplexica/public ./public
COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data
COPY drizzle ./drizzle
RUN mkdir /home/perplexica/uploads
RUN useradd --system --home-dir /usr/local/searxng --shell /bin/sh searxng
WORKDIR /usr/local/searxng
RUN git clone https://github.com/searxng/searxng.git . && \
python3 -m venv venv && \
. venv/bin/activate && \
pip install --upgrade pip setuptools wheel pyyaml && \
pip install -r requirements.txt && \
pip install uwsgi
RUN mkdir -p /etc/searxng
COPY searxng/settings.yml /etc/searxng/settings.yml
COPY searxng/limiter.toml /etc/searxng/limiter.toml
COPY searxng/uwsgi.ini /etc/searxng/uwsgi.ini
RUN chown -R searxng:searxng /usr/local/searxng /etc/searxng
WORKDIR /home/perplexica
COPY entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh
RUN sed -i 's/\r$//' ./entrypoint.sh || true
EXPOSE 3000 8080
ENV SEARXNG_API_URL=http://localhost:8080
CMD ["/home/perplexica/entrypoint.sh"]


@@ -30,8 +30,6 @@ COPY drizzle ./drizzle
RUN mkdir /home/perplexica/uploads
COPY entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh
RUN sed -i 's/\r$//' ./entrypoint.sh || true
EXPOSE 3000
CMD ["/home/perplexica/entrypoint.sh"]
CMD ["node", "server.js"]


@@ -76,6 +76,35 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
### Getting Started with Docker (Recommended)
Perplexica can be easily run using Docker. Simply run the following command:
```bash
docker run -p 3000:3000 --name perplexica itzcrazykns1337/perplexica:latest
```
This will pull and start the Perplexica container with the bundled SearxNG search engine. Once running, open your browser and navigate to http://localhost:3000. You can then configure your settings (API keys, models, etc.) directly in the setup screen.
**Note**: The image includes both Perplexica and SearxNG, so no additional setup is required.
#### Using Perplexica with Your Own SearxNG Instance
If you already have SearxNG running, you can use the slim version of Perplexica:
```bash
docker run -p 3000:3000 -e SEARXNG_API_URL=http://your-searxng-url:8080 --name perplexica itzcrazykns1337/perplexica:slim-latest
```
**Important**: Make sure your SearxNG instance has:
- JSON format enabled in the settings
- Wolfram Alpha search engine enabled
Replace `http://your-searxng-url:8080` with your actual SearxNG URL. Then configure your AI provider settings in the setup screen at http://localhost:3000.
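If you want to sanity-check your SearxNG instance before pointing Perplexica at it, a quick script like the one below can confirm the JSON format is enabled. This is a minimal sketch: the base URL is a placeholder for your instance, and a 403 is SearxNG's usual response when a requested output format is not allowed.
```ts
// Minimal sketch: verify a SearxNG instance has the JSON output format enabled.
const base = 'http://your-searxng-url:8080'; // placeholder: your SearxNG URL

const res = await fetch(`${base}/search?q=test&format=json`);
if (!res.ok) {
  // SearxNG typically answers 403 Forbidden when `json` is missing from
  // the allowed formats in its settings.
  throw new Error(`SearxNG returned ${res.status}; is the JSON format enabled?`);
}

const data = await res.json();
console.log(`OK: received ${data.results?.length ?? 0} results`);
```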
#### Advanced Setup (Building from Source)
If you prefer to build from source or need more control:
1. Ensure Docker is installed and running on your system.
2. Clone the Perplexica repository:
@@ -85,39 +114,46 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
3. After cloning, navigate to the directory containing the project files.
4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
- `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
- `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"` **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**.
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
- `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
- `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**
**Note**: You can change these after starting Perplexica from the settings dialog.
- `SIMILARITY_MEASURE`: The similarity measure to use (This is filled by default; you can leave it as is if you are unsure about it.)
5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute:
4. Build and run using Docker:
```bash
docker compose up -d
docker build -t perplexica .
docker run -p 3000:3000 --name perplexica perplexica
```
6. Wait a few minutes for the setup to complete. You can access Perplexica at http://localhost:3000 in your web browser.
5. Access Perplexica at http://localhost:3000 and configure your settings in the setup screen.
**Note**: After the containers are built, you can start Perplexica directly from Docker without having to open a terminal.
### Non-Docker Installation
1. Install SearXNG and allow `JSON` format in the SearXNG settings.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. After populating the configuration run `npm i`.
4. Install the dependencies and then execute `npm run build`.
5. Finally, start the app by running `npm run start`
1. Install SearXNG and allow `JSON` format in the SearXNG settings. Make sure the Wolfram Alpha search engine is also enabled.
2. Clone the repository:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
cd Perplexica
```
3. Install dependencies:
```bash
npm i
```
4. Build the application:
```bash
npm run build
```
5. Start the application:
```bash
npm run start
```
6. Open your browser and navigate to http://localhost:3000 to complete the setup and configure your settings (API keys, models, SearxNG URL, etc.) in the setup screen.
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.


@@ -1,35 +0,0 @@
services:
searxng:
image: docker.io/searxng/searxng:latest
volumes:
- ./searxng:/etc/searxng:rw
ports:
- 4000:8080
networks:
- perplexica-network
restart: unless-stopped
app:
image: itzcrazykns1337/perplexica:main
build:
context: .
dockerfile: app.dockerfile
environment:
- DOCKER=true
- SEARXNG_API_URL=http://searxng:8080
- DATA_DIR=/home/perplexica
ports:
- 3000:3000
networks:
- perplexica-network
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
restart: unless-stopped
networks:
perplexica-network:
volumes:
backend-dbstore:
uploads:


@@ -4,11 +4,55 @@
Perplexica's Search API makes it easy to use our AI-powered search engine. You can run different types of searches, pick the models you want to use, and get the most recent info. See the following headings to learn more about Perplexica's search API.
## Endpoint
## Endpoints
### **POST** `http://localhost:3000/api/search`
### Get Available Providers and Models
**Note**: Replace `3000` with any other port if you've changed the default PORT
Before making search requests, you'll need to get the available providers and their models.
#### **GET** `/api/providers`
**Full URL**: `http://localhost:3000/api/providers`
Returns a list of all active providers with their available chat and embedding models.
**Response Example:**
```json
{
"providers": [
{
"id": "550e8400-e29b-41d4-a716-446655440000",
"name": "OpenAI",
"chatModels": [
{
"name": "GPT 4 Omni Mini",
"key": "gpt-4o-mini"
},
{
"name": "GPT 4 Omni",
"key": "gpt-4o"
}
],
"embeddingModels": [
{
"name": "Text Embedding 3 Large",
"key": "text-embedding-3-large"
}
]
}
]
}
```
Use the `id` field as the `providerId` and the `key` field from the models arrays when making search requests.
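For illustration, here is a minimal TypeScript sketch that calls this endpoint and extracts the identifiers a search request needs. The types simply mirror the response example above, and `localhost:3000` assumes the default port.
```ts
// Minimal sketch: fetch the active providers and pick the first chat model.
interface ProviderModel {
  name: string; // display name, e.g. "GPT 4 Omni Mini"
  key: string; // model key used in search requests, e.g. "gpt-4o-mini"
}

interface Provider {
  id: string; // UUID, used as `providerId` in search requests
  name: string;
  chatModels: ProviderModel[];
  embeddingModels: ProviderModel[];
}

const res = await fetch('http://localhost:3000/api/providers');
const { providers }: { providers: Provider[] } = await res.json();

const provider = providers[0];
const chatModel = provider.chatModels[0];
console.log(`providerId=${provider.id}, key=${chatModel.key}`);
```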
### Search Query
#### **POST** `/api/search`
**Full URL**: `http://localhost:3000/api/search`
**Note**: Replace `localhost:3000` with your Perplexica instance URL if running on a different host or port
### Request
@@ -19,12 +63,12 @@ The API accepts a JSON object in the request body, where you define the focus mo
```json
{
"chatModel": {
"provider": "openai",
"name": "gpt-4o-mini"
"providerId": "550e8400-e29b-41d4-a716-446655440000",
"key": "gpt-4o-mini"
},
"embeddingModel": {
"provider": "openai",
"name": "text-embedding-3-large"
"providerId": "550e8400-e29b-41d4-a716-446655440000",
"key": "text-embedding-3-large"
},
"optimizationMode": "speed",
"focusMode": "webSearch",
@@ -38,20 +82,19 @@ The API accepts a JSON object in the request body, where you define the focus mo
}
```
**Note**: The `providerId` must be a valid UUID obtained from the `/api/providers` endpoint. The example above uses a sample UUID for demonstration.
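Putting the two together, a non-streaming request could look like the following sketch. The `providerId` here is the sample UUID from the response example above, not a real identifier.
```ts
// Minimal sketch: non-streaming search request using values from /api/providers.
const response = await fetch('http://localhost:3000/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    chatModel: {
      providerId: '550e8400-e29b-41d4-a716-446655440000', // sample UUID
      key: 'gpt-4o-mini',
    },
    embeddingModel: {
      providerId: '550e8400-e29b-41d4-a716-446655440000', // sample UUID
      key: 'text-embedding-3-large',
    },
    focusMode: 'webSearch',
    optimizationMode: 'speed',
    query: 'What is Perplexica?',
    stream: false,
  }),
});

// The response includes the final message and the sources used to generate it.
const result = await response.json();
console.log(result.message);
```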
### Request Parameters
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. To get available providers and models, send a GET request to `http://localhost:3000/api/providers`.
- `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
- `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- Optional fields for custom OpenAI configuration:
- `customOpenAIBaseURL`: If youre using a custom OpenAI instance, provide the base URL.
- `customOpenAIKey`: The API key for a custom OpenAI instance.
- `providerId` (string): The UUID of the provider. You can get this from the `/api/providers` endpoint response.
- `key` (string): The model key/identifier (e.g., `gpt-4o-mini`, `llama3.1:latest`). Use the `key` value from the provider's `chatModels` array, not the display name.
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. To get available providers and models, send a GET request to `http://localhost:3000/api/providers`.
- `provider`: The provider for the embedding model (e.g., `openai`).
- `name`: The specific embedding model (e.g., `text-embedding-3-large`).
- `providerId` (string): The UUID of the embedding provider. You can get this from the `/api/providers` endpoint response.
- `key` (string): The embedding model key (e.g., `text-embedding-3-large`, `nomic-embed-text`). Use the `key` value from the provider's `embeddingModels` array, not the display name.
- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
@@ -108,7 +151,7 @@ The response from the API includes both the final message and the sources used t
#### Streaming Response (stream: true)
When streaming is enabled, the API returns a stream of newline-delimited JSON objects. Each line contains a complete, valid JSON object. The response has Content-Type: application/json.
When streaming is enabled, the API returns a stream of newline-delimited JSON objects using Server-Sent Events (SSE). Each line contains a complete, valid JSON object. The response has `Content-Type: text/event-stream`.
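A client can consume the stream by buffering bytes, splitting on newlines, and parsing each complete line as JSON. A minimal sketch, assuming the same endpoint as above with `stream: true`:
```ts
// Minimal sketch: read the newline-delimited JSON stream.
const res = await fetch('http://localhost:3000/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    focusMode: 'webSearch',
    query: 'What is Perplexica?',
    stream: true,
  }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
let buffer = '';

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  buffer += decoder.decode(value, { stream: true });

  let idx;
  while ((idx = buffer.indexOf('\n')) !== -1) {
    const line = buffer.slice(0, idx).trim();
    buffer = buffer.slice(idx + 1);
    if (line) console.log(JSON.parse(line)); // one complete JSON object per line
  }
}
```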
Example of streamed response objects:


@@ -2,45 +2,80 @@
To update Perplexica to the latest version, follow these steps:
## For Docker users
## For Docker users (Using pre-built images)
1. Clone the latest version of Perplexica from GitHub:
Simply pull the latest image and restart your container:
```bash
docker pull itzcrazykns1337/perplexica:latest
docker stop perplexica
docker rm perplexica
docker run -p 3000:3000 --name perplexica itzcrazykns1337/perplexica:latest
```
For slim version:
```bash
docker pull itzcrazykns1337/perplexica:slim-latest
docker stop perplexica
docker rm perplexica
docker run -p 3000:3000 -e SEARXNG_API_URL=http://your-searxng-url:8080 --name perplexica itzcrazykns1337/perplexica:slim-latest
```
Once updated, go to http://localhost:3000 and verify the latest changes. Your settings are preserved automatically.
## For Docker users (Building from source)
1. Navigate to your Perplexica directory and pull the latest changes:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
cd Perplexica
git pull origin master
```
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Pull the latest images from the registry.
2. Rebuild the Docker image:
```bash
docker compose pull
docker build -t perplexica .
```
5. Update and recreate the containers.
3. Stop and remove the old container, then start the new one:
```bash
docker compose up -d
docker stop perplexica
docker rm perplexica
docker run -p 3000:3000 -p 8080:8080 --name perplexica perplexica
```
6. Once the command completes, go to http://localhost:3000 and verify the latest changes.
4. Once the command completes, go to http://localhost:3000 and verify the latest changes.
## For non-Docker users
1. Clone the latest version of Perplexica from GitHub:
1. Navigate to your Perplexica directory and pull the latest changes:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
cd Perplexica
git pull origin master
```
2. Navigate to the project directory.
2. Install any new dependencies:
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. After populating the configuration run `npm i`.
5. Install the dependencies and then execute `npm run build`.
6. Finally, start the app by running `npm run start`
```bash
npm i
```
3. Rebuild the application:
```bash
npm run build
```
4. Restart the application:
```bash
npm run start
```
5. Go to http://localhost:3000 and verify the latest changes. Your settings are preserved automatically.
---


@@ -1,4 +1,24 @@
#!/bin/sh
set -e
cd /usr/local/searxng
export SEARXNG_SETTINGS_PATH=/etc/searxng/settings.yml
# Start SearXNG in background with all output redirected to /dev/null
/usr/local/searxng/venv/bin/uwsgi \
--http-socket 0.0.0.0:8080 \
--ini /etc/searxng/uwsgi.ini \
--virtualenv /usr/local/searxng/venv \
--disable-logging > /dev/null 2>&1 &
echo "Starting SearXNG..."
sleep 5
until curl -s http://localhost:8080 > /dev/null 2>&1; do
sleep 1
done
echo "SearXNG started successfully"
cd /home/perplexica
echo "Starting Perplexica..."
exec node server.js


@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.11.0-rc3",
"version": "1.11.0",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@@ -13,18 +13,18 @@
"dependencies": {
"@headlessui/react": "^2.2.0",
"@headlessui/tailwindcss": "^0.2.2",
"@huggingface/transformers": "^3.7.5",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.24",
"@langchain/community": "^0.3.49",
"@langchain/core": "^0.3.66",
"@langchain/google-genai": "^0.2.15",
"@langchain/groq": "^0.2.3",
"@langchain/ollama": "^0.2.3",
"@langchain/openai": "^0.6.2",
"@langchain/textsplitters": "^0.1.0",
"@langchain/anthropic": "^1.0.0",
"@langchain/community": "^1.0.0",
"@langchain/core": "^1.0.1",
"@langchain/google-genai": "^1.0.0",
"@langchain/groq": "^1.0.0",
"@langchain/ollama": "^1.0.0",
"@langchain/openai": "^1.0.0",
"@langchain/textsplitters": "^1.0.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
"axios": "^1.8.3",
"better-sqlite3": "^11.9.1",
"clsx": "^2.1.0",
@@ -33,7 +33,7 @@
"framer-motion": "^12.23.24",
"html-to-text": "^9.0.5",
"jspdf": "^3.0.1",
"langchain": "^0.3.30",
"langchain": "^1.0.1",
"lucide-react": "^0.363.0",
"mammoth": "^1.9.1",
"markdown-to-jsx": "^7.7.2",
@@ -54,7 +54,7 @@
"@types/better-sqlite3": "^7.6.12",
"@types/html-to-text": "^9.0.4",
"@types/jspdf": "^2.0.0",
"@types/node": "^20",
"@types/node": "^24.8.1",
"@types/pdf-parse": "^1.1.4",
"@types/react": "^18",
"@types/react-dom": "^18",
@@ -65,7 +65,6 @@
"postcss": "^8",
"prettier": "^3.2.5",
"tailwindcss": "^3.3.0",
"ts-node": "^10.9.2",
"typescript": "^5"
"typescript": "^5.9.3"
}
}


@@ -97,7 +97,7 @@ const handleEmitterEvents = async (
encoder: TextEncoder,
chatId: string,
) => {
let recievedMessage = '';
let receivedMessage = '';
const aiMessageId = crypto.randomBytes(7).toString('hex');
stream.on('data', (data) => {
@@ -113,7 +113,7 @@ const handleEmitterEvents = async (
),
);
recievedMessage += parsedData.data;
receivedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
@@ -150,7 +150,7 @@ const handleEmitterEvents = async (
db.insert(messagesSchema)
.values({
content: recievedMessage,
content: receivedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',


@@ -5,7 +5,7 @@ import crypto from 'crypto';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
import { Document } from '@langchain/core/documents';
import ModelRegistry from '@/lib/models/registry';
interface FileRes {


@@ -16,7 +16,7 @@ const Chat = () => {
useEffect(() => {
const updateDividerWidth = () => {
if (dividerRef.current) {
setDividerWidth(dividerRef.current.scrollWidth);
setDividerWidth(dividerRef.current.offsetWidth);
}
};
@@ -31,13 +31,22 @@ const Chat = () => {
useEffect(() => {
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
messageEnd.current?.scrollIntoView({ behavior: 'auto' });
};
if (chatTurns.length === 1) {
document.title = `${chatTurns[0].content.substring(0, 30)} - Perplexica`;
}
const messageEndBottom =
messageEnd.current?.getBoundingClientRect().bottom ?? 0;
const distanceFromMessageEnd = window.innerHeight - messageEndBottom;
if (distanceFromMessageEnd >= -100) {
scroll();
}
if (chatTurns[chatTurns.length - 1]?.role === 'user') {
scroll();
}


@@ -4,14 +4,13 @@ import { File } from './ChatWindow';
import Link from 'next/link';
import WeatherWidget from './WeatherWidget';
import NewsArticleWidget from './NewsArticleWidget';
import SettingsButtonMobile from '@/components/Settings/SettingsButtonMobile';
const EmptyChat = () => {
return (
<div className="relative">
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
<Link href="/settings">
<Settings className="cursor-pointer lg:hidden" />
</Link>
<SettingsButtonMobile />
</div>
<div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-4">
<div className="flex flex-col items-center justify-center w-full space-y-8">


@@ -5,8 +5,7 @@ import Focus from './MessageInputActions/Focus';
import Optimization from './MessageInputActions/Optimization';
import Attach from './MessageInputActions/Attach';
import { useChat } from '@/lib/hooks/useChat';
import AttachSmall from './MessageInputActions/AttachSmall';
import ModelSelector from './MessageInputActions/ModelSelector';
import ModelSelector from './MessageInputActions/ChatModelSelector';
const EmptyChatMessageInput = () => {
const { sendMessage } = useChat();


@@ -97,7 +97,7 @@ const ModelSelector = () => {
leaveTo="opacity-0 translate-y-1"
>
<PopoverPanel className="absolute z-10 w-[230px] sm:w-[270px] md:w-[300px] -right-4">
<div className="bg-light-primary dark:bg-dark-primary border rounded-lg border-light-200 dark:border-dark-200 w-full flex flex-col shadow-lg overflow-hidden">
<div className="bg-light-primary dark:bg-dark-primary max-h-[300px] sm:max-w-none border rounded-lg border-light-200 dark:border-dark-200 w-full flex flex-col shadow-lg overflow-hidden">
<div className="p-4 border-b border-light-200 dark:border-dark-200">
<div className="relative">
<Search
@@ -109,7 +109,7 @@ const ModelSelector = () => {
placeholder="Search models..."
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="w-full pl-9 pr-3 py-2 bg-light-secondary dark:bg-dark-secondary rounded-lg text-xs text-black dark:text-white placeholder:text-black/40 dark:placeholder:text-white/40 focus:outline-none focus:ring-2 focus:ring-sky-500/20 border border-transparent focus:border-sky-500/30 transition duration-200"
className="w-full pl-9 pr-3 py-2 bg-light-secondary dark:bg-dark-secondary rounded-lg placeholder:text-sm text-sm text-black dark:text-white placeholder:text-black/40 dark:placeholder:text-white/40 focus:outline-none focus:ring-2 focus:ring-sky-500/20 border border-transparent focus:border-sky-500/30 transition duration-200"
/>
</div>
</div>


@@ -78,7 +78,7 @@ const ModelProvider = ({
)}
onClick={() => setOpen(!open)}
>
<p className="text-black dark:text-white font-medium">
<p className="text-sm lg:text-base text-black dark:text-white font-medium">
{modelProvider.name}
</p>
<div className="flex items-center gap-4">
@@ -115,7 +115,7 @@ const ModelProvider = ({
{modelProvider.chatModels.length > 0 && (
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-xs text-black/70 dark:text-white/70">
<p className="text-[11px] lg:text-xs text-black/70 dark:text-white/70">
Chat models
</p>
<AddModel
@@ -126,7 +126,7 @@ const ModelProvider = ({
</div>
<div className="flex flex-col gap-2">
{modelProvider.chatModels.some((m) => m.key === 'error') ? (
<div className="flex flex-row items-center gap-2 text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<div className="flex flex-row items-center gap-2 text-xs lg:text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
@@ -141,7 +141,7 @@ const ModelProvider = ({
{modelProvider.chatModels.map((model, index) => (
<div
key={`${modelProvider.id}-chat-${model.key}-${index}`}
className="flex flex-row items-center space-x-1 text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5"
className="flex flex-row items-center space-x-1 text-xs lg:text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5"
>
<span>{model.name}</span>
<button
@@ -161,7 +161,7 @@ const ModelProvider = ({
{modelProvider.embeddingModels.length > 0 && (
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-xs text-black/70 dark:text-white/70">
<p className="text-[11px] lg:text-xs text-black/70 dark:text-white/70">
Embedding models
</p>
<AddModel
@@ -174,7 +174,7 @@ const ModelProvider = ({
{modelProvider.embeddingModels.some(
(m) => m.key === 'error',
) ? (
<div className="flex flex-row items-center gap-2 text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<div className="flex flex-row items-center gap-2 text-xs lg:text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
@@ -189,7 +189,7 @@ const ModelProvider = ({
{modelProvider.embeddingModels.map((model, index) => (
<div
key={`${modelProvider.id}-embedding-${model.key}-${index}`}
className="flex flex-row items-center space-x-1 text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5"
className="flex flex-row items-center space-x-1 text-xs lg:text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5"
>
<span>{model.name}</span>
<button


@@ -0,0 +1,89 @@
import Select from '@/components/ui/Select';
import { ConfigModelProvider } from '@/lib/config/types';
import { useState } from 'react';
import { toast } from 'sonner';
const ModelSelect = ({
providers,
type,
}: {
providers: ConfigModelProvider[];
type: 'chat' | 'embedding';
}) => {
const [selectedModel, setSelectedModel] = useState<string>(
type === 'chat'
? `${localStorage.getItem('chatModelProviderId')}/${localStorage.getItem('chatModelKey')}`
: `${localStorage.getItem('embeddingModelProviderId')}/${localStorage.getItem('embeddingModelKey')}`,
);
const [loading, setLoading] = useState(false);
const handleSave = async (newValue: string) => {
setLoading(true);
setSelectedModel(newValue);
try {
if (type === 'chat') {
localStorage.setItem('chatModelProviderId', newValue.split('/')[0]);
localStorage.setItem(
'chatModelKey',
newValue.split('/').slice(1).join('/'),
);
} else {
localStorage.setItem(
'embeddingModelProviderId',
newValue.split('/')[0],
);
localStorage.setItem(
'embeddingModelKey',
newValue.split('/').slice(1).join('/'),
);
}
} catch (error) {
console.error('Error saving config:', error);
toast.error('Failed to save configuration.');
} finally {
setLoading(false);
}
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-base text-black dark:text-white">
Select {type === 'chat' ? 'Chat Model' : 'Embedding Model'}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{type === 'chat'
? 'Select the model to use for chat responses'
: 'Select the model to use for embeddings'}
</p>
</div>
<Select
value={selectedModel}
onChange={(event) => handleSave(event.target.value)}
options={
type === 'chat'
? providers.flatMap((provider) =>
provider.chatModels.map((model) => ({
value: `${provider.id}/${model.key}`,
label: `${provider.name} - ${model.name}`,
})),
)
: providers.flatMap((provider) =>
provider.embeddingModels.map((model) => ({
value: `${provider.id}/${model.key}`,
label: `${provider.name} - ${model.name}`,
})),
)
}
className="!text-xs lg:!text-sm"
loading={loading}
disabled={loading}
/>
</div>
</section>
);
};
export default ModelSelect;


@@ -6,6 +6,7 @@ import {
UIConfigField,
} from '@/lib/config/types';
import ModelProvider from './ModelProvider';
import ModelSelect from './ModelSelect';
const Models = ({
fields,
@@ -17,14 +18,32 @@ const Models = ({
const [providers, setProviders] = useState<ConfigModelProvider[]>(values);
return (
<div className="flex-1 space-y-6 overflow-y-auto px-6 py-6">
<div className="flex flex-row justify-between items-center">
<p className="text-sm text-black/70 dark:text-white/70">
<div className="flex-1 space-y-6 overflow-y-auto py-6">
<div className="flex flex-col px-6 gap-y-4">
<h3 className="text-xs lg:text-sm text-black/70 dark:text-white/70">
Select models
</h3>
<ModelSelect
providers={values.filter((p) =>
p.chatModels.some((m) => m.key != 'error'),
)}
type="chat"
/>
<ModelSelect
providers={values.filter((p) =>
p.embeddingModels.some((m) => m.key != 'error'),
)}
type="embedding"
/>
</div>
<div className="border-t border-light-200 dark:border-dark-200" />
<div className="flex flex-row justify-between items-center px-6 ">
<p className="text-xs lg:text-sm text-black/70 dark:text-white/70">
Manage model provider
</p>
<AddProvider modelProviders={fields} setProviders={setProviders} />
</div>
<div className="flex flex-col gap-y-4">
<div className="flex flex-col px-6 gap-y-4">
{providers.map((provider) => (
<ModelProvider
key={`provider-${provider.id}`}


@@ -0,0 +1,21 @@
import { Settings } from 'lucide-react';
import { useState } from 'react';
import SettingsDialogue from './SettingsDialogue';
import { AnimatePresence } from 'framer-motion';
const SettingsButtonMobile = () => {
const [isOpen, setIsOpen] = useState<boolean>(false);
return (
<>
<button className="lg:hidden" onClick={() => setIsOpen(true)}>
<Settings size={18}/>
</button>
<AnimatePresence>
{isOpen && <SettingsDialogue isOpen={isOpen} setIsOpen={setIsOpen} />}
</AnimatePresence>
</>
);
};
export default SettingsButtonMobile;


@@ -1,5 +1,11 @@
import { Dialog, DialogPanel } from '@headlessui/react';
import { BrainCog, ChevronLeft, Search, Settings } from 'lucide-react';
import {
ArrowLeft,
BrainCog,
ChevronLeft,
Search,
Settings,
} from 'lucide-react';
import General from './Sections/General';
import { motion } from 'framer-motion';
import { useEffect, useState } from 'react';
@@ -8,9 +14,11 @@ import Loader from '../ui/Loader';
import { cn } from '@/lib/utils';
import Models from './Sections/Models/Section';
import SearchSection from './Sections/Search';
import Select from '@/components/ui/Select';
const sections = [
{
key: 'general',
name: 'General',
description: 'Adjust common settings.',
icon: Settings,
@@ -18,6 +26,7 @@ const sections = [
dataAdd: 'general',
},
{
key: 'models',
name: 'Models',
description: 'Configure model settings.',
icon: BrainCog,
@@ -25,6 +34,7 @@ const sections = [
dataAdd: 'modelProviders',
},
{
key: 'search',
name: 'Search',
description: 'Manage search settings.',
icon: Search,
@@ -42,7 +52,12 @@ const SettingsDialogue = ({
}) => {
const [isLoading, setIsLoading] = useState(true);
const [config, setConfig] = useState<any>(null);
const [activeSection, setActiveSection] = useState(sections[0]);
const [activeSection, setActiveSection] = useState<string>(sections[0].key);
const [selectedSection, setSelectedSection] = useState(sections[0]);
useEffect(() => {
setSelectedSection(sections.find((s) => s.key === activeSection)!);
}, [activeSection]);
useEffect(() => {
if (isOpen) {
@@ -83,14 +98,14 @@ const SettingsDialogue = ({
transition={{ duration: 0.1 }}
className="fixed inset-0 flex w-screen items-center justify-center p-4 bg-black/30 backdrop-blur-sm h-screen"
>
<DialogPanel className="space-y-4 border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary backdrop-blur-lg rounded-xl h-[calc(100vh-2%)] w-[calc(100vw-2%)] md:h-[calc(100vh-7%)] md:w-[calc(100vw-7%)] lg:h-[calc(100vh-20%)] lg:w-[calc(100vw-30%)]">
<DialogPanel className="space-y-4 border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary backdrop-blur-lg rounded-xl h-[calc(100vh-2%)] w-[calc(100vw-2%)] md:h-[calc(100vh-7%)] md:w-[calc(100vw-7%)] lg:h-[calc(100vh-20%)] lg:w-[calc(100vw-30%)] overflow-hidden flex flex-col">
{isLoading ? (
<div className="flex items-center justify-center h-full w-full">
<Loader />
</div>
) : (
<div className="flex flex-1 inset-0 h-full">
<div className="w-[240px] border-r border-white-200 dark:border-dark-200 h-full px-3 pt-3 flex flex-col">
<div className="flex flex-1 inset-0 h-full overflow-hidden">
<div className="hidden lg:flex flex-col w-[240px] border-r border-white-200 dark:border-dark-200 h-full px-3 pt-3 overflow-y-auto">
<button
onClick={() => setIsOpen(false)}
className="group flex flex-row items-center hover:bg-light-200 hover:dark:bg-dark-200 p-2 rounded-lg"
@@ -109,11 +124,11 @@ const SettingsDialogue = ({
key={section.dataAdd}
className={cn(
`flex flex-row items-center space-x-2 px-2 py-1.5 rounded-lg w-full text-sm hover:bg-light-200 hover:dark:bg-dark-200 transition duration-200 active:scale-95`,
activeSection.name === section.name
activeSection === section.key
? 'bg-light-200 dark:bg-dark-200 text-black/90 dark:text-white/90'
: ' text-black/70 dark:text-white/70',
)}
onClick={() => setActiveSection(section)}
onClick={() => setActiveSection(section.key)}
>
<section.icon size={17} />
<p>{section.name}</p>
@@ -121,23 +136,50 @@ const SettingsDialogue = ({
))}
</div>
</div>
<div className="w-full">
{activeSection.component && (
<div className="flex h-full flex-col">
<div className="border-b border-light-200/60 px-6 pb-6 pt-8 dark:border-dark-200/60">
<div className="w-full flex flex-col overflow-hidden">
<div className="flex flex-row lg:hidden w-full justify-between px-[20px] my-4 flex-shrink-0">
<button
onClick={() => setIsOpen(false)}
className="group flex flex-row items-center hover:bg-light-200 hover:dark:bg-dark-200 rounded-lg mr-[40%]"
>
<ArrowLeft
size={18}
className="text-black/50 dark:text-white/50 group-hover:text-black/70 group-hover:dark:text-white/70"
/>
</button>
<Select
options={sections.map((section) => {
return {
value: section.key,
key: section.key,
label: section.name,
};
})}
value={activeSection}
onChange={(e) => {
setActiveSection(e.target.value);
}}
className="!text-xs lg:!text-sm"
/>
</div>
{selectedSection.component && (
<div className="flex flex-1 flex-col overflow-hidden">
<div className="border-b border-light-200/60 px-6 pb-6 lg:pt-6 dark:border-dark-200/60 flex-shrink-0">
<div className="flex flex-col">
<h4 className="font-medium text-black dark:text-white">
{activeSection.name}
<h4 className="font-medium text-black dark:text-white text-sm lg:text-base">
{selectedSection.name}
</h4>
<p className="text-xs text-black/50 dark:text-white/50">
{activeSection.description}
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{selectedSection.description}
</p>
</div>
</div>
<activeSection.component
fields={config.fields[activeSection.dataAdd]}
values={config.values[activeSection.dataAdd]}
/>
<div className="flex-1 overflow-y-auto">
<selectedSection.component
fields={config.fields[selectedSection.dataAdd]}
values={config.values[selectedSection.dataAdd]}
/>
</div>
</div>
)}
</div>


@@ -1,6 +1,7 @@
import {
SelectUIConfigField,
StringUIConfigField,
TextareaUIConfigField,
UIConfigField,
} from '@/lib/config/types';
import { useState } from 'react';
@@ -58,11 +59,13 @@ const SettingsSelect = ({
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-5">
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-base text-black dark:text-white">{field.name}</h4>
<p className="text-xs text-black/50 dark:text-white/50">
<h4 className="text-sm lg:text-base text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
@@ -73,7 +76,7 @@ const SettingsSelect = ({
value: option.value,
label: option.name,
}))}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60 cursor-pointer capitalize pr-12"
className="!text-xs lg:!text-sm"
loading={loading}
disabled={loading}
/>
@@ -127,11 +130,13 @@ const SettingsInput = ({
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-5">
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-base text-black dark:text-white">{field.name}</h4>
<p className="text-xs text-black/50 dark:text-white/50">
<h4 className="text-sm lg:text-base text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
@@ -140,7 +145,7 @@ const SettingsInput = ({
value={value ?? field.default ?? ''}
onChange={(event) => setValue(event.target.value)}
onBlur={(event) => handleSave(event.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={field.placeholder}
type="text"
disabled={loading}
@@ -156,6 +161,82 @@ const SettingsInput = ({
);
};
const SettingsTextarea = ({
field,
value,
setValue,
dataAdd,
}: {
field: TextareaUIConfigField;
value?: any;
setValue: (value: any) => void;
dataAdd: string;
}) => {
const [loading, setLoading] = useState(false);
const handleSave = async (newValue: any) => {
setLoading(true);
setValue(newValue);
try {
if (field.scope === 'client') {
localStorage.setItem(field.key, newValue);
} else {
const res = await fetch('/api/config', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
key: `${dataAdd}.${field.key}`,
value: newValue,
}),
});
if (!res.ok) {
console.error('Failed to save config:', await res.text());
throw new Error('Failed to save configuration');
}
}
} catch (error) {
console.error('Error saving config:', error);
toast.error('Failed to save configuration.');
} finally {
setTimeout(() => setLoading(false), 150);
}
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-base text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
<div className="relative">
<textarea
value={value ?? field.default ?? ''}
onChange={(event) => setValue(event.target.value)}
onBlur={(event) => handleSave(event.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={field.placeholder}
rows={4}
disabled={loading}
/>
{loading && (
<span className="pointer-events-none absolute right-3 translate-y-3 text-black/40 dark:text-white/40">
<Loader2 className="h-4 w-4 animate-spin" />
</span>
)}
</div>
</div>
</section>
);
};
const SettingsField = ({
field,
value,
@@ -186,6 +267,15 @@ const SettingsField = ({
dataAdd={dataAdd}
/>
);
case 'textarea':
return (
<SettingsTextarea
field={field}
value={val}
setValue={setVal}
dataAdd={dataAdd}
/>
);
default:
return <div>Unsupported field type: {field.type}</div>;
}


@@ -9,6 +9,7 @@ import { useEffect, useState } from 'react';
import { toast } from 'sonner';
import AddProvider from '../Settings/Sections/Models/AddProviderDialog';
import ModelProvider from '../Settings/Sections/Models/ModelProvider';
import ModelSelect from '@/components/Settings/Sections/Models/ModelSelect';
const SetupConfig = ({
configSections,
@@ -62,7 +63,8 @@ const SetupConfig = ({
}
};
const hasProviders = providers.length > 0;
const hasProviders =
providers.filter((p) => p.chatModels.length > 0).length > 0;
return (
<div className="w-[95vw] md:w-[80vw] lg:w-[65vw] mx-auto px-2 sm:px-4 md:px-6 flex flex-col space-y-6">
@@ -124,8 +126,57 @@ const SetupConfig = ({
</motion.div>
)}
{setupState === 3 && (
<motion.div
initial={{ opacity: 0, y: 20 }}
animate={{
opacity: 1,
y: 0,
transition: { duration: 0.5, delay: 0.1 },
}}
className="w-full h-[calc(95vh-80px)] bg-light-primary dark:bg-dark-primary border border-light-200 dark:border-dark-200 rounded-xl shadow-sm flex flex-col overflow-hidden"
>
<div className="flex-1 overflow-y-auto px-3 sm:px-4 md:px-6 py-4 md:py-6">
<div className="flex flex-row justify-between items-center mb-4 md:mb-6 pb-3 md:pb-4 border-b border-light-200 dark:border-dark-200">
<div>
<p className="text-xs sm:text-sm font-medium text-black dark:text-white">
Select models
</p>
<p className="text-[10px] sm:text-xs text-black/50 dark:text-white/50 mt-0.5">
Select models which you wish to use.
</p>
</div>
</div>
<div className="space-y-3 md:space-y-4">
<ModelSelect providers={providers} type="chat" />
<ModelSelect providers={providers} type="embedding" />
</div>
</div>
</motion.div>
)}
<div className="flex flex-row items-center justify-between pt-2">
<a></a>
{setupState === 2 && (
<motion.button
initial={{ opacity: 0, x: 10 }}
animate={{
opacity: 1,
x: 0,
transition: { duration: 0.5 },
}}
onClick={() => {
setSetupState(3);
}}
disabled={!hasProviders || isLoading}
className="flex flex-row items-center gap-1.5 md:gap-2 px-3 md:px-5 py-2 md:py-2.5 rounded-lg bg-[#24A0ED] text-white hover:bg-[#1e8fd1] active:scale-95 transition-all duration-200 font-medium text-xs sm:text-sm disabled:bg-light-200 dark:disabled:bg-dark-200 disabled:text-black/40 dark:disabled:text-white/40 disabled:cursor-not-allowed disabled:active:scale-100"
>
<span>Next</span>
<ArrowRight className="w-4 h-4 md:w-[18px] md:h-[18px]" />
</motion.button>
)}
{setupState === 3 && (
<motion.button
initial={{ opacity: 0, x: 10 }}
animate={{


@@ -3,7 +3,7 @@ import { Loader2, ChevronDown } from 'lucide-react';
import { SelectHTMLAttributes, forwardRef } from 'react';
interface SelectProps extends SelectHTMLAttributes<HTMLSelectElement> {
options: { value: string; label: string; disabled?: boolean }[];
options: { value: any; label: string; disabled?: boolean }[];
loading?: boolean;
}
@@ -21,7 +21,7 @@ export const Select = forwardRef<HTMLSelectElement, SelectProps>(
ref={ref}
disabled={disabled || loading}
className={cn(
'bg-light-secondary dark:bg-dark-secondary px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg text-sm appearance-none w-full pr-10',
'bg-light-secondary dark:bg-dark-secondary px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg appearance-none w-full pr-10 text-xs lg:text-sm',
className,
)}
>


@@ -40,6 +40,35 @@ class ConfigManager {
default: 'dark',
scope: 'client',
},
{
name: 'System Instructions',
key: 'systemInstructions',
type: 'textarea',
required: false,
description: 'Add custom behavior or tone for the model.',
placeholder:
'e.g., "Respond in a friendly and concise tone" or "Use British English and format answers as bullet points."',
scope: 'client',
},
{
name: 'Measurement Unit',
key: 'measureUnit',
type: 'select',
options: [
{
name: 'Imperial',
value: 'Imperial',
},
{
name: 'Metric',
value: 'Metric',
},
],
required: false,
description: 'Choose between Metric and Imperial measurement unit.',
default: 'Metric',
scope: 'client',
},
],
modelProviders: [],
search: [
@@ -124,7 +153,7 @@ class ConfigManager {
providerConfigSections.forEach((provider) => {
const newProvider: ConfigModelProvider & { required?: string[] } = {
id: crypto.randomUUID(),
-      name: `${provider.name} ${Math.floor(Math.random() * 1000)}`,
+      name: `${provider.name}`,
type: provider.key,
chatModels: [],
embeddingModels: [],
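Both new fields are `scope: 'client'`, so they live in the browser rather than in the server config. A minimal sketch of reading one on the client, assuming client-scoped fields are persisted to localStorage (which the model-select changes in this release suggest):

// Falls back to the field's declared default when nothing is stored yet.
const measureUnit =
  (localStorage.getItem('measureUnit') as 'Metric' | 'Imperial' | null) ??
  'Metric';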
View File
@@ -32,10 +32,17 @@ type PasswordUIConfigField = BaseUIConfigField & {
default?: string;
};
type TextareaUIConfigField = BaseUIConfigField & {
type: 'textarea';
placeholder?: string;
default?: string;
};
type UIConfigField =
| StringUIConfigField
| SelectUIConfigField
| PasswordUIConfigField;
| PasswordUIConfigField
| TextareaUIConfigField;
type ConfigModelProvider = {
id: string;
@@ -87,4 +94,5 @@ export type {
StringUIConfigField,
ModelProviderUISection,
ConfigModelProvider,
TextareaUIConfigField,
};
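Since every variant of `UIConfigField` carries a literal `type`, the union is discriminated: switching on `type` narrows to the matching field shape without casts. A sketch, assuming each variant's literal matches its key ('string', 'select', 'password', 'textarea'):

const placeholderFor = (field: UIConfigField): string => {
  switch (field.type) {
    case 'textarea':
      return field.placeholder ?? ''; // narrowed to TextareaUIConfigField
    default:
      return '';
  }
};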
View File
@@ -1,6 +1,6 @@
import { sql } from 'drizzle-orm';
import { text, integer, sqliteTable } from 'drizzle-orm/sqlite-core';
-import { Document } from 'langchain/document';
+import { Document } from '@langchain/core/documents';
export const messages = sqliteTable('messages', {
id: integer('id').primaryKey(),
View File
@@ -28,8 +28,6 @@ export class HuggingFaceTransformersEmbeddings
-  timeout?: number;
-  private pipelinePromise: Promise<any> | undefined;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});
@@ -67,12 +65,8 @@ export class HuggingFaceTransformersEmbeddings
}
private async runEmbedding(texts: string[]) {
-    const { pipeline } = await import('@xenova/transformers');
-    const pipe = await (this.pipelinePromise ??= pipeline(
-      'feature-extraction',
-      this.model,
-    ));
+    const { pipeline } = await import('@huggingface/transformers');
+    const pipe = await pipeline('feature-extraction', this.model);
return this.caller.call(async () => {
const output = await pipe(texts, { pooling: 'mean', normalize: true });
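One trade-off in this change: the old code memoized the pipeline in `pipelinePromise`, while the new code builds one per `runEmbedding` call. If construction proves expensive, the cache could be restored without reverting the library switch; a sketch:

// Lazily create and reuse a single pipeline per instance.
private pipelinePromise?: Promise<any>;

private async getPipeline() {
  const { pipeline } = await import('@huggingface/transformers');
  return (this.pipelinePromise ??= pipeline('feature-extraction', this.model));
}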
View File
@@ -0,0 +1,152 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface AimlConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your AI/ML API key',
required: true,
placeholder: 'AI/ML API Key',
env: 'AIML_API_KEY',
scope: 'server',
},
];
class AimlProvider extends BaseModelProvider<AimlConfig> {
constructor(id: string, name: string, config: AimlConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const res = await fetch('https://api.aimlapi.com/models', {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
},
});
const data = await res.json();
const chatModels: Model[] = data.data
.filter((m: any) => m.type === 'chat-completion')
.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
const embeddingModels: Model[] = data.data
.filter((m: any) => m.type === 'embedding')
.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: embeddingModels,
chat: chatModels,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to AI/ML API. Please ensure your API key is correct and the service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading AI/ML API Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
configuration: {
baseURL: 'https://api.aimlapi.com',
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading AI/ML API Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: this.config.apiKey,
model: key,
configuration: {
baseURL: 'https://api.aimlapi.com',
},
});
}
static parseAndValidate(raw: any): AimlConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'aiml',
name: 'AI/ML API',
};
}
}
export default AimlProvider;
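A usage sketch that applies equally to the providers below; the id and name are illustrative, and the chosen key must come from `getModelList()`:

const provider = new AimlProvider(crypto.randomUUID(), 'AI/ML API', {
  apiKey: process.env.AIML_API_KEY!, // assumes the env var is set
});
const { chat } = await provider.getModelList();
const model = await provider.loadChatModel(chat[0].key);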
View File
@@ -0,0 +1,115 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatAnthropic } from '@langchain/anthropic';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface AnthropicConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Anthropic API key',
required: true,
placeholder: 'Anthropic API Key',
env: 'ANTHROPIC_API_KEY',
scope: 'server',
},
];
class AnthropicProvider extends BaseModelProvider<AnthropicConfig> {
constructor(id: string, name: string, config: AnthropicConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch('https://api.anthropic.com/v1/models?limit=999', {
method: 'GET',
headers: {
'x-api-key': this.config.apiKey,
'anthropic-version': '2023-06-01',
'Content-type': 'application/json',
},
});
if (!res.ok) {
throw new Error(`Failed to fetch Anthropic models: ${res.statusText}`);
}
const data = (await res.json()).data;
const models: Model[] = data.map((m: any) => {
return {
key: m.id,
name: m.display_name,
};
});
return {
embedding: [],
chat: models,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Anthropic Chat Model. Invalid Model Selected',
);
}
return new ChatAnthropic({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('Anthropic provider does not support embedding models.');
}
static parseAndValidate(raw: any): AnthropicConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'anthropic',
name: 'Anthropic',
};
}
}
export default AnthropicProvider;
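`loadEmbeddingModel` always throws here, so callers must not assume every provider supports both model types. A defensive sketch, where `provider` and `key` stand in for any provider instance and model key:

let embedder: Embeddings | null = null;
try {
  embedder = await provider.loadEmbeddingModel(key);
} catch {
  embedder = null; // e.g. Anthropic: no embedding models, fall back elsewhere
}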
View File
@@ -0,0 +1,107 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface DeepSeekConfig {
apiKey: string;
}
const defaultChatModels: Model[] = [
{
name: 'DeepSeek Chat / DeepSeek V3.2 Exp',
key: 'deepseek-chat',
},
{
name: 'DeepSeek Reasoner / DeepSeek V3.2 Exp',
key: 'deepseek-reasoner',
},
];
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your DeepSeek API key',
required: true,
placeholder: 'DeepSeek API Key',
env: 'DEEPSEEK_API_KEY',
scope: 'server',
},
];
class DeepSeekProvider extends BaseModelProvider<DeepSeekConfig> {
constructor(id: string, name: string, config: DeepSeekConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
return {
embedding: [],
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading DeepSeek Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
configuration: {
baseURL: 'https://api.deepseek.com',
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('DeepSeek provider does not support embedding models.');
}
static parseAndValidate(raw: any): DeepSeekConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'deepseek',
name: 'DeepSeek AI',
};
}
}
export default DeepSeekProvider;
View File
@@ -0,0 +1,140 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface GeminiConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Google Gemini API key',
required: true,
placeholder: 'Google Gemini API Key',
env: 'GEMINI_API_KEY',
scope: 'server',
},
];
class GeminiProvider extends BaseModelProvider<GeminiConfig> {
constructor(id: string, name: string, config: GeminiConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models?key=${this.config.apiKey}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
},
);
const data = await res.json();
let defaultEmbeddingModels: Model[] = [];
let defaultChatModels: Model[] = [];
data.models.forEach((m: any) => {
if (m.supportedGenerationMethods.includes('embedText')) {
defaultEmbeddingModels.push({
key: m.name,
name: m.displayName,
});
} else if (m.supportedGenerationMethods.includes('generateContent')) {
defaultChatModels.push({
key: m.name,
name: m.displayName,
});
}
});
return {
embedding: defaultEmbeddingModels,
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Chat Model. Invalid Model Selected',
);
}
return new ChatGoogleGenerativeAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Embedding Model. Invalid Model Selected.',
);
}
return new GoogleGenerativeAIEmbeddings({
apiKey: this.config.apiKey,
model: key,
});
}
static parseAndValidate(raw: any): GeminiConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'gemini',
name: 'Google Gemini',
};
}
}
export default GeminiProvider;
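Worth verifying against the live API: newer Gemini embedding models appear to advertise `embedContent` rather than the legacy `embedText`, and since the branch above only checks `embedText`, such models would land in neither list. A hedged sketch of a broader check:

// Assumption: both the legacy and current method names mark embedding models.
const isEmbeddingModel = (m: any) =>
  m.supportedGenerationMethods.includes('embedText') ||
  m.supportedGenerationMethods.includes('embedContent');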
View File
@@ -0,0 +1,118 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatGroq } from '@langchain/groq';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface GroqConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Groq API key',
required: true,
placeholder: 'Groq API Key',
env: 'GROQ_API_KEY',
scope: 'server',
},
];
class GroqProvider extends BaseModelProvider<GroqConfig> {
constructor(id: string, name: string, config: GroqConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const res = await fetch('https://api.groq.com/openai/v1/models', {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
},
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: [],
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Groq API. Please ensure your API key is correct and the Groq service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error('Error Loading Groq Chat Model. Invalid Model Selected');
}
return new ChatGroq({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('Groq provider does not support embedding models.');
}
static parseAndValidate(raw: any): GroqConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'groq',
name: 'Groq',
};
}
}
export default GroqProvider;
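As in the AI/ML provider, this fetch never checks `res.ok`, so an invalid key surfaces as a confusing failure on `data.data.map` rather than as an auth error. A small guard the fetch-based providers could share; `headers` stands in for the request headers built above:

const res = await fetch('https://api.groq.com/openai/v1/models', { headers });
if (!res.ok) {
  throw new Error(`Groq /models failed: ${res.status} ${res.statusText}`);
}
const data = await res.json();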
View File
@@ -2,10 +2,26 @@ import { ModelProviderUISection } from '@/lib/config/types';
import { ProviderConstructor } from './baseProvider';
import OpenAIProvider from './openai';
import OllamaProvider from './ollama';
import TransformersProvider from './transformers';
import AnthropicProvider from './anthropic';
import GeminiProvider from './gemini';
import GroqProvider from './groq';
import DeepSeekProvider from './deepseek';
import LMStudioProvider from './lmstudio';
import LemonadeProvider from './lemonade';
import AimlProvider from '@/lib/models/providers/aiml';
export const providers: Record<string, ProviderConstructor<any>> = {
openai: OpenAIProvider,
ollama: OllamaProvider,
transformers: TransformersProvider,
anthropic: AnthropicProvider,
gemini: GeminiProvider,
groq: GroqProvider,
deepseek: DeepSeekProvider,
aiml: AimlProvider,
lmstudio: LMStudioProvider,
lemonade: LemonadeProvider,
};
export const getModelProvidersUIConfigSection =
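This registry is the single extension point: a provider becomes available as soon as its constructor is registered under its metadata key. A sketch of adding one, with a hypothetical module path:

import MyProvider from './myProvider'; // hypothetical new provider

providers['myprovider'] = MyProvider; // key must match getProviderMetadata().key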
View File
@@ -0,0 +1,158 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface LemonadeConfig {
baseURL: string;
apiKey?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for Lemonade API',
required: true,
placeholder: 'https://api.lemonade.ai/v1',
env: 'LEMONADE_BASE_URL',
scope: 'server',
},
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Lemonade API key (optional)',
required: false,
placeholder: 'Lemonade API Key',
env: 'LEMONADE_API_KEY',
scope: 'server',
},
];
class LemonadeProvider extends BaseModelProvider<LemonadeConfig> {
constructor(id: string, name: string, config: LemonadeConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
};
if (this.config.apiKey) {
headers['Authorization'] = `Bearer ${this.config.apiKey}`;
}
const res = await fetch(`${this.config.baseURL}/models`, {
method: 'GET',
headers,
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Lemonade API. Please ensure the base URL is correct and the service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey || 'not-needed',
temperature: 0.7,
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: this.config.apiKey || 'not-needed',
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}
static parseAndValidate(raw: any): LemonadeConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
apiKey: raw.apiKey ? String(raw.apiKey) : undefined,
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lemonade',
name: 'Lemonade',
};
}
}
export default LemonadeProvider;
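Unlike LM Studio below, Lemonade uses `config.baseURL` verbatim, so a trailing slash or a missing `/v1` suffix fails only at request time. Assuming Lemonade's API is also served under `/v1` (as the placeholder suggests), the same normalization sketch would apply here:

const normalizeBaseURL = (url: string): string => {
  const trimmed = url.trim().replace(/\/+$/, '');
  return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
};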
View File
@@ -0,0 +1,148 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface LMStudioConfig {
baseURL: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for LM Studio server',
required: true,
placeholder: 'http://localhost:1234',
env: 'LM_STUDIO_BASE_URL',
scope: 'server',
},
];
class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
constructor(id: string, name: string, config: LMStudioConfig) {
super(id, name, config);
}
private normalizeBaseURL(url: string): string {
const trimmed = url.trim().replace(/\/+$/, '');
return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
}
async getDefaultModels(): Promise<ModelList> {
try {
const baseURL = this.normalizeBaseURL(this.config.baseURL);
const res = await fetch(`${baseURL}/models`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: 'lm-studio',
temperature: 0.7,
model: key,
streaming: true,
configuration: {
baseURL: this.normalizeBaseURL(this.config.baseURL),
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: 'lm-studio',
model: key,
configuration: {
baseURL: this.normalizeBaseURL(this.config.baseURL),
},
});
}
static parseAndValidate(raw: any): LMStudioConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lmstudio',
name: 'LM Studio',
};
}
}
export default LMStudioProvider;
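A local-run sketch, assuming an LM Studio server on its default port with at least one model loaded:

const lmstudio = new LMStudioProvider(crypto.randomUUID(), 'LM Studio', {
  baseURL: 'http://localhost:1234', // normalizeBaseURL appends /v1
});
const { chat } = await lmstudio.getModelList();
const model = await lmstudio.loadChatModel(chat[0].key);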
View File
@@ -0,0 +1,88 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import { HuggingFaceTransformersEmbeddings } from '@/lib/huggingfaceTransformer';
interface TransformersConfig {}
const defaultEmbeddingModels: Model[] = [
{
name: 'all-MiniLM-L6-v2',
key: 'Xenova/all-MiniLM-L6-v2',
},
{
name: 'mxbai-embed-large-v1',
key: 'mixedbread-ai/mxbai-embed-large-v1',
},
{
name: 'nomic-embed-text-v1',
key: 'Xenova/nomic-embed-text-v1',
},
];
const providerConfigFields: UIConfigField[] = [];
class TransformersProvider extends BaseModelProvider<TransformersConfig> {
constructor(id: string, name: string, config: TransformersConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
return {
embedding: [...defaultEmbeddingModels],
chat: [],
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
throw new Error('Transformers Provider does not support chat models.');
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Transformers Embedding Model. Invalid Model Selected.',
);
}
return new HuggingFaceTransformersEmbeddings({
model: key,
});
}
static parseAndValidate(raw: any): TransformersConfig {
return {};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'transformers',
name: 'Transformers',
};
}
}
export default TransformersProvider;
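Because this provider runs embeddings in-process, it needs no configuration at all. A sketch of embedding locally with one of the default models:

const local = new TransformersProvider(crypto.randomUUID(), 'Transformers', {});
const embedder = await local.loadEmbeddingModel('Xenova/all-MiniLM-L6-v2');
const vectors = await embedder.embedDocuments(['hello world']); // number[][]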
View File
@@ -16,7 +16,7 @@ import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
-import { Document } from 'langchain/document';
+import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../searxng';
import path from 'node:path';
import fs from 'node:fs';
View File
@@ -39,10 +39,11 @@ export const searchSearxng = async (
});
}
-  const res = await axios.get(url.toString());
+  const res = await fetch(url);
+  const data = await res.json();
-  const results: SearxngSearchResult[] = res.data.results;
-  const suggestions: string[] = res.data.suggestions;
+  const results: SearxngSearchResult[] = data.results;
+  const suggestions: string[] = data.suggestions;
return { results, suggestions };
};
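One behavioral difference to keep in mind: axios rejects on non-2xx responses, while fetch resolves and only rejects on network errors, so this migration quietly drops HTTP-level error handling. A guard sketch:

const res = await fetch(url);
if (!res.ok) {
  throw new Error(`SearXNG request failed: ${res.status} ${res.statusText}`);
}
const data = await res.json();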
View File
@@ -1,6 +1,6 @@
import axios from 'axios';
import { htmlToText } from 'html-to-text';
-import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
+import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
yarn.lock — 1068 lines changed. File diff suppressed because it is too large.