Compare commits


87 Commits

Author SHA1 Message Date
ItzCrazyKns
1614cfa5e5 feat(app): add widgets 2025-11-20 14:55:50 +05:30
ItzCrazyKns
036b44611f feat(search): add classifier 2025-11-20 14:55:24 +05:30
ItzCrazyKns
8b515201f3 feat(app): add search types 2025-11-20 14:53:03 +05:30
ItzCrazyKns
cbcb03c7ac feat(llm): update return type to partial 2025-11-20 14:52:41 +05:30
ItzCrazyKns
afc68ca91f feat(ollamaLLM): disable thinking in obj mode 2025-11-20 14:52:24 +05:30
ItzCrazyKns
3cc8882b28 feat(prompts): add classifier prompt 2025-11-20 14:51:49 +05:30
ItzCrazyKns
c3830795cb feat(app): add new session manager 2025-11-20 14:51:17 +05:30
ItzCrazyKns
f44ad973aa feat(types): add llm types 2025-11-18 14:39:43 +05:30
ItzCrazyKns
4bcbdad6cb feat(providers): implement custom classes 2025-11-18 14:39:04 +05:30
ItzCrazyKns
5272c7fd3e feat(models): add new base classes 2025-11-18 14:38:12 +05:30
ItzCrazyKns
657a577ec8 feat(app): enhance UI 2025-11-18 14:37:41 +05:30
ItzCrazyKns
f6dac43d7a feat(types): add message & chunk type 2025-11-18 01:17:19 +05:30
ItzCrazyKns
a00f2231d4 feat(chat-window): remove loading state 2025-11-14 23:17:41 +05:30
ItzCrazyKns
1da9b7655c Merge branch 'canary' into feat/improve-search-architecture 2025-11-14 14:38:58 +05:30
ItzCrazyKns
2edef888a3 Merge branch 'master' into canary 2025-11-14 13:29:22 +05:30
ItzCrazyKns
2dc8078848 Update Exa sponsor image and README styling 2025-11-14 13:23:50 +05:30
ItzCrazyKns
8df81c20cf Update README.md 2025-11-14 13:19:49 +05:30
ItzCrazyKns
34bd02236d Update README.md 2025-11-14 13:17:52 +05:30
ItzCrazyKns
2430376a0c feat(readme): update sponsers 2025-11-14 13:15:59 +05:30
ItzCrazyKns
bd5628b390 feat(package): bump langchain package 2025-11-14 11:45:48 +05:30
ItzCrazyKns
3d5d04eda0 Merge branch 'canary' into feat/improve-search-architecture 2025-11-13 11:54:24 +05:30
ItzCrazyKns
07a17925b1 feat(media-search): supply full history 2025-11-13 11:53:53 +05:30
ItzCrazyKns
3bcf646af1 feat(search-route): handle history processing after llm validation 2025-11-13 11:52:12 +05:30
ItzCrazyKns
e499c0b96e feat(app): migrate video search chain 2025-11-13 11:51:25 +05:30
ItzCrazyKns
33b736e1e8 feat(app): migrate image search chain 2025-11-13 11:51:13 +05:30
Kushagra Srivastava
5e1746f646 Merge pull request #928 from ItzCrazyKns/master
Merge master into canary
2025-11-13 11:49:42 +05:30
ItzCrazyKns
41fe009847 feat(app): migrate suggestion chain 2025-11-13 11:47:28 +05:30
ItzCrazyKns
1a8889c71c feat(app): add new agents directory 2025-11-10 16:45:48 +05:30
ItzCrazyKns
70c1f7230c feat(assets): remove old preview 2025-11-08 21:31:56 +05:30
ItzCrazyKns
c0771095a6 feat(app): lint & beautify 2025-10-30 17:21:48 +05:30
ItzCrazyKns
0856896aff feat(settings): fix text size, enhance UI 2025-10-30 17:21:40 +05:30
ItzCrazyKns
3da53aed03 Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-10-30 11:36:30 +05:30
ItzCrazyKns
244675759c feat(config): add getAutoMediaSearch, update uses 2025-10-30 11:29:14 +05:30
ItzCrazyKns
ce6a37aaff feat(settingsFields): add switch field 2025-10-30 11:28:15 +05:30
ItzCrazyKns
c3abba8462 feat(settings): separate personalization & preferences 2025-10-29 23:13:51 +05:30
ItzCrazyKns
f709aa8224 feat(config): add new switch config field 2025-10-29 23:12:09 +05:30
Kushagra Srivastava
22695f4ef6 Merge pull request #916 from skoved/gemini-embedding-fix
fix: list all available gemini embedding models
2025-10-28 21:56:44 +05:30
skoved
75ef2e0282 fix: list all available gemini embedding models
the new settings window does not list all available gemini embedding models. this happens because some gemini embedding models have `embedContent` instead of `embedText`
2025-10-28 11:31:41 -04:00
ItzCrazyKns
b0d97c4c83 feat(readme): revert to screenshot for now 2025-10-27 16:49:57 +05:30
ItzCrazyKns
6527388e25 Update demo.gif 2025-10-27 15:27:50 +05:30
ItzCrazyKns
7397e33f29 feat(app): rename providers to connection, enhance UX 2025-10-27 15:08:50 +05:30
ItzCrazyKns
f6ffa9ebe0 feat(readme): enhance readme 2025-10-27 13:09:59 +05:30
ItzCrazyKns
f9e675823b Create demo.gif 2025-10-27 12:57:34 +05:30
ItzCrazyKns
2e736613c5 Merge branch 'master' into canary 2025-10-27 11:43:18 +05:30
ItzCrazyKns
295334b195 feat(app): fix empty message being sent 2025-10-24 23:40:01 +05:30
ItzCrazyKns
b106abd77f feat(package): bump version 2025-10-24 23:00:07 +05:30
ItzCrazyKns
2d80fc400d feat(app): lint & beautify 2025-10-24 22:58:10 +05:30
ItzCrazyKns
097a5c55c6 feat(layout): add everything inside chat provider 2025-10-24 22:57:56 +05:30
ItzCrazyKns
d0719429b4 feat(app): fix issues with model selection 2025-10-24 22:56:23 +05:30
ItzCrazyKns
600d4ceb29 feat(hf-transformer): use langchain's inbuilt transformer class 2025-10-23 23:06:05 +05:30
ItzCrazyKns
4f50462f1d feat(package): bump version 2025-10-23 21:04:33 +05:30
ItzCrazyKns
231bc22a36 feat(docker): update searxng build script 2025-10-23 19:07:22 +05:30
ItzCrazyKns
046daf442a feat(docker): update searxng build script 2025-10-23 19:06:27 +05:30
ItzCrazyKns
cb1d85e458 feat(readme): add volumes 2025-10-21 16:57:57 +05:30
ItzCrazyKns
ce78b4ff62 feat(app): show "add model" button 2025-10-21 16:32:40 +05:30
ItzCrazyKns
88ae67065b feat(config): add measurement unit 2025-10-21 15:59:15 +05:30
ItzCrazyKns
f35d12f94c Update perplexica-screenshot.png 2025-10-21 15:26:29 +05:30
ItzCrazyKns
3d17975d83 feat(model-select): use values from localStorage 2025-10-21 15:25:38 +05:30
Kushagra Srivastava
950717e0cf Delete app.dockerfile 2025-10-21 15:13:17 +05:30
Kushagra Srivastava
4f39b5746a Merge pull request #906 from ItzCrazyKns/canary
Release v1.11.0
2025-10-21 15:07:55 +05:30
ItzCrazyKns
a01fce4e64 feat(package): bump version 2025-10-21 15:03:47 +05:30
ItzCrazyKns
92ff47110d feat(app): rename standalone to slim 2025-10-21 15:03:15 +05:30
ItzCrazyKns
82efd35b55 feat(setup-config): only allow finalization when chat model exists 2025-10-21 14:24:44 +05:30
ItzCrazyKns
3d950bac07 feat(app): update documentation 2025-10-21 13:44:07 +05:30
ItzCrazyKns
77672003ff feat(app): remove docker compose, build standalone images 2025-10-21 13:43:55 +05:30
ItzCrazyKns
e9bd2a8032 feat(settingsButtonMobile): add size 2025-10-21 12:30:34 +05:30
ItzCrazyKns
49fed3e228 feat(setup-config): add model selection state 2025-10-21 12:23:35 +05:30
ItzCrazyKns
7fb7fb9692 feat(app): fix sizes & placement for smaller screens 2025-10-21 12:23:18 +05:30
ItzCrazyKns
ff37225253 feat(models-section): allow selecting chat model 2025-10-21 12:22:37 +05:30
ItzCrazyKns
3b745868b2 feat(app): add mobile settings button 2025-10-21 12:22:22 +05:30
ItzCrazyKns
c945bf1fc3 feat(settings): add textarea type, add systemInstructions 2025-10-21 12:22:06 +05:30
ItzCrazyKns
672fc3c3a8 feat(app): fix build errors 2025-10-20 16:39:38 +05:30
ItzCrazyKns
67c2672f39 feat(searxng): use fetch instead of axios 2025-10-20 16:36:15 +05:30
ItzCrazyKns
334326744c feat(app): use new packages, fix types 2025-10-20 16:36:04 +05:30
ItzCrazyKns
042ce33cf4 feat(providers): add rest of the providers 2025-10-20 16:35:44 +05:30
ItzCrazyKns
22b9a48b26 feat(config): use provider name without number on load from env 2025-10-20 16:35:12 +05:30
ItzCrazyKns
e024d46971 feat(chat): fix typo 2025-10-20 16:34:49 +05:30
ItzCrazyKns
af36f15f3b feat(package): update packages 2025-10-20 16:33:56 +05:30
ItzCrazyKns
3d2d056f64 Update Chat.tsx 2025-10-19 22:47:45 +05:30
ItzCrazyKns
d9ebf611ff feat(hf-transformer): dynamically load library 2025-10-19 21:06:52 +05:30
ItzCrazyKns
eef6ebb924 Update Section.tsx 2025-10-19 18:33:40 +05:30
ItzCrazyKns
65975ba6fc feat(providers): add transformers provider 2025-10-19 18:32:18 +05:30
ItzCrazyKns
51629b2cca feat(chat): auto scroll, stop scrolling when scrolled back 2025-10-19 18:30:21 +05:30
ItzCrazyKns
7d71643f42 feat(app): rename model selector, fix UI 2025-10-19 18:29:32 +05:30
ItzCrazyKns
4564175822 feat(settings): add embedding model selector 2025-10-19 18:29:22 +05:30
Kushagra Srivastava
9d52d01f31 Merge pull request #901 from ItzCrazyKns/feat/config-management-model-registry
Feat/config management model registry
2025-10-19 13:58:20 +05:30
ItzCrazyKns
5abd42d46d feat(package): remove ts-node 2025-10-11 18:02:31 +05:30
98 changed files with 4397 additions and 1563 deletions

BIN  .assets/demo.gif (new file, 31 MiB)
BIN  (binary file removed; 16 MiB; file not shown)
BIN  (binary file updated; 2.1 MiB before and after; file not shown)
BIN  .assets/sponsers/exa.png (new file, 6.5 KiB)
BIN  .assets/sponsers/warp.png (new file, 433 KiB)

View File

@@ -11,6 +11,13 @@ on:
jobs:
build-amd64:
runs-on: ubuntu-latest
strategy:
matrix:
variant:
- name: full
dockerfile: Dockerfile
- name: slim
dockerfile: Dockerfile.slim
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -31,47 +38,54 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push AMD64 Docker image
- name: Build and push AMD64 Docker image (master)
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:amd64 \
-t itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--push .
- name: Build and push AMD64 Canary Docker image
if: github.ref == 'refs/heads/canary' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:canary-amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:canary-amd64 \
-t itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--push .
- name: Build and push AMD64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
-t itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--push .
build-arm64:
runs-on: ubuntu-24.04-arm
strategy:
matrix:
variant:
- name: full
dockerfile: Dockerfile
- name: slim
dockerfile: Dockerfile.slim
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -92,48 +106,51 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push ARM64 Docker image
- name: Build and push ARM64 Docker image (master)
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:arm64 \
-t itzcrazykns1337/perplexica:${VARIANT}-arm64 \
--push .
- name: Build and push ARM64 Canary Docker image
if: github.ref == 'refs/heads/canary' && github.event_name == 'push'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:canary-arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-canary-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:canary-arm64 \
-t itzcrazykns1337/perplexica:${VARIANT}-canary-arm64 \
--push .
- name: Build and push ARM64 release Docker image
if: github.event_name == 'release'
run: |
DOCKERFILE=app.dockerfile
IMAGE_NAME=perplexica
DOCKERFILE=${{ matrix.variant.dockerfile }}
VARIANT=${{ matrix.variant.name }}
docker buildx build --platform linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64 \
--cache-to=type=inline \
--provenance false \
-f $DOCKERFILE \
-t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64 \
-t itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64 \
--push .
manifest:
needs: [build-amd64, build-arm64]
runs-on: ubuntu-latest
strategy:
matrix:
variant: [full, slim]
steps:
- name: Log in to DockerHub
uses: docker/login-action@v2
@@ -146,29 +163,55 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Create and push multi-arch manifest for main
- name: Create and push manifest for main
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:main \
--amend itzcrazykns1337/${IMAGE_NAME}:amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:main
VARIANT=${{ matrix.variant }}
docker manifest create itzcrazykns1337/perplexica:${VARIANT}-latest \
--amend itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-arm64
docker manifest push itzcrazykns1337/perplexica:${VARIANT}-latest
- name: Create and push multi-arch manifest for canary
if [ "$VARIANT" = "full" ]; then
docker manifest create itzcrazykns1337/perplexica:latest \
--amend itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-arm64
docker manifest push itzcrazykns1337/perplexica:latest
docker manifest create itzcrazykns1337/perplexica:main \
--amend itzcrazykns1337/perplexica:${VARIANT}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-arm64
docker manifest push itzcrazykns1337/perplexica:main
fi
- name: Create and push manifest for canary
if: github.ref == 'refs/heads/canary' && github.event_name == 'push'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:canary \
--amend itzcrazykns1337/${IMAGE_NAME}:canary-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:canary-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:canary
VARIANT=${{ matrix.variant }}
docker manifest create itzcrazykns1337/perplexica:${VARIANT}-canary \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-arm64
docker manifest push itzcrazykns1337/perplexica:${VARIANT}-canary
- name: Create and push multi-arch manifest for releases
if [ "$VARIANT" = "full" ]; then
docker manifest create itzcrazykns1337/perplexica:canary \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-canary-arm64
docker manifest push itzcrazykns1337/perplexica:canary
fi
- name: Create and push manifest for releases
if: github.event_name == 'release'
run: |
IMAGE_NAME=perplexica
docker manifest create itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }}
VARIANT=${{ matrix.variant }}
docker manifest create itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}
if [ "$VARIANT" = "full" ]; then
docker manifest create itzcrazykns1337/perplexica:${{ env.RELEASE_VERSION }} \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-amd64 \
--amend itzcrazykns1337/perplexica:${VARIANT}-${{ env.RELEASE_VERSION }}-arm64
docker manifest push itzcrazykns1337/perplexica:${{ env.RELEASE_VERSION }}
fi

74
Dockerfile Normal file
View File

@@ -0,0 +1,74 @@
FROM node:24.5.0-slim AS builder
RUN apt-get update && apt-get install -y python3 python3-pip sqlite3 && rm -rf /var/lib/apt/lists/*
WORKDIR /home/perplexica
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile --network-timeout 600000
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
COPY drizzle ./drizzle
RUN mkdir -p /home/perplexica/data
RUN yarn build
FROM node:24.5.0-slim
RUN apt-get update && apt-get install -y \
python3-dev python3-babel python3-venv python-is-python3 \
uwsgi uwsgi-plugin-python3 \
git build-essential libxslt-dev zlib1g-dev libffi-dev libssl-dev \
curl sudo \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /home/perplexica
COPY --from=builder /home/perplexica/public ./public
COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data
COPY drizzle ./drizzle
RUN mkdir /home/perplexica/uploads
RUN useradd --shell /bin/bash --system \
--home-dir "/usr/local/searxng" \
--comment 'Privacy-respecting metasearch engine' \
searxng
RUN mkdir "/usr/local/searxng"
RUN mkdir -p /etc/searxng
RUN chown -R "searxng:searxng" "/usr/local/searxng"
COPY searxng/settings.yml /etc/searxng/settings.yml
COPY searxng/limiter.toml /etc/searxng/limiter.toml
COPY searxng/uwsgi.ini /etc/searxng/uwsgi.ini
RUN chown -R searxng:searxng /etc/searxng
USER searxng
RUN git clone "https://github.com/searxng/searxng" \
"/usr/local/searxng/searxng-src"
RUN python3 -m venv "/usr/local/searxng/searx-pyenv"
RUN "/usr/local/searxng/searx-pyenv/bin/pip" install --upgrade pip setuptools wheel pyyaml msgspec
RUN cd "/usr/local/searxng/searxng-src" && \
"/usr/local/searxng/searx-pyenv/bin/pip" install --use-pep517 --no-build-isolation -e .
USER root
WORKDIR /home/perplexica
COPY entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh
RUN sed -i 's/\r$//' ./entrypoint.sh || true
RUN echo "searxng ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
EXPOSE 3000 8080
ENV SEARXNG_API_URL=http://localhost:8080
CMD ["/home/perplexica/entrypoint.sh"]

View File

@@ -30,8 +30,6 @@ COPY drizzle ./drizzle
RUN mkdir /home/perplexica/uploads
COPY entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh
RUN sed -i 's/\r$//' ./entrypoint.sh || true
EXPOSE 3000
CMD ["/home/perplexica/entrypoint.sh"]
CMD ["node", "server.js"]

206
README.md
View File

@@ -1,74 +1,76 @@
# 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->
<div align="center" markdown="1">
<sup>Special thanks to:</sup>
<br>
<br>
<a href="https://www.warp.dev/perplexica">
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/775dd593-9b5f-40f1-bf48-479faff4c27b">
</a>
### [Warp, the AI Devtool that lives in your terminal](https://www.warp.dev/perplexica)
[Available for MacOS, Linux, & Windows](https://www.warp.dev/perplexica)
</div>
<hr/>
# Perplexica 🔍
[![GitHub Repo stars](https://img.shields.io/github/stars/ItzCrazyKns/Perplexica?style=social)](https://github.com/ItzCrazyKns/Perplexica/stargazers)
[![GitHub forks](https://img.shields.io/github/forks/ItzCrazyKns/Perplexica?style=social)](https://github.com/ItzCrazyKns/Perplexica/network/members)
[![GitHub watchers](https://img.shields.io/github/watchers/ItzCrazyKns/Perplexica?style=social)](https://github.com/ItzCrazyKns/Perplexica/watchers)
[![Docker Pulls](https://img.shields.io/docker/pulls/itzcrazykns1337/perplexica?color=blue)](https://hub.docker.com/r/itzcrazykns1337/perplexica)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/ItzCrazyKns/Perplexica/blob/master/LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/ItzCrazyKns/Perplexica?color=green)](https://github.com/ItzCrazyKns/Perplexica/commits/master)
[![Discord](https://dcbadge.limes.pink/api/server/26aArMy8tT?style=flat)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)
Perplexica is a **privacy-focused AI answering engine** that runs entirely on your own hardware. It combines knowledge from the vast internet with support for **local LLMs** (Ollama) and cloud providers (OpenAI, Claude, Groq), delivering accurate answers with **cited sources** while keeping your searches completely private.
## Table of Contents <!-- omit in toc -->
- [Overview](#overview)
- [Preview](#preview)
- [Features](#features)
- [Installation](#installation)
- [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
- [Non-Docker Installation](#non-docker-installation)
- [Ollama Connection Errors](#ollama-connection-errors)
- [Lemonade Connection Errors](#lemonade-connection-errors)
- [Using as a Search Engine](#using-as-a-search-engine)
- [Using Perplexica's API](#using-perplexicas-api)
- [Expose Perplexica to a network](#expose-perplexica-to-network)
- [One-Click Deployment](#one-click-deployment)
- [Upcoming Features](#upcoming-features)
- [Support Us](#support-us)
- [Donations](#donations)
- [Contribution](#contribution)
- [Help and Support](#help-and-support)
## Overview
Perplexica is an open-source AI-powered searching tool or an AI-powered search engine that goes deep into the internet to find answers. Inspired by Perplexity AI, it's an open-source option that not just searches the web but understands your questions. It uses advanced machine learning algorithms like similarity searching and embeddings to refine results and provides clear answers with sources cited.
Using SearxNG to stay current and fully open source, Perplexica ensures you always get the most up-to-date information without compromising your privacy.
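To make the similarity-search idea concrete, here is a minimal illustrative sketch, not Perplexica's actual internals: the query and each source are embedded, compared with cosine similarity, and the sources are reranked by that score (helper names here are hypothetical):
```typescript
// Cosine similarity between two embedding vectors.
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0,
    normA = 0,
    normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

// Rerank search results by similarity to the query embedding (descending).
function rerank<T extends { embedding: number[] }>(
  queryEmbedding: number[],
  results: T[],
): T[] {
  return [...results].sort(
    (x, y) =>
      cosineSimilarity(queryEmbedding, y.embedding) -
      cosineSimilarity(queryEmbedding, x.embedding),
  );
}
```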
![preview](.assets/perplexica-screenshot.png)
Want to know more about its architecture and how it works? You can read it [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/README.md).
## Preview
## ✨ Features
![video-preview](.assets/perplexica-preview.gif)
🤖 **Support for all major AI providers** - Use local LLMs through Ollama or connect to OpenAI, Anthropic Claude, Google Gemini, Groq, and more. Mix and match models based on your needs.
## Features
**Smart search modes** - Choose Balanced Mode for everyday searches, Fast Mode when you need quick answers, or wait for Quality Mode (coming soon) for deep research.
- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral.
- **Two Main Modes:**
- **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page.
- **Normal Mode:** Processes your query and performs a web search.
- **Focus Modes:** Special modes to better answer specific types of questions. Perplexica currently has 6 focus modes:
- **All Mode:** Searches the entire web to find the best results.
- **Writing Assistant Mode:** Helpful for writing tasks that do not require searching the web.
- **Academic Search Mode:** Finds articles and papers, ideal for academic research.
- **YouTube Search Mode:** Finds YouTube videos based on the search query.
- **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
- **Reddit Search Mode:** Searches Reddit for discussions and opinions related to the query.
- **Current Information:** Some search tools might give you outdated info because they use data from crawling bots and convert them into embeddings and store them in a index. Unlike them, Perplexica uses SearxNG, a metasearch engine to get the results and rerank and get the most relevant source out of it, ensuring you always get the latest information without the overhead of daily data updates.
- **API**: Integrate Perplexica into your existing applications and make use of its capibilities.
🎯 **Six specialized focus modes** - Get better results with modes designed for specific tasks: Academic papers, YouTube videos, Reddit discussions, Wolfram Alpha calculations, writing assistance, or general web search.
It has many more features like image and video search. Some of the planned features are mentioned in [upcoming features](#upcoming-features).
🔍 **Web search powered by SearxNG** - Access multiple search engines while keeping your identity private. Support for Tavily and Exa coming soon for even better results.
📷 **Image and video search** - Find visual content alongside text results. Search isn't limited to just articles anymore.
📄 **File uploads** - Upload documents and ask questions about them. PDFs, text files, images - Perplexica understands them all.
🌐 **Search specific domains** - Limit your search to specific websites when you know where to look. Perfect for technical documentation or research papers.
💡 **Smart suggestions** - Get intelligent search suggestions as you type, helping you formulate better queries.
📚 **Discover** - Browse interesting articles and trending content throughout the day. Stay informed without even searching.
🕒 **Search history** - Every search is saved locally so you can revisit your discoveries anytime. Your research is never lost.
**More coming soon** - We're actively developing new features based on community feedback. Join our Discord to help shape Perplexica's future!
## Sponsors
Perplexica's development is powered by the generous support of our sponsors. Their contributions help keep this project free, open-source, and accessible to everyone.
<div align="center">
<a href="https://www.warp.dev/perplexica">
<img alt="Warp Terminal" src=".assets/sponsers/warp.png" width="100%">
</a>
### **✨ [Try Warp - The AI-Powered Terminal →](https://www.warp.dev/perplexica)**
Warp is revolutionizing development workflows with AI-powered features, modern UX, and blazing-fast performance. Used by developers at top companies worldwide.
</div>
---
We'd also like to thank the following partners for their generous support:
<table>
<tr>
<td>
<a href="https://dashboard.exa.ai" target="_blank">
<img src=".assets/sponsers/exa.png" alt="Exa" style="max-width: 8rem; max-height: 8rem; border-radius: .75rem;" />
</a>
</td>
<td>
<a href="https://dashboard.exa.ai">Exa</a> • The Perfect Web Search API for LLMs - web search, crawling, deep research, and answer APIs
</td>
</tr>
</table>
## Installation
@@ -76,6 +78,35 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
### Getting Started with Docker (Recommended)
Perplexica can be easily run using Docker. Simply run the following command:
```bash
docker run -d -p 3000:3000 -v perplexica-data:/home/perplexica/data -v perplexica-uploads:/home/perplexica/uploads --name perplexica itzcrazykns1337/perplexica:latest
```
This will pull and start the Perplexica container with the bundled SearxNG search engine. Once running, open your browser and navigate to http://localhost:3000. You can then configure your settings (API keys, models, etc.) directly in the setup screen.
**Note**: The image includes both Perplexica and SearxNG, so no additional setup is required. The `-v` flags create persistent volumes for your data and uploaded files.
#### Using Perplexica with Your Own SearxNG Instance
If you already have SearxNG running, you can use the slim version of Perplexica:
```bash
docker run -d -p 3000:3000 -e SEARXNG_API_URL=http://your-searxng-url:8080 -v perplexica-data:/home/perplexica/data -v perplexica-uploads:/home/perplexica/uploads --name perplexica itzcrazykns1337/perplexica:slim-latest
```
**Important**: Make sure your SearxNG instance has:
- JSON format enabled in the settings
- Wolfram Alpha search engine enabled
Replace `http://your-searxng-url:8080` with your actual SearxNG URL. Then configure your AI provider settings in the setup screen at http://localhost:3000.
#### Advanced Setup (Building from Source)
If you prefer to build from source or need more control:
1. Ensure Docker is installed and running on your system.
2. Clone the Perplexica repository:
@@ -85,39 +116,46 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
3. After cloning, navigate to the directory containing the project files.
4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
- `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
- `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"` **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**.
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
- `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
- `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**
**Note**: You can change these after starting Perplexica from the settings dialog.
- `SIMILARITY_MEASURE`: The similarity measure to use (This is filled by default; you can leave it as is if you are unsure about it.)
5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute:
4. Build and run using Docker:
```bash
docker compose up -d
docker build -t perplexica .
docker run -d -p 3000:3000 -v perplexica-data:/home/perplexica/data -v perplexica-uploads:/home/perplexica/uploads --name perplexica perplexica
```
6. Wait a few minutes for the setup to complete. You can access Perplexica at http://localhost:3000 in your web browser.
5. Access Perplexica at http://localhost:3000 and configure your settings in the setup screen.
**Note**: After the containers are built, you can start Perplexica directly from Docker without having to open a terminal.
### Non-Docker Installation
1. Install SearXNG and allow `JSON` format in the SearXNG settings.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. After populating the configuration run `npm i`.
4. Install the dependencies and then execute `npm run build`.
5. Finally, start the app by running `npm run start`
1. Install SearXNG and allow `JSON` format in the SearXNG settings. Make sure Wolfram Alpha search engine is also enabled.
2. Clone the repository:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
cd Perplexica
```
3. Install dependencies:
```bash
npm i
```
4. Build the application:
```bash
npm run build
```
5. Start the application:
```bash
npm run start
```
6. Open your browser and navigate to http://localhost:3000 to complete the setup and configure your settings (API keys, models, SearxNG URL, etc.) in the setup screen.
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.

View File

@@ -1,35 +1,15 @@
services:
searxng:
image: docker.io/searxng/searxng:latest
volumes:
- ./searxng:/etc/searxng:rw
perplexica:
image: itzcrazykns1337/perplexica:latest
ports:
- 4000:8080
networks:
- perplexica-network
restart: unless-stopped
app:
image: itzcrazykns1337/perplexica:main
build:
context: .
dockerfile: app.dockerfile
environment:
- DOCKER=true
- SEARXNG_API_URL=http://searxng:8080
- DATA_DIR=/home/perplexica
ports:
- 3000:3000
networks:
- perplexica-network
- '3000:3000'
volumes:
- backend-dbstore:/home/perplexica/data
- data:/home/perplexica/data
- uploads:/home/perplexica/uploads
restart: unless-stopped
networks:
perplexica-network:
volumes:
backend-dbstore:
data:
name: 'perplexica-data'
uploads:
name: 'perplexica-uploads'

View File

@@ -4,11 +4,56 @@
Perplexica's Search API makes it easy to use our AI-powered search engine. You can run different types of searches, pick the models you want to use, and get the most recent info. Follow the headings below to learn more about Perplexica's search API.
## Endpoint
## Endpoints
### **POST** `http://localhost:3000/api/search`
### Get Available Providers and Models
**Note**: Replace `3000` with any other port if you've changed the default PORT
Before making search requests, you'll need to get the available providers and their models.
#### **GET** `/api/providers`
**Full URL**: `http://localhost:3000/api/providers`
Returns a list of all active providers with their available chat and embedding models.
**Response Example:**
```json
{
"providers": [
{
"id": "550e8400-e29b-41d4-a716-446655440000",
"name": "OpenAI",
"chatModels": [
{
"name": "GPT 4 Omni Mini",
"key": "gpt-4o-mini"
},
{
"name": "GPT 4 Omni",
"key": "gpt-4o"
}
],
"embeddingModels": [
{
"name": "Text Embedding 3 Large",
"key": "text-embedding-3-large"
}
]
}
]
}
```
Use the `id` field as the `providerId` and the `key` field from the models arrays when making search requests.
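As a sketch, fetching this endpoint and extracting a usable `providerId`/`key` pair might look like this in TypeScript (types inferred from the response example above; error handling omitted):
```typescript
interface Model {
  name: string;
  key: string;
}
interface Provider {
  id: string;
  name: string;
  chatModels: Model[];
  embeddingModels: Model[];
}

// Pick the first provider's first chat model for a later /api/search call.
async function pickChatModel(baseUrl = 'http://localhost:3000') {
  const res = await fetch(`${baseUrl}/api/providers`);
  const { providers }: { providers: Provider[] } = await res.json();
  const provider = providers[0];
  return { providerId: provider.id, key: provider.chatModels[0].key };
}
```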
### Search Query
#### **POST** `/api/search`
**Full URL**: `http://localhost:3000/api/search`
**Note**: Replace `localhost:3000` with your Perplexica instance URL if running on a different host or port
### Request
@@ -19,12 +64,12 @@ The API accepts a JSON object in the request body, where you define the focus mo
```json
{
"chatModel": {
"provider": "openai",
"name": "gpt-4o-mini"
"providerId": "550e8400-e29b-41d4-a716-446655440000",
"key": "gpt-4o-mini"
},
"embeddingModel": {
"provider": "openai",
"name": "text-embedding-3-large"
"providerId": "550e8400-e29b-41d4-a716-446655440000",
"key": "text-embedding-3-large"
},
"optimizationMode": "speed",
"focusMode": "webSearch",
@@ -38,20 +83,19 @@ The API accepts a JSON object in the request body, where you define the focus mo
}
```
**Note**: The `providerId` must be a valid UUID obtained from the `/api/providers` endpoint. The example above uses a sample UUID for demonstration.
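A matching request sketch in TypeScript, reusing the hypothetical `pickChatModel` helper from the providers example above (body fields as documented below):
```typescript
async function search(query: string) {
  const { providerId, key } = await pickChatModel();
  const res = await fetch('http://localhost:3000/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatModel: { providerId, key },
      focusMode: 'webSearch',
      optimizationMode: 'speed',
      query,
      stream: false, // set true for the streaming response described below
    }),
  });
  return res.json();
}
```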
### Request Parameters
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. To get available providers and models, send a GET request to `http://localhost:3000/api/providers`.
- `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
- `name`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
- Optional fields for custom OpenAI configuration:
- `customOpenAIBaseURL`: If youre using a custom OpenAI instance, provide the base URL.
- `customOpenAIKey`: The API key for a custom OpenAI instance.
- `providerId` (string): The UUID of the provider. You can get this from the `/api/providers` endpoint response.
- `key` (string): The model key/identifier (e.g., `gpt-4o-mini`, `llama3.1:latest`). Use the `key` value from the provider's `chatModels` array, not the display name.
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3000/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. To get available providers and models, send a GET request to `http://localhost:3000/api/providers`.
- `provider`: The provider for the embedding model (e.g., `openai`).
- `name`: The specific embedding model (e.g., `text-embedding-3-large`).
- `providerId` (string): The UUID of the embedding provider. You can get this from the `/api/providers` endpoint response.
- `key` (string): The embedding model key (e.g., `text-embedding-3-large`, `nomic-embed-text`). Use the `key` value from the provider's `embeddingModels` array, not the display name.
- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
@@ -108,7 +152,7 @@ The response from the API includes both the final message and the sources used t
#### Streaming Response (stream: true)
When streaming is enabled, the API returns a stream of newline-delimited JSON objects. Each line contains a complete, valid JSON object. The response has Content-Type: application/json.
When streaming is enabled, the API returns a stream of newline-delimited JSON objects using Server-Sent Events (SSE). Each line contains a complete, valid JSON object. The response has `Content-Type: text/event-stream`.
Example of streamed response objects:
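The individual objects are not reproduced in this view. As a rough sketch, a client could consume the stream as follows, assuming each line carries a `type` discriminator and a `data` payload (field names inferred from the route handler shown later in this diff, so treat them as an assumption):
```typescript
// Read the newline-delimited JSON stream from /api/search (stream: true)
// and accumulate the answer text from 'message' events.
async function consumeStream(res: Response): Promise<string> {
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let answer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep any partial trailing line for the next chunk
    for (const line of lines) {
      if (!line.trim()) continue;
      const event = JSON.parse(line); // e.g. { type: 'message', data: '...' }
      if (event.type === 'message') answer += event.data;
    }
  }
  return answer;
}
```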

View File

@@ -2,45 +2,80 @@
To update Perplexica to the latest version, follow these steps:
## For Docker users
## For Docker users (Using pre-built images)
1. Clone the latest version of Perplexica from GitHub:
Simply pull the latest image and restart your container:
```bash
docker pull itzcrazykns1337/perplexica:latest
docker stop perplexica
docker rm perplexica
docker run -d -p 3000:3000 -v perplexica-data:/home/perplexica/data -v perplexica-uploads:/home/perplexica/uploads --name perplexica itzcrazykns1337/perplexica:latest
```
For slim version:
```bash
docker pull itzcrazykns1337/perplexica:slim-latest
docker stop perplexica
docker rm perplexica
docker run -d -p 3000:3000 -e SEARXNG_API_URL=http://your-searxng-url:8080 -v perplexica-data:/home/perplexica/data -v perplexica-uploads:/home/perplexica/uploads --name perplexica itzcrazykns1337/perplexica:slim-latest
```
Once updated, go to http://localhost:3000 and verify the latest changes. Your settings are preserved automatically.
## For Docker users (Building from source)
1. Navigate to your Perplexica directory and pull the latest changes:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
cd Perplexica
git pull origin master
```
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Pull the latest images from the registry.
2. Rebuild the Docker image:
```bash
docker compose pull
docker build -t perplexica .
```
5. Update and recreate the containers.
3. Stop and remove the old container, then start the new one:
```bash
docker compose up -d
docker stop perplexica
docker rm perplexica
docker run -p 3000:3000 -p 8080:8080 --name perplexica perplexica
```
6. Once the command completes, go to http://localhost:3000 and verify the latest changes.
4. Once the command completes, go to http://localhost:3000 and verify the latest changes.
## For non-Docker users
1. Clone the latest version of Perplexica from GitHub:
1. Navigate to your Perplexica directory and pull the latest changes:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
cd Perplexica
git pull origin master
```
2. Navigate to the project directory.
2. Install any new dependencies:
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. After populating the configuration run `npm i`.
5. Install the dependencies and then execute `npm run build`.
6. Finally, start the app by running `npm run start`
```bash
npm i
```
3. Rebuild the application:
```bash
npm run build
```
4. Restart the application:
```bash
npm run start
```
5. Go to http://localhost:3000 and verify the latest changes. Your settings are preserved automatically.
---

View File

@@ -1,4 +1,32 @@
#!/bin/sh
set -e
echo "Starting SearXNG..."
sudo -H -u searxng bash -c "cd /usr/local/searxng/searxng-src && export SEARXNG_SETTINGS_PATH='/etc/searxng/settings.yml' && export FLASK_APP=searx/webapp.py && /usr/local/searxng/searx-pyenv/bin/python -m flask run --host=0.0.0.0 --port=8080" &
SEARXNG_PID=$!
echo "Waiting for SearXNG to be ready..."
sleep 5
COUNTER=0
MAX_TRIES=30
until curl -s http://localhost:8080 > /dev/null 2>&1; do
COUNTER=$((COUNTER+1))
if [ $COUNTER -ge $MAX_TRIES ]; then
echo "Warning: SearXNG health check timeout, but continuing..."
break
fi
sleep 1
done
if curl -s http://localhost:8080 > /dev/null 2>&1; then
echo "SearXNG started successfully (PID: $SEARXNG_PID)"
else
echo "SearXNG may not be fully ready, but continuing (PID: $SEARXNG_PID)"
fi
cd /home/perplexica
echo "Starting Perplexica..."
exec node server.js

View File

@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.11.0-rc3",
"version": "1.11.2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@@ -13,18 +13,19 @@
"dependencies": {
"@headlessui/react": "^2.2.0",
"@headlessui/tailwindcss": "^0.2.2",
"@huggingface/transformers": "^3.7.5",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.24",
"@langchain/community": "^0.3.49",
"@langchain/core": "^0.3.66",
"@langchain/google-genai": "^0.2.15",
"@langchain/groq": "^0.2.3",
"@langchain/ollama": "^0.2.3",
"@langchain/openai": "^0.6.2",
"@langchain/textsplitters": "^0.1.0",
"@langchain/anthropic": "^1.0.1",
"@langchain/community": "^1.0.3",
"@langchain/core": "^1.0.5",
"@langchain/google-genai": "^1.0.1",
"@langchain/groq": "^1.0.1",
"@langchain/langgraph": "^1.0.1",
"@langchain/ollama": "^1.0.1",
"@langchain/openai": "^1.1.1",
"@langchain/textsplitters": "^1.0.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
"axios": "^1.8.3",
"better-sqlite3": "^11.9.1",
"clsx": "^2.1.0",
@@ -33,7 +34,7 @@
"framer-motion": "^12.23.24",
"html-to-text": "^9.0.5",
"jspdf": "^3.0.1",
"langchain": "^0.3.30",
"langchain": "^1.0.4",
"lucide-react": "^0.363.0",
"mammoth": "^1.9.1",
"markdown-to-jsx": "^7.7.2",
@@ -48,13 +49,13 @@
"tailwind-merge": "^2.2.2",
"winston": "^3.17.0",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"
"zod": "^4.1.12"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",
"@types/html-to-text": "^9.0.4",
"@types/jspdf": "^2.0.0",
"@types/node": "^20",
"@types/node": "^24.8.1",
"@types/pdf-parse": "^1.1.4",
"@types/react": "^18",
"@types/react-dom": "^18",
@@ -65,7 +66,6 @@
"postcss": "^8",
"prettier": "^3.2.5",
"tailwindcss": "^3.3.0",
"ts-node": "^10.9.2",
"typescript": "^5"
"typescript": "^5.9.3"
}
}

View File

@@ -97,7 +97,7 @@ const handleEmitterEvents = async (
encoder: TextEncoder,
chatId: string,
) => {
let recievedMessage = '';
let receivedMessage = '';
const aiMessageId = crypto.randomBytes(7).toString('hex');
stream.on('data', (data) => {
@@ -113,7 +113,7 @@ const handleEmitterEvents = async (
),
);
recievedMessage += parsedData.data;
receivedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
@@ -150,7 +150,7 @@ const handleEmitterEvents = async (
db.insert(messagesSchema)
.values({
content: recievedMessage,
content: receivedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',

View File

@@ -1,4 +1,4 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import searchImages from '@/lib/agents/media/image';
import ModelRegistry from '@/lib/models/registry';
import { ModelWithProvider } from '@/lib/models/types';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
@@ -13,6 +13,13 @@ export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
@@ -23,16 +30,9 @@ export const POST = async (req: Request) => {
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const images = await handleImageSearch(
const images = await searchImages(
{
chat_history: chatHistory,
chatHistory: chatHistory,
query: body.query,
},
llm,

View File

@@ -30,12 +30,6 @@ export const POST = async (req: Request) => {
body.optimizationMode = body.optimizationMode || 'balanced';
body.stream = body.stream || false;
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const registry = new ModelRegistry();
const [llm, embeddings] = await Promise.all([
@@ -46,6 +40,12 @@ export const POST = async (req: Request) => {
),
]);
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
@@ -128,7 +128,7 @@ export const POST = async (req: Request) => {
try {
controller.close();
} catch (error) {}
} catch (error) { }
});
emitter.on('data', (data: string) => {

View File

@@ -1,7 +1,6 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import generateSuggestions from '@/lib/agents/suggestions';
import ModelRegistry from '@/lib/models/registry';
import { ModelWithProvider } from '@/lib/models/types';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
interface SuggestionsGenerationBody {
@@ -13,6 +12,13 @@ export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
@@ -23,16 +29,9 @@ export const POST = async (req: Request) => {
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
chatHistory,
},
llm,
);

View File

@@ -5,7 +5,7 @@ import crypto from 'crypto';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
import { Document } from '@langchain/core/documents';
import ModelRegistry from '@/lib/models/registry';
interface FileRes {

View File

@@ -1,4 +1,4 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import handleVideoSearch from '@/lib/agents/media/video';
import ModelRegistry from '@/lib/models/registry';
import { ModelWithProvider } from '@/lib/models/types';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
@@ -13,6 +13,13 @@ export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
@@ -23,16 +30,9 @@ export const POST = async (req: Request) => {
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
chatHistory: chatHistory,
query: body.query,
},
llm,

View File

@@ -1,17 +1,10 @@
'use client';
import ChatWindow from '@/components/ChatWindow';
import { useParams } from 'next/navigation';
import React from 'react';
import { ChatProvider } from '@/lib/hooks/useChat';
const Page = () => {
const { chatId }: { chatId: string } = useParams();
return (
<ChatProvider id={chatId}>
<ChatWindow />
</ChatProvider>
);
return <ChatWindow />;
};
export default Page;

View File

@@ -9,6 +9,7 @@ import { Toaster } from 'sonner';
import ThemeProvider from '@/components/theme/Provider';
import configManager from '@/lib/config';
import SetupWizard from '@/components/Setup/SetupWizard';
import { ChatProvider } from '@/lib/hooks/useChat';
const montserrat = Montserrat({
weight: ['300', '400', '500', '700'],
@@ -36,7 +37,7 @@ export default function RootLayout({
<body className={cn('h-full', montserrat.className)}>
<ThemeProvider>
{setupComplete ? (
<>
<ChatProvider>
<Sidebar>{children}</Sidebar>
<Toaster
toastOptions={{
@@ -47,7 +48,7 @@ export default function RootLayout({
},
}}
/>
</>
</ChatProvider>
) : (
<SetupWizard configSections={configSections} />
)}

View File

@@ -1,7 +1,5 @@
import ChatWindow from '@/components/ChatWindow';
import { ChatProvider } from '@/lib/hooks/useChat';
import { Metadata } from 'next';
import { Suspense } from 'react';
export const metadata: Metadata = {
title: 'Chat - Perplexica',
@@ -9,15 +7,7 @@ export const metadata: Metadata = {
};
const Home = () => {
return (
<div>
<Suspense>
<ChatProvider>
<ChatWindow />
</ChatProvider>
</Suspense>
</div>
);
return <ChatWindow />;
};
export default Home;

View File

@@ -16,7 +16,7 @@ const Chat = () => {
useEffect(() => {
const updateDividerWidth = () => {
if (dividerRef.current) {
setDividerWidth(dividerRef.current.scrollWidth);
setDividerWidth(dividerRef.current.offsetWidth);
}
};
@@ -31,13 +31,22 @@ const Chat = () => {
useEffect(() => {
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
messageEnd.current?.scrollIntoView({ behavior: 'auto' });
};
if (chatTurns.length === 1) {
document.title = `${chatTurns[0].content.substring(0, 30)} - Perplexica`;
}
const messageEndBottom =
messageEnd.current?.getBoundingClientRect().bottom ?? 0;
const distanceFromMessageEnd = window.innerHeight - messageEndBottom;
if (distanceFromMessageEnd >= -100) {
scroll();
}
if (chatTurns[chatTurns.length - 1]?.role === 'user') {
scroll();
}

View File

@@ -8,7 +8,7 @@ import { Settings } from 'lucide-react';
import Link from 'next/link';
import NextError from 'next/error';
import { useChat } from '@/lib/hooks/useChat';
import Loader from './ui/Loader';
import SettingsButtonMobile from './Settings/SettingsButtonMobile';
export interface BaseMessage {
chatId: string;
@@ -51,14 +51,12 @@ export interface File {
}
const ChatWindow = () => {
const { hasError, isReady, notFound, messages } = useChat();
const { hasError, notFound, messages } = useChat();
if (hasError) {
return (
<div className="relative">
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
<Link href="/settings">
<Settings className="cursor-pointer lg:hidden" />
</Link>
<SettingsButtonMobile />
</div>
<div className="flex flex-col items-center justify-center min-h-screen">
<p className="dark:text-white/70 text-black/70 text-sm">
@@ -69,24 +67,18 @@ const ChatWindow = () => {
);
}
return isReady ? (
notFound ? (
<NextError statusCode={404} />
) : (
<div>
{messages.length > 0 ? (
<>
<Navbar />
<Chat />
</>
) : (
<EmptyChat />
)}
</div>
)
return notFound ? (
<NextError statusCode={404} />
) : (
<div className="flex flex-row items-center justify-center min-h-screen">
<Loader />
<div>
{messages.length > 0 ? (
<>
<Navbar />
<Chat />
</>
) : (
<EmptyChat />
)}
</div>
);
};

View File

@@ -4,14 +4,13 @@ import { File } from './ChatWindow';
import Link from 'next/link';
import WeatherWidget from './WeatherWidget';
import NewsArticleWidget from './NewsArticleWidget';
import SettingsButtonMobile from '@/components/Settings/SettingsButtonMobile';
const EmptyChat = () => {
return (
<div className="relative">
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
<Link href="/settings">
<Settings className="cursor-pointer lg:hidden" />
</Link>
<SettingsButtonMobile />
</div>
<div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-4">
<div className="flex flex-col items-center justify-center w-full space-y-8">

View File

@@ -5,8 +5,7 @@ import Focus from './MessageInputActions/Focus';
import Optimization from './MessageInputActions/Optimization';
import Attach from './MessageInputActions/Attach';
import { useChat } from '@/lib/hooks/useChat';
import AttachSmall from './MessageInputActions/AttachSmall';
import ModelSelector from './MessageInputActions/ModelSelector';
import ModelSelector from './MessageInputActions/ChatModelSelector';
const EmptyChatMessageInput = () => {
const { sendMessage } = useChat();

View File

@@ -20,9 +20,9 @@ const Copy = ({
setCopied(true);
setTimeout(() => setCopied(false), 1000);
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
className="p-2 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{copied ? <Check size={18} /> : <ClipboardList size={18} />}
{copied ? <Check size={16} /> : <ClipboardList size={16} />}
</button>
);
};

View File

@@ -1,4 +1,4 @@
import { ArrowLeftRight } from 'lucide-react';
import { ArrowLeftRight, Repeat } from 'lucide-react';
const Rewrite = ({
rewrite,
@@ -10,12 +10,11 @@ const Rewrite = ({
return (
<button
onClick={() => rewrite(messageId)}
className="py-2 px-3 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white flex flex-row items-center space-x-1"
className="p-2 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white flex flex-row items-center space-x-1"
>
<ArrowLeftRight size={18} />
<p className="text-xs font-medium">Rewrite</p>
<Repeat size={16} />
</button>
);
};
1;
export default Rewrite;

View File

@@ -10,6 +10,7 @@ import {
StopCircle,
Layers3,
Plus,
CornerDownRight,
} from 'lucide-react';
import Markdown, { MarkdownToJSX } from 'markdown-to-jsx';
import Copy from './MessageActions/Copy';
@@ -122,14 +123,14 @@ const MessageBox = ({
</Markdown>
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
<div className="flex flex-row items-center space-x-1">
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4">
<div className="flex flex-row items-center -ml-2">
<Rewrite
rewrite={rewrite}
messageId={section.assistantMessage.messageId}
/>
</div>
<div className="flex flex-row items-center space-x-1">
<div className="flex flex-row items-center -mr-2">
<Copy
initialMessage={section.assistantMessage.content}
section={section}
@@ -142,12 +143,12 @@ const MessageBox = ({
start();
}
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
className="p-2 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{speechStatus === 'started' ? (
<StopCircle size={18} />
<StopCircle size={16} />
) : (
<Volume2 size={18} />
<Volume2 size={16} />
)}
</button>
</div>
@@ -159,7 +160,7 @@ const MessageBox = ({
section.suggestions.length > 0 &&
section.assistantMessage &&
!loading && (
<div className="mt-8 pt-6 border-t border-light-200/50 dark:border-dark-200/50">
<div className="mt-6">
<div className="flex flex-row items-center space-x-2 mb-4">
<Layers3
className="text-black dark:text-white"
@@ -173,20 +174,24 @@ const MessageBox = ({
{section.suggestions.map(
(suggestion: string, i: number) => (
<div key={i}>
{i > 0 && (
<div className="h-px bg-light-200/40 dark:bg-dark-200/40 mx-3" />
)}
<div className="h-px bg-light-200/40 dark:bg-dark-200/40" />
<button
onClick={() => sendMessage(suggestion)}
className="group w-full px-3 py-4 text-left transition-colors duration-200"
className="group w-full py-4 text-left transition-colors duration-200"
>
<div className="flex items-center justify-between gap-3">
<p className="text-sm text-black/70 dark:text-white/70 group-hover:text-[#24A0ED] transition-colors duration-200 leading-relaxed">
{suggestion}
</p>
<div className="flex flex-row space-x-3 items-center ">
<CornerDownRight
size={17}
className="group-hover:text-sky-400 transition-colors duration-200"
/>
<p className="text-sm text-black/70 dark:text-white/70 group-hover:text-sky-400 transition-colors duration-200 leading-relaxed">
{suggestion}
</p>
</div>
<Plus
size={16}
className="text-black/40 dark:text-white/40 group-hover:text-[#24A0ED] transition-colors duration-200 flex-shrink-0"
className="text-black/40 dark:text-white/40 group-hover:text-sky-400 transition-colors duration-200 flex-shrink-0"
/>
</div>
</button>
@@ -205,11 +210,11 @@ const MessageBox = ({
<div className="lg:sticky lg:top-20 flex flex-col items-center space-y-3 w-full lg:w-3/12 z-30 h-full pb-4">
<SearchImages
query={section.userMessage.content}
chatHistory={chatTurns.slice(0, sectionIndex * 2)}
chatHistory={chatTurns}
messageId={section.assistantMessage.messageId}
/>
<SearchVideos
chatHistory={chatTurns.slice(0, sectionIndex * 2)}
chatHistory={chatTurns}
query={section.userMessage.content}
messageId={section.assistantMessage.messageId}
/>

View File

@@ -8,17 +8,16 @@ import {
PopoverPanel,
Transition,
} from '@headlessui/react';
import { Fragment, useEffect, useState } from 'react';
import { Fragment, useEffect, useMemo, useState } from 'react';
import { MinimalProvider } from '@/lib/models/types';
import { useChat } from '@/lib/hooks/useChat';
const ModelSelector = () => {
const [providers, setProviders] = useState<MinimalProvider[]>([]);
const [isLoading, setIsLoading] = useState(true);
const [searchQuery, setSearchQuery] = useState('');
const [selectedModel, setSelectedModel] = useState<{
providerId: string;
modelKey: string;
} | null>(null);
const { setChatModelProvider, chatModelProvider } = useChat();
useEffect(() => {
const loadProviders = async () => {
@@ -30,28 +29,8 @@ const ModelSelector = () => {
throw new Error('Failed to fetch providers');
}
const data = await res.json();
setProviders(data.providers || []);
const savedProviderId = localStorage.getItem('chatModelProviderId');
const savedModelKey = localStorage.getItem('chatModelKey');
if (savedProviderId && savedModelKey) {
setSelectedModel({
providerId: savedProviderId,
modelKey: savedModelKey,
});
} else if (data.providers && data.providers.length > 0) {
const firstProvider = data.providers.find(
(p: MinimalProvider) => p.chatModels.length > 0,
);
if (firstProvider && firstProvider.chatModels[0]) {
setSelectedModel({
providerId: firstProvider.id,
modelKey: firstProvider.chatModels[0].key,
});
}
}
const data: { providers: MinimalProvider[] } = await res.json();
setProviders(data.providers);
} catch (error) {
console.error('Error loading providers:', error);
} finally {
@@ -62,13 +41,32 @@ const ModelSelector = () => {
loadProviders();
}, []);
const orderedProviders = useMemo(() => {
if (!chatModelProvider?.providerId) return providers;
const currentProviderIndex = providers.findIndex(
(p) => p.id === chatModelProvider.providerId,
);
if (currentProviderIndex === -1) {
return providers;
}
const selectedProvider = providers[currentProviderIndex];
const remainingProviders = providers.filter(
(_, index) => index !== currentProviderIndex,
);
return [selectedProvider, ...remainingProviders];
}, [providers, chatModelProvider]);
const handleModelSelect = (providerId: string, modelKey: string) => {
setSelectedModel({ providerId, modelKey });
setChatModelProvider({ providerId, key: modelKey });
localStorage.setItem('chatModelProviderId', providerId);
localStorage.setItem('chatModelKey', modelKey);
};
const filteredProviders = providers
const filteredProviders = orderedProviders
.map((provider) => ({
...provider,
chatModels: provider.chatModels.filter(
@@ -97,7 +95,7 @@ const ModelSelector = () => {
leaveTo="opacity-0 translate-y-1"
>
<PopoverPanel className="absolute z-10 w-[230px] sm:w-[270px] md:w-[300px] -right-4">
<div className="bg-light-primary dark:bg-dark-primary border rounded-lg border-light-200 dark:border-dark-200 w-full flex flex-col shadow-lg overflow-hidden">
<div className="bg-light-primary dark:bg-dark-primary max-h-[300px] sm:max-w-none border rounded-lg border-light-200 dark:border-dark-200 w-full flex flex-col shadow-lg overflow-hidden">
<div className="p-4 border-b border-light-200 dark:border-dark-200">
<div className="relative">
<Search
@@ -109,7 +107,7 @@ const ModelSelector = () => {
placeholder="Search models..."
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="w-full pl-9 pr-3 py-2 bg-light-secondary dark:bg-dark-secondary rounded-lg text-xs text-black dark:text-white placeholder:text-black/40 dark:placeholder:text-white/40 focus:outline-none focus:ring-2 focus:ring-sky-500/20 border border-transparent focus:border-sky-500/30 transition duration-200"
className="w-full pl-9 pr-3 py-2 bg-light-secondary dark:bg-dark-secondary rounded-lg placeholder:text-sm text-sm text-black dark:text-white placeholder:text-black/40 dark:placeholder:text-white/40 focus:outline-none focus:ring-2 focus:ring-sky-500/20 border border-transparent focus:border-sky-500/30 transition duration-200"
/>
</div>
</div>
@@ -140,15 +138,16 @@ const ModelSelector = () => {
<div className="flex flex-col px-2 py-2 space-y-0.5">
{provider.chatModels.map((model) => (
<PopoverButton
<button
key={model.key}
onClick={() =>
handleModelSelect(provider.id, model.key)
}
type="button"
className={cn(
'px-3 py-2 flex items-center justify-between text-start duration-200 cursor-pointer transition rounded-lg group',
selectedModel?.providerId === provider.id &&
selectedModel?.modelKey === model.key
chatModelProvider?.providerId === provider.id &&
chatModelProvider?.key === model.key
? 'bg-light-secondary dark:bg-dark-secondary'
: 'hover:bg-light-secondary dark:hover:bg-dark-secondary',
)}
@@ -158,8 +157,9 @@ const ModelSelector = () => {
size={15}
className={cn(
'shrink-0',
selectedModel?.providerId === provider.id &&
selectedModel?.modelKey === model.key
chatModelProvider?.providerId ===
provider.id &&
chatModelProvider?.key === model.key
? 'text-sky-500'
: 'text-black/50 dark:text-white/50 group-hover:text-black/70 group-hover:dark:text-white/70',
)}
@@ -167,8 +167,9 @@ const ModelSelector = () => {
<p
className={cn(
'text-sm truncate',
selectedModel?.providerId === provider.id &&
selectedModel?.modelKey === model.key
chatModelProvider?.providerId ===
provider.id &&
chatModelProvider?.key === model.key
? 'text-sky-500 font-medium'
: 'text-black/70 dark:text-white/70 group-hover:text-black dark:group-hover:text-white',
)}
@@ -176,7 +177,7 @@ const ModelSelector = () => {
{model.name}
</p>
</div>
</PopoverButton>
</button>
))}
</div>
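The reordering logic above only hoists the currently selected provider to the front of the list. A standalone sketch of the same behavior, using the MinimalProvider type from this diff:

// Hoist the selected provider to the top of the list; fall back to the
// original order when nothing is selected or the id is unknown.
const hoistSelected = (
  providers: MinimalProvider[],
  selectedId?: string,
): MinimalProvider[] => {
  const index = providers.findIndex((p) => p.id === selectedId);
  return index === -1
    ? providers
    : [providers[index], ...providers.filter((_, i) => i !== index)];
};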

View File

@@ -97,7 +97,7 @@ const AddModel = ({
>
<DialogPanel className="w-full mx-4 lg:w-[600px] max-h-[85vh] flex flex-col border bg-light-primary dark:bg-dark-primary border-light-secondary dark:border-dark-secondary rounded-lg">
<div className="px-6 pt-6 pb-4">
<h3 className="text-black/90 dark:text-white/90 font-medium">
<h3 className="text-black/90 dark:text-white/90 font-medium text-sm">
Add new {type === 'chat' ? 'chat' : 'embedding'} model
</h3>
</div>
@@ -115,7 +115,7 @@ const AddModel = ({
<input
value={modelName}
onChange={(e) => setModelName(e.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder="e.g., GPT-4"
type="text"
required
@@ -128,7 +128,7 @@ const AddModel = ({
<input
value={modelKey}
onChange={(e) => setModelKey(e.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder="e.g., gpt-4"
type="text"
required
@@ -140,7 +140,7 @@ const AddModel = ({
<button
type="submit"
disabled={loading}
className="px-4 py-2 rounded-lg text-sm bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
className="px-4 py-2 rounded-lg text-[13px] bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
>
{loading ? (
<Loader2 className="animate-spin" size={16} />

View File

@@ -82,10 +82,10 @@ const AddProvider = ({
setProviders((prev) => [...prev, data]);
toast.success('Provider added successfully.');
toast.success('Connection added successfully.');
} catch (error) {
console.error('Error adding provider:', error);
toast.error('Failed to add provider.');
toast.error('Failed to add connection.');
} finally {
setLoading(false);
setOpen(false);
@@ -96,10 +96,10 @@ const AddProvider = ({
<>
<button
onClick={() => setOpen(true)}
className="px-3 md:px-4 py-1.5 md:py-2 rounded-lg text-xs sm:text-sm border border-light-200 dark:border-dark-200 text-black dark:text-white bg-light-secondary/50 dark:bg-dark-secondary/50 hover:bg-light-secondary hover:dark:bg-dark-secondary hover:border-light-300 hover:dark:border-dark-300 flex flex-row items-center space-x-1 active:scale-95 transition duration-200"
className="px-3 md:px-4 py-1.5 md:py-2 rounded-lg text-xs sm:text-xs border border-light-200 dark:border-dark-200 text-black dark:text-white bg-light-secondary/50 dark:bg-dark-secondary/50 hover:bg-light-secondary hover:dark:bg-dark-secondary hover:border-light-300 hover:dark:border-dark-300 flex flex-row items-center space-x-1 active:scale-95 transition duration-200"
>
<Plus className="w-3.5 h-3.5 md:w-4 md:h-4" />
<span>Add Provider</span>
<span>Add Connection</span>
</button>
<AnimatePresence>
{open && (
@@ -119,8 +119,8 @@ const AddProvider = ({
<DialogPanel className="w-full mx-4 lg:w-[600px] max-h-[85vh] flex flex-col border bg-light-primary dark:bg-dark-primary border-light-secondary dark:border-dark-secondary rounded-lg">
<form onSubmit={handleSubmit} className="flex flex-col flex-1">
<div className="px-6 pt-6 pb-4">
<h3 className="text-black/90 dark:text-white/90 font-medium">
Add new provider
<h3 className="text-black/90 dark:text-white/90 font-medium text-sm">
Add new connection
</h3>
</div>
<div className="border-t border-light-200 dark:border-dark-200" />
@@ -128,7 +128,7 @@ const AddProvider = ({
<div className="flex flex-col space-y-4">
<div className="flex flex-col items-start space-y-2">
<label className="text-xs text-black/70 dark:text-white/70">
Select provider type
Select connection type
</label>
<Select
value={selectedProvider ?? ''}
@@ -149,13 +149,13 @@ const AddProvider = ({
className="flex flex-col items-start space-y-2"
>
<label className="text-xs text-black/70 dark:text-white/70">
Name*
Connection Name*
</label>
<input
value={name}
onChange={(e) => setName(e.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={'Provider Name'}
placeholder={'e.g., My OpenAI Connection'}
type="text"
required={true}
/>
@@ -178,7 +178,7 @@ const AddProvider = ({
[field.key]: event.target.value,
}))
}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={
(field as StringUIConfigField).placeholder
}
@@ -194,12 +194,12 @@ const AddProvider = ({
<button
type="submit"
disabled={loading}
className="px-4 py-2 rounded-lg text-sm bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
className="px-4 py-2 rounded-lg text-[13px] bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
>
{loading ? (
<Loader2 className="animate-spin" size={16} />
) : (
'Add Provider'
'Add Connection'
)}
</button>
</div>

View File

@@ -34,10 +34,10 @@ const DeleteProvider = ({
return prev.filter((p) => p.id !== modelProvider.id);
});
toast.success('Provider deleted successfully.');
toast.success('Connection deleted successfully.');
} catch (error) {
console.error('Error deleting provider:', error);
toast.error('Failed to delete provider.');
toast.error('Failed to delete connection.');
} finally {
setLoading(false);
}
@@ -51,7 +51,7 @@ const DeleteProvider = ({
setOpen(true);
}}
className="group p-1.5 rounded-md hover:bg-light-200 hover:dark:bg-dark-200 transition-colors group"
title="Delete provider"
title="Delete connection"
>
<Trash2
size={14}
@@ -76,14 +76,15 @@ const DeleteProvider = ({
<DialogPanel className="w-full mx-4 lg:w-[600px] max-h-[85vh] flex flex-col border bg-light-primary dark:bg-dark-primary border-light-secondary dark:border-dark-secondary rounded-lg">
<div className="px-6 pt-6 pb-4">
<h3 className="text-black/90 dark:text-white/90 font-medium">
Delete provider
Delete connection
</h3>
</div>
<div className="border-t border-light-200 dark:border-dark-200" />
<div className="flex-1 overflow-y-auto px-6 py-4">
<p className="text-SM text-black/60 dark:text-white/60">
Are you sure you want to delete the provider &quot;
<p className="text-sm text-black/60 dark:text-white/60">
Are you sure you want to delete the connection &quot;
{modelProvider.name}&quot;? This action cannot be undone.
All associated models will also be removed.
</p>
</div>
<div className="px-6 py-6 flex justify-end space-x-2">

View File

@@ -1,7 +1,7 @@
import { UIConfigField, ConfigModelProvider } from '@/lib/config/types';
import { cn } from '@/lib/utils';
import { AnimatePresence, motion } from 'framer-motion';
import { AlertCircle, ChevronDown, Pencil, Trash2, X } from 'lucide-react';
import { AlertCircle, Plug2, Plus, Pencil, Trash2, X } from 'lucide-react';
import { useState } from 'react';
import { toast } from 'sonner';
import AddModel from './AddModelDialog';
@@ -17,7 +17,7 @@ const ModelProvider = ({
fields: UIConfigField[];
setProviders: React.Dispatch<React.SetStateAction<ConfigModelProvider[]>>;
}) => {
const [open, setOpen] = useState(false);
const [open, setOpen] = useState(true);
const handleModelDelete = async (
type: 'chat' | 'embedding',
@@ -66,150 +66,157 @@ const ModelProvider = ({
}
};
const modelCount =
modelProvider.chatModels.filter((m) => m.key !== 'error').length +
modelProvider.embeddingModels.filter((m) => m.key !== 'error').length;
const hasError =
modelProvider.chatModels.some((m) => m.key === 'error') ||
modelProvider.embeddingModels.some((m) => m.key === 'error');
return (
<div
key={modelProvider.id}
className="border border-light-200 dark:border-dark-200 rounded-lg overflow-hidden"
className="border border-light-200 dark:border-dark-200 rounded-lg overflow-hidden bg-light-primary dark:bg-dark-primary"
>
<div
className={cn(
'group px-5 py-4 flex flex-row justify-between w-full cursor-pointer hover:bg-light-secondary hover:dark:bg-dark-secondary transition duration-200 items-center',
!open && 'rounded-lg',
)}
onClick={() => setOpen(!open)}
>
<p className="text-black dark:text-white font-medium">
{modelProvider.name}
</p>
<div className="flex items-center gap-4">
<div className="flex flex-row items-center">
<UpdateProvider
fields={fields}
modelProvider={modelProvider}
setProviders={setProviders}
/>
<DeleteProvider
modelProvider={modelProvider}
setProviders={setProviders}
/>
<div className="px-5 py-3.5 flex flex-row justify-between w-full items-center border-b border-light-200 dark:border-dark-200 bg-light-secondary/30 dark:bg-dark-secondary/30">
<div className="flex items-center gap-2.5">
<div className="p-1.5 rounded-md bg-sky-500/10 dark:bg-sky-500/10">
<Plug2 size={14} className="text-sky-500" />
</div>
<ChevronDown
size={16}
className={cn(
open ? 'rotate-180' : '',
'transition duration-200 text-black/70 dark:text-white/70 group-hover:text-sky-500',
<div className="flex flex-col">
<p className="text-sm lg:text-sm text-black dark:text-white font-medium">
{modelProvider.name}
</p>
{modelCount > 0 && (
<p className="text-[10px] lg:text-[11px] text-black/50 dark:text-white/50">
{modelCount} model{modelCount !== 1 ? 's' : ''} configured
</p>
)}
</div>
</div>
<div className="flex flex-row items-center gap-1">
<UpdateProvider
fields={fields}
modelProvider={modelProvider}
setProviders={setProviders}
/>
<DeleteProvider
modelProvider={modelProvider}
setProviders={setProviders}
/>
</div>
</div>
<AnimatePresence>
{open && (
<motion.div
initial={{ height: 0, opacity: 0 }}
animate={{ height: 'auto', opacity: 1 }}
exit={{ height: 0, opacity: 0 }}
transition={{ duration: 0.1 }}
>
<div className="border-t border-light-200 dark:border-dark-200" />
<div className="flex flex-col gap-y-4 px-5 py-4">
{modelProvider.chatModels.length > 0 && (
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-xs text-black/70 dark:text-white/70">
Chat models
</p>
<AddModel
providerId={modelProvider.id}
setProviders={setProviders}
type="chat"
/>
<div className="flex flex-col gap-y-4 px-5 py-4">
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-[11px] lg:text-[11px] font-medium text-black/70 dark:text-white/70 uppercase tracking-wide">
Chat Models
</p>
{!modelProvider.chatModels.some((m) => m.key === 'error') && (
<AddModel
providerId={modelProvider.id}
setProviders={setProviders}
type="chat"
/>
)}
</div>
<div className="flex flex-col gap-2">
{modelProvider.chatModels.some((m) => m.key === 'error') ? (
<div className="flex flex-row items-center gap-2 text-xs lg:text-xs text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
modelProvider.chatModels.find((m) => m.key === 'error')
?.name
}
</span>
</div>
) : modelProvider.chatModels.filter((m) => m.key !== 'error')
.length === 0 && !hasError ? (
<div className="flex flex-col items-center justify-center py-4 px-4 rounded-lg border-2 border-dashed border-light-200 dark:border-dark-200 bg-light-secondary/20 dark:bg-dark-secondary/20">
<p className="text-xs text-black/50 dark:text-white/50 text-center">
No chat models configured
</p>
</div>
) : modelProvider.chatModels.filter((m) => m.key !== 'error')
.length > 0 ? (
<div className="flex flex-row flex-wrap gap-2">
{modelProvider.chatModels.map((model, index) => (
<div
key={`${modelProvider.id}-chat-${model.key}-${index}`}
className="flex flex-row items-center space-x-1.5 text-xs lg:text-xs text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5 border border-light-200 dark:border-dark-200"
>
<span>{model.name}</span>
<button
onClick={() => {
handleModelDelete('chat', model.key);
}}
className="hover:text-red-500 dark:hover:text-red-400 transition-colors"
>
<X size={12} />
</button>
</div>
<div className="flex flex-col gap-2">
{modelProvider.chatModels.some((m) => m.key === 'error') ? (
<div className="flex flex-row items-center gap-2 text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
modelProvider.chatModels.find(
(m) => m.key === 'error',
)?.name
}
</span>
</div>
) : (
<div className="flex flex-row flex-wrap gap-2">
{modelProvider.chatModels.map((model, index) => (
<div
key={`${modelProvider.id}-chat-${model.key}-${index}`}
className="flex flex-row items-center space-x-1 text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5"
>
<span>{model.name}</span>
<button
onClick={() => {
handleModelDelete('chat', model.key);
}}
>
<X size={12} />
</button>
</div>
))}
</div>
)}
))}
</div>
) : null}
</div>
</div>
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-[11px] lg:text-[11px] font-medium text-black/70 dark:text-white/70 uppercase tracking-wide">
Embedding Models
</p>
{!modelProvider.embeddingModels.some((m) => m.key === 'error') && (
<AddModel
providerId={modelProvider.id}
setProviders={setProviders}
type="embedding"
/>
)}
</div>
<div className="flex flex-col gap-2">
{modelProvider.embeddingModels.some((m) => m.key === 'error') ? (
<div className="flex flex-row items-center gap-2 text-xs lg:text-xs text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
modelProvider.embeddingModels.find((m) => m.key === 'error')
?.name
}
</span>
</div>
) : modelProvider.embeddingModels.filter((m) => m.key !== 'error')
.length === 0 && !hasError ? (
<div className="flex flex-col items-center justify-center py-4 px-4 rounded-lg border-2 border-dashed border-light-200 dark:border-dark-200 bg-light-secondary/20 dark:bg-dark-secondary/20">
<p className="text-xs text-black/50 dark:text-white/50 text-center">
No embedding models configured
</p>
</div>
) : modelProvider.embeddingModels.filter((m) => m.key !== 'error')
.length > 0 ? (
<div className="flex flex-row flex-wrap gap-2">
{modelProvider.embeddingModels.map((model, index) => (
<div
key={`${modelProvider.id}-embedding-${model.key}-${index}`}
className="flex flex-row items-center space-x-1.5 text-xs lg:text-xs text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5 border border-light-200 dark:border-dark-200"
>
<span>{model.name}</span>
<button
onClick={() => {
handleModelDelete('embedding', model.key);
}}
className="hover:text-red-500 dark:hover:text-red-400 transition-colors"
>
<X size={12} />
</button>
</div>
</div>
)}
{modelProvider.embeddingModels.length > 0 && (
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-xs text-black/70 dark:text-white/70">
Embedding models
</p>
<AddModel
providerId={modelProvider.id}
setProviders={setProviders}
type="embedding"
/>
</div>
<div className="flex flex-col gap-2">
{modelProvider.embeddingModels.some(
(m) => m.key === 'error',
) ? (
<div className="flex flex-row items-center gap-2 text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
modelProvider.embeddingModels.find(
(m) => m.key === 'error',
)?.name
}
</span>
</div>
) : (
<div className="flex flex-row flex-wrap gap-2">
{modelProvider.embeddingModels.map((model, index) => (
<div
key={`${modelProvider.id}-embedding-${model.key}-${index}`}
className="flex flex-row items-center space-x-1 text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5"
>
<span>{model.name}</span>
<button
onClick={() => {
handleModelDelete('embedding', model.key);
}}
>
<X size={12} />
</button>
</div>
))}
</div>
)}
</div>
</div>
)}
</div>
</motion.div>
)}
</AnimatePresence>
))}
</div>
) : null}
</div>
</div>
</div>
</div>
);
};
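Throughout this component a model whose key is 'error' acts as a sentinel: its name field carries the provider's failure message, and such entries are excluded from the configured-model count. A compact helper expressing the same checks (a sketch; names are assumed, not from the diff):

// 'error' is a sentinel key; its name holds the failure message.
const summarizeModels = (models: { key: string; name: string }[]) => ({
  count: models.filter((m) => m.key !== 'error').length,
  errorMessage: models.find((m) => m.key === 'error')?.name ?? null,
});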

View File

@@ -0,0 +1,98 @@
import Select from '@/components/ui/Select';
import { ConfigModelProvider } from '@/lib/config/types';
import { useChat } from '@/lib/hooks/useChat';
import { useState } from 'react';
import { toast } from 'sonner';
const ModelSelect = ({
providers,
type,
}: {
providers: ConfigModelProvider[];
type: 'chat' | 'embedding';
}) => {
const [selectedModel, setSelectedModel] = useState<string>(
type === 'chat'
? `${localStorage.getItem('chatModelProviderId')}/${localStorage.getItem('chatModelKey')}`
: `${localStorage.getItem('embeddingModelProviderId')}/${localStorage.getItem('embeddingModelKey')}`,
);
const [loading, setLoading] = useState(false);
const { setChatModelProvider, setEmbeddingModelProvider } = useChat();
const handleSave = async (newValue: string) => {
setLoading(true);
setSelectedModel(newValue);
try {
if (type === 'chat') {
const providerId = newValue.split('/')[0];
const modelKey = newValue.split('/').slice(1).join('/');
localStorage.setItem('chatModelProviderId', providerId);
localStorage.setItem('chatModelKey', modelKey);
setChatModelProvider({
providerId: providerId,
key: modelKey,
});
} else {
const providerId = newValue.split('/')[0];
const modelKey = newValue.split('/').slice(1).join('/');
localStorage.setItem('embeddingModelProviderId', providerId);
localStorage.setItem('embeddingModelKey', modelKey);
setEmbeddingModelProvider({
providerId: providerId,
key: modelKey,
});
}
} catch (error) {
console.error('Error saving config:', error);
toast.error('Failed to save configuration.');
} finally {
setLoading(false);
}
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-sm text-black dark:text-white">
Select {type === 'chat' ? 'Chat Model' : 'Embedding Model'}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{type === 'chat'
? 'Choose which model to use for generating responses'
: 'Choose which model to use for generating embeddings'}
</p>
</div>
<Select
value={selectedModel}
onChange={(event) => handleSave(event.target.value)}
options={
type === 'chat'
? providers.flatMap((provider) =>
provider.chatModels.map((model) => ({
value: `${provider.id}/${model.key}`,
label: `${provider.name} - ${model.name}`,
})),
)
: providers.flatMap((provider) =>
provider.embeddingModels.map((model) => ({
value: `${provider.id}/${model.key}`,
label: `${provider.name} - ${model.name}`,
})),
)
}
className="!text-xs lg:!text-[13px]"
loading={loading}
disabled={loading}
/>
</div>
</section>
);
};
export default ModelSelect;
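Note how the composite select value is decoded: the provider id is everything before the first '/', while the model key keeps any further slashes (useful when keys themselves contain '/', as some Ollama-style names do). A round-trip sketch; the example key is purely illustrative:

const encodeModel = (providerId: string, modelKey: string) =>
  `${providerId}/${modelKey}`;

const decodeModel = (value: string) => {
  const [providerId, ...rest] = value.split('/');
  return { providerId, modelKey: rest.join('/') };
};

// decodeModel(encodeModel('ollama-1', 'library/llama3:8b')) returns
// { providerId: 'ollama-1', modelKey: 'library/llama3:8b' }.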

View File

@@ -6,6 +6,7 @@ import {
UIConfigField,
} from '@/lib/config/types';
import ModelProvider from './ModelProvider';
import ModelSelect from './ModelSelect';
const Models = ({
fields,
@@ -17,25 +18,71 @@ const Models = ({
const [providers, setProviders] = useState<ConfigModelProvider[]>(values);
return (
<div className="flex-1 space-y-6 overflow-y-auto px-6 py-6">
<div className="flex flex-row justify-between items-center">
<p className="text-sm text-black/70 dark:text-white/70">
Manage model provider
<div className="flex-1 space-y-6 overflow-y-auto py-6">
<div className="flex flex-col px-6 gap-y-4">
<h3 className="text-xs lg:text-xs text-black/70 dark:text-white/70">
Select models
</h3>
<ModelSelect
providers={values.filter((p) =>
p.chatModels.some((m) => m.key != 'error'),
)}
type="chat"
/>
<ModelSelect
providers={values.filter((p) =>
p.embeddingModels.some((m) => m.key != 'error'),
)}
type="embedding"
/>
</div>
<div className="border-t border-light-200 dark:border-dark-200" />
<div className="flex flex-row justify-between items-center px-6 ">
<p className="text-xs lg:text-xs text-black/70 dark:text-white/70">
Manage connections
</p>
<AddProvider modelProviders={fields} setProviders={setProviders} />
</div>
<div className="flex flex-col gap-y-4">
{providers.map((provider) => (
<ModelProvider
key={`provider-${provider.id}`}
fields={
(fields.find((f) => f.key === provider.type)?.fields ??
[]) as UIConfigField[]
}
modelProvider={provider}
setProviders={setProviders}
/>
))}
<div className="flex flex-col px-6 gap-y-4">
{providers.length === 0 ? (
<div className="flex flex-col items-center justify-center py-12 px-4 rounded-lg border-2 border-dashed border-light-200 dark:border-dark-200 bg-light-secondary/10 dark:bg-dark-secondary/10">
<div className="p-3 rounded-full bg-sky-500/10 dark:bg-sky-500/10 mb-3">
<svg
xmlns="http://www.w3.org/2000/svg"
className="w-8 h-8 text-sky-500"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={2}
d="M13 10V3L4 14h7v7l9-11h-7z"
/>
</svg>
</div>
<p className="text-sm font-medium text-black/70 dark:text-white/70 mb-1">
No connections yet
</p>
<p className="text-xs text-black/50 dark:text-white/50 text-center max-w-sm mb-4">
Add your first connection to start using AI models. Connect to
OpenAI, Anthropic, Ollama, and more.
</p>
</div>
) : (
providers.map((provider) => (
<ModelProvider
key={`provider-${provider.id}`}
fields={
(fields.find((f) => f.key === provider.type)?.fields ??
[]) as UIConfigField[]
}
modelProvider={provider}
setProviders={setProviders}
/>
))
)}
</div>
</div>
);

View File

@@ -67,10 +67,10 @@ const UpdateProvider = ({
});
});
toast.success('Provider updated successfully.');
toast.success('Connection updated successfully.');
} catch (error) {
console.error('Error updating provider:', error);
toast.error('Failed to update provider.');
toast.error('Failed to update connection.');
} finally {
setLoading(false);
setOpen(false);
@@ -109,8 +109,8 @@ const UpdateProvider = ({
<DialogPanel className="w-full mx-4 lg:w-[600px] max-h-[85vh] flex flex-col border bg-light-primary dark:bg-dark-primary border-light-secondary dark:border-dark-secondary rounded-lg">
<form onSubmit={handleSubmit} className="flex flex-col flex-1">
<div className="px-6 pt-6 pb-4">
<h3 className="text-black/90 dark:text-white/90 font-medium">
Update provider
<h3 className="text-black/90 dark:text-white/90 font-medium text-sm">
Update connection
</h3>
</div>
<div className="border-t border-light-200 dark:border-dark-200" />
@@ -121,13 +121,13 @@ const UpdateProvider = ({
className="flex flex-col items-start space-y-2"
>
<label className="text-xs text-black/70 dark:text-white/70">
Name*
Connection Name*
</label>
<input
value={name}
onChange={(event) => setName(event.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={'Provider Name'}
placeholder={'Connection Name'}
type="text"
required={true}
/>
@@ -150,7 +150,7 @@ const UpdateProvider = ({
[field.key]: event.target.value,
}))
}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={
(field as StringUIConfigField).placeholder
}
@@ -166,12 +166,12 @@ const UpdateProvider = ({
<button
type="submit"
disabled={loading}
className="px-4 py-2 rounded-lg text-sm bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
className="px-4 py-2 rounded-lg text-[13px] bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
>
{loading ? (
<Loader2 className="animate-spin" size={16} />
) : (
'Update Provider'
'Update Connection'
)}
</button>
</div>

View File

@@ -0,0 +1,29 @@
import { UIConfigField } from '@/lib/config/types';
import SettingsField from '../SettingsField';
const Personalization = ({
fields,
values,
}: {
fields: UIConfigField[];
values: Record<string, any>;
}) => {
return (
<div className="flex-1 space-y-6 overflow-y-auto px-6 py-6">
{fields.map((field) => (
<SettingsField
key={field.key}
field={field}
value={
(field.scope === 'client'
? localStorage.getItem(field.key)
: values[field.key]) ?? field.default
}
dataAdd="personalization"
/>
))}
</div>
);
};
export default Personalization;

View File

@@ -1,7 +1,7 @@
import { UIConfigField } from '@/lib/config/types';
import SettingsField from '../SettingsField';
const General = ({
const Preferences = ({
fields,
values,
}: {
@@ -19,11 +19,11 @@ const General = ({
? localStorage.getItem(field.key)
: values[field.key]) ?? field.default
}
dataAdd="general"
dataAdd="preferences"
/>
))}
</div>
);
};
export default General;
export default Preferences;

View File

@@ -0,0 +1,21 @@
import { Settings } from 'lucide-react';
import { useState } from 'react';
import SettingsDialogue from './SettingsDialogue';
import { AnimatePresence } from 'framer-motion';
const SettingsButtonMobile = () => {
const [isOpen, setIsOpen] = useState<boolean>(false);
return (
<>
<button className="lg:hidden" onClick={() => setIsOpen(true)}>
<Settings size={18} />
</button>
<AnimatePresence>
{isOpen && <SettingsDialogue isOpen={isOpen} setIsOpen={setIsOpen} />}
</AnimatePresence>
</>
);
};
export default SettingsButtonMobile;

View File

@@ -1,6 +1,13 @@
import { Dialog, DialogPanel } from '@headlessui/react';
import { BrainCog, ChevronLeft, Search, Settings } from 'lucide-react';
import General from './Sections/General';
import {
ArrowLeft,
BrainCog,
ChevronLeft,
Search,
Sliders,
ToggleRight,
} from 'lucide-react';
import Preferences from './Sections/Preferences';
import { motion } from 'framer-motion';
import { useEffect, useState } from 'react';
import { toast } from 'sonner';
@@ -8,23 +15,36 @@ import Loader from '../ui/Loader';
import { cn } from '@/lib/utils';
import Models from './Sections/Models/Section';
import SearchSection from './Sections/Search';
import Select from '@/components/ui/Select';
import Personalization from './Sections/Personalization';
const sections = [
{
name: 'General',
description: 'Adjust common settings.',
icon: Settings,
component: General,
dataAdd: 'general',
key: 'preferences',
name: 'Preferences',
description: 'Customize your application preferences.',
icon: Sliders,
component: Preferences,
dataAdd: 'preferences',
},
{
key: 'personalization',
name: 'Personalization',
description: 'Customize the behavior and tone of the model.',
icon: ToggleRight,
component: Personalization,
dataAdd: 'personalization',
},
{
key: 'models',
name: 'Models',
description: 'Configure model settings.',
description: 'Connect to AI services and manage connections.',
icon: BrainCog,
component: Models,
dataAdd: 'modelProviders',
},
{
key: 'search',
name: 'Search',
description: 'Manage search settings.',
icon: Search,
@@ -42,7 +62,12 @@ const SettingsDialogue = ({
}) => {
const [isLoading, setIsLoading] = useState(true);
const [config, setConfig] = useState<any>(null);
const [activeSection, setActiveSection] = useState(sections[0]);
const [activeSection, setActiveSection] = useState<string>(sections[0].key);
const [selectedSection, setSelectedSection] = useState(sections[0]);
useEffect(() => {
setSelectedSection(sections.find((s) => s.key === activeSection)!);
}, [activeSection]);
useEffect(() => {
if (isOpen) {
@@ -83,14 +108,14 @@ const SettingsDialogue = ({
transition={{ duration: 0.1 }}
className="fixed inset-0 flex w-screen items-center justify-center p-4 bg-black/30 backdrop-blur-sm h-screen"
>
<DialogPanel className="space-y-4 border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary backdrop-blur-lg rounded-xl h-[calc(100vh-2%)] w-[calc(100vw-2%)] md:h-[calc(100vh-7%)] md:w-[calc(100vw-7%)] lg:h-[calc(100vh-20%)] lg:w-[calc(100vw-30%)]">
<DialogPanel className="space-y-4 border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary backdrop-blur-lg rounded-xl h-[calc(100vh-2%)] w-[calc(100vw-2%)] md:h-[calc(100vh-7%)] md:w-[calc(100vw-7%)] lg:h-[calc(100vh-20%)] lg:w-[calc(100vw-30%)] overflow-hidden flex flex-col">
{isLoading ? (
<div className="flex items-center justify-center h-full w-full">
<Loader />
</div>
) : (
<div className="flex flex-1 inset-0 h-full">
<div className="w-[240px] border-r border-white-200 dark:border-dark-200 h-full px-3 pt-3 flex flex-col">
<div className="flex flex-1 inset-0 h-full overflow-hidden">
<div className="hidden lg:flex flex-col w-[240px] border-r border-white-200 dark:border-dark-200 h-full px-3 pt-3 overflow-y-auto">
<button
onClick={() => setIsOpen(false)}
className="group flex flex-row items-center hover:bg-light-200 hover:dark:bg-dark-200 p-2 rounded-lg"
@@ -109,11 +134,11 @@ const SettingsDialogue = ({
key={section.dataAdd}
className={cn(
`flex flex-row items-center space-x-2 px-2 py-1.5 rounded-lg w-full text-sm hover:bg-light-200 hover:dark:bg-dark-200 transition duration-200 active:scale-95`,
activeSection.name === section.name
activeSection === section.key
? 'bg-light-200 dark:bg-dark-200 text-black/90 dark:text-white/90'
: ' text-black/70 dark:text-white/70',
)}
onClick={() => setActiveSection(section)}
onClick={() => setActiveSection(section.key)}
>
<section.icon size={17} />
<p>{section.name}</p>
@@ -121,23 +146,50 @@ const SettingsDialogue = ({
))}
</div>
</div>
<div className="w-full">
{activeSection.component && (
<div className="flex h-full flex-col">
<div className="border-b border-light-200/60 px-6 pb-6 pt-8 dark:border-dark-200/60">
<div className="w-full flex flex-col overflow-hidden">
<div className="flex flex-row lg:hidden w-full justify-between px-[20px] my-4 flex-shrink-0">
<button
onClick={() => setIsOpen(false)}
className="group flex flex-row items-center hover:bg-light-200 hover:dark:bg-dark-200 rounded-lg mr-[40%]"
>
<ArrowLeft
size={18}
className="text-black/50 dark:text-white/50 group-hover:text-black/70 group-hover:dark:text-white/70"
/>
</button>
<Select
options={sections.map((section) => {
return {
value: section.key,
key: section.key,
label: section.name,
};
})}
value={activeSection}
onChange={(e) => {
setActiveSection(e.target.value);
}}
className="!text-xs lg:!text-sm"
/>
</div>
{selectedSection.component && (
<div className="flex flex-1 flex-col overflow-hidden">
<div className="border-b border-light-200/60 px-6 pb-6 lg:pt-6 dark:border-dark-200/60 flex-shrink-0">
<div className="flex flex-col">
<h4 className="font-medium text-black dark:text-white">
{activeSection.name}
<h4 className="font-medium text-black dark:text-white text-sm lg:text-sm">
{selectedSection.name}
</h4>
<p className="text-xs text-black/50 dark:text-white/50">
{activeSection.description}
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{selectedSection.description}
</p>
</div>
</div>
<activeSection.component
fields={config.fields[activeSection.dataAdd]}
values={config.values[activeSection.dataAdd]}
/>
<div className="flex-1 overflow-y-auto">
<selectedSection.component
fields={config.fields[selectedSection.dataAdd]}
values={config.values[selectedSection.dataAdd]}
/>
</div>
</div>
)}
</div>
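Mirroring activeSection into selectedSection through useEffect means the mirror lags one render behind the key. Deriving the object instead would avoid the extra state; a possible simplification using the same names as the diff:

// Derive the section object from the key instead of mirroring it in state.
const selectedSection = useMemo(
  () => sections.find((s) => s.key === activeSection) ?? sections[0],
  [activeSection],
);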

View File

@@ -1,6 +1,8 @@
import {
SelectUIConfigField,
StringUIConfigField,
SwitchUIConfigField,
TextareaUIConfigField,
UIConfigField,
} from '@/lib/config/types';
import { useState } from 'react';
@@ -8,6 +10,7 @@ import Select from '../ui/Select';
import { toast } from 'sonner';
import { useTheme } from 'next-themes';
import { Loader2 } from 'lucide-react';
import { Switch } from '@headlessui/react';
const SettingsSelect = ({
field,
@@ -58,11 +61,13 @@ const SettingsSelect = ({
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-5">
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-base text-black dark:text-white">{field.name}</h4>
<p className="text-xs text-black/50 dark:text-white/50">
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
@@ -73,7 +78,7 @@ const SettingsSelect = ({
value: option.value,
label: option.name,
}))}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60 cursor-pointer capitalize pr-12"
className="!text-xs lg:!text-sm"
loading={loading}
disabled={loading}
/>
@@ -127,11 +132,13 @@ const SettingsInput = ({
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-5">
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-base text-black dark:text-white">{field.name}</h4>
<p className="text-xs text-black/50 dark:text-white/50">
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
@@ -140,7 +147,7 @@ const SettingsInput = ({
value={value ?? field.default ?? ''}
onChange={(event) => setValue(event.target.value)}
onBlur={(event) => handleSave(event.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={field.placeholder}
type="text"
disabled={loading}
@@ -156,6 +163,155 @@ const SettingsInput = ({
);
};
const SettingsTextarea = ({
field,
value,
setValue,
dataAdd,
}: {
field: TextareaUIConfigField;
value?: any;
setValue: (value: any) => void;
dataAdd: string;
}) => {
const [loading, setLoading] = useState(false);
const handleSave = async (newValue: any) => {
setLoading(true);
setValue(newValue);
try {
if (field.scope === 'client') {
localStorage.setItem(field.key, newValue);
} else {
const res = await fetch('/api/config', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
key: `${dataAdd}.${field.key}`,
value: newValue,
}),
});
if (!res.ok) {
console.error('Failed to save config:', await res.text());
throw new Error('Failed to save configuration');
}
}
} catch (error) {
console.error('Error saving config:', error);
toast.error('Failed to save configuration.');
} finally {
setTimeout(() => setLoading(false), 150);
}
};
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
<div className="relative">
<textarea
value={value ?? field.default ?? ''}
onChange={(event) => setValue(event.target.value)}
onBlur={(event) => handleSave(event.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={field.placeholder}
rows={4}
disabled={loading}
/>
{loading && (
<span className="pointer-events-none absolute right-3 translate-y-3 text-black/40 dark:text-white/40">
<Loader2 className="h-4 w-4 animate-spin" />
</span>
)}
</div>
</div>
</section>
);
};
const SettingsSwitch = ({
field,
value,
setValue,
dataAdd,
}: {
field: SwitchUIConfigField;
value?: any;
setValue: (value: any) => void;
dataAdd: string;
}) => {
const [loading, setLoading] = useState(false);
const handleSave = async (newValue: boolean) => {
setLoading(true);
setValue(newValue);
try {
if (field.scope === 'client') {
localStorage.setItem(field.key, String(newValue));
} else {
const res = await fetch('/api/config', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
key: `${dataAdd}.${field.key}`,
value: newValue,
}),
});
if (!res.ok) {
console.error('Failed to save config:', await res.text());
throw new Error('Failed to save configuration');
}
}
} catch (error) {
console.error('Error saving config:', error);
toast.error('Failed to save configuration.');
} finally {
setTimeout(() => setLoading(false), 150);
}
};
const isChecked = value === true || value === 'true';
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="flex flex-row items-center space-x-3 lg:space-x-5 w-full justify-between">
<div>
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
<Switch
checked={isChecked}
onChange={handleSave}
disabled={loading}
className="group relative flex h-6 w-12 shrink-0 cursor-pointer rounded-full bg-white/10 p-1 duration-200 ease-in-out focus:outline-none transition-colors disabled:opacity-60 disabled:cursor-not-allowed data-[checked]:bg-sky-500"
>
<span
aria-hidden="true"
className="pointer-events-none inline-block size-4 translate-x-0 rounded-full bg-white shadow-lg ring-0 transition duration-200 ease-in-out group-data-[checked]:translate-x-6"
/>
</Switch>
</div>
</section>
);
};
const SettingsField = ({
field,
value,
@@ -186,6 +342,24 @@ const SettingsField = ({
dataAdd={dataAdd}
/>
);
case 'textarea':
return (
<SettingsTextarea
field={field}
value={val}
setValue={setVal}
dataAdd={dataAdd}
/>
);
case 'switch':
return (
<SettingsSwitch
field={field}
value={val}
setValue={setVal}
dataAdd={dataAdd}
/>
);
default:
return <div>Unsupported field type: {field.type}</div>;
}
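The exact shape of SwitchUIConfigField isn't shown in this diff; assuming it mirrors the other UIConfigField variants, a field definition that would drive the new switch renderer might look like this (hypothetical key and copy):

// Hypothetical field definition -- the shape is assumed from the other
// UIConfigField variants in this diff, not confirmed by the source.
const exampleSwitchField = {
  type: 'switch' as const,
  key: 'autoMediaSearch',
  name: 'Automatic media search',
  description: 'Automatically run image and video searches for answers.',
  scope: 'client' as const,
  default: true,
};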

View File

@@ -9,6 +9,7 @@ import { useEffect, useState } from 'react';
import { toast } from 'sonner';
import AddProvider from '../Settings/Sections/Models/AddProviderDialog';
import ModelProvider from '../Settings/Sections/Models/ModelProvider';
import ModelSelect from '@/components/Settings/Sections/Models/ModelSelect';
const SetupConfig = ({
configSections,
@@ -62,7 +63,11 @@ const SetupConfig = ({
}
};
const hasProviders = providers.length > 0;
const visibleProviders = providers.filter(
(p) => p.name.toLowerCase() !== 'transformers',
);
const hasProviders =
visibleProviders.filter((p) => p.chatModels.length > 0).length > 0;
return (
<div className="w-[95vw] md:w-[80vw] lg:w-[65vw] mx-auto px-2 sm:px-4 md:px-6 flex flex-col space-y-6">
@@ -80,10 +85,10 @@ const SetupConfig = ({
<div className="flex flex-row justify-between items-center mb-4 md:mb-6 pb-3 md:pb-4 border-b border-light-200 dark:border-dark-200">
<div>
<p className="text-xs sm:text-sm font-medium text-black dark:text-white">
Manage Providers
Manage Connections
</p>
<p className="text-[10px] sm:text-xs text-black/50 dark:text-white/50 mt-0.5">
Add and configure your model providers
Add connections to access AI models
</p>
</div>
<AddProvider
@@ -99,14 +104,17 @@ const SetupConfig = ({
Loading providers...
</p>
</div>
) : providers.length === 0 ? (
) : visibleProviders.length === 0 ? (
<div className="flex flex-col items-center justify-center py-8 md:py-12 text-center">
<p className="text-xs sm:text-sm font-medium text-black/70 dark:text-white/70">
No providers configured
No connections configured
</p>
<p className="text-[10px] sm:text-xs text-black/50 dark:text-white/50 mt-1">
Click &quot;Add Connection&quot; above to get started
</p>
</div>
) : (
providers.map((provider) => (
visibleProviders.map((provider) => (
<ModelProvider
key={`provider-${provider.id}`}
fields={
@@ -124,8 +132,57 @@ const SetupConfig = ({
</motion.div>
)}
{setupState === 3 && (
<motion.div
initial={{ opacity: 0, y: 20 }}
animate={{
opacity: 1,
y: 0,
transition: { duration: 0.5, delay: 0.1 },
}}
className="w-full h-[calc(95vh-80px)] bg-light-primary dark:bg-dark-primary border border-light-200 dark:border-dark-200 rounded-xl shadow-sm flex flex-col overflow-hidden"
>
<div className="flex-1 overflow-y-auto px-3 sm:px-4 md:px-6 py-4 md:py-6">
<div className="flex flex-row justify-between items-center mb-4 md:mb-6 pb-3 md:pb-4 border-b border-light-200 dark:border-dark-200">
<div>
<p className="text-xs sm:text-sm font-medium text-black dark:text-white">
Select models
</p>
<p className="text-[10px] sm:text-xs text-black/50 dark:text-white/50 mt-0.5">
Select the models you wish to use.
</p>
</div>
</div>
<div className="space-y-3 md:space-y-4">
<ModelSelect providers={providers} type="chat" />
<ModelSelect providers={providers} type="embedding" />
</div>
</div>
</motion.div>
)}
<div className="flex flex-row items-center justify-between pt-2">
<a></a>
{setupState === 2 && (
<motion.button
initial={{ opacity: 0, x: 10 }}
animate={{
opacity: 1,
x: 0,
transition: { duration: 0.5 },
}}
onClick={() => {
setSetupState(3);
}}
disabled={!hasProviders || isLoading}
className="flex flex-row items-center gap-1.5 md:gap-2 px-3 md:px-5 py-2 md:py-2.5 rounded-lg bg-[#24A0ED] text-white hover:bg-[#1e8fd1] active:scale-95 transition-all duration-200 font-medium text-xs sm:text-sm disabled:bg-light-200 dark:disabled:bg-dark-200 disabled:text-black/40 dark:disabled:text-white/40 disabled:cursor-not-allowed disabled:active:scale-100"
>
<span>Next</span>
<ArrowRight className="w-4 h-4 md:w-[18px] md:h-[18px]" />
</motion.button>
)}
{setupState === 3 && (
<motion.button
initial={{ opacity: 0, x: 10 }}
animate={{

View File

@@ -3,7 +3,7 @@ import { Loader2, ChevronDown } from 'lucide-react';
import { SelectHTMLAttributes, forwardRef } from 'react';
interface SelectProps extends SelectHTMLAttributes<HTMLSelectElement> {
options: { value: string; label: string; disabled?: boolean }[];
options: { value: any; label: string; disabled?: boolean }[];
loading?: boolean;
}
@@ -21,7 +21,7 @@ export const Select = forwardRef<HTMLSelectElement, SelectProps>(
ref={ref}
disabled={disabled || loading}
className={cn(
'bg-light-secondary dark:bg-dark-secondary px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg text-sm appearance-none w-full pr-10',
'bg-light-secondary dark:bg-dark-secondary px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg appearance-none w-full pr-10 text-xs lg:text-sm',
className,
)}
>
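Widening the option value from string to any removes compile-time checking on callers. If stricter typing is wanted later, a generic option type would keep both flexibility and safety; a sketch, not part of the diff:

// Generic alternative: callers pick the value type explicitly.
interface SelectOption<T extends string = string> {
  value: T;
  label: string;
  disabled?: boolean;
}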

View File

@@ -0,0 +1,65 @@
/* I don't think this can be classified as an agent, but I'll keep it here to keep the structure consistent. */
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { searchSearxng } from '@/lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '@/lib/outputParsers/lineOutputParser';
import { imageSearchFewShots, imageSearchPrompt } from '@/lib/prompts/media/image';
type ImageSearchChainInput = {
chatHistory: BaseMessage[];
query: string;
};
type ImageSearchResult = {
img_src: string;
url: string;
title: string;
};
const outputParser = new LineOutputParser({
key: 'query',
});
const searchImages = async (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const chatPrompt = await ChatPromptTemplate.fromMessages([
new SystemMessage(imageSearchPrompt),
...imageSearchFewShots,
new HumanMessage(`<conversation>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<follow_up>\n${input.query}\n</follow_up>`)
]).formatMessages({});
const res = await llm.invoke(chatPrompt);
const query = await outputParser.invoke(res);
const searchRes = await searchSearxng(query!, {
engines: ['bing images', 'google images'],
});
const images: ImageSearchResult[] = [];
searchRes.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
};
export default searchImages;
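
A minimal usage sketch for the migrated chain (llm stands for any already-loaded BaseChatModel; the query is illustrative):

// Sketch: llm is an already-loaded BaseChatModel instance.
const images = await searchImages(
  { chatHistory: [], query: 'Show me pictures of the Eiffel Tower' },
  llm,
);
// Returns up to 10 { img_src, url, title } results from Bing/Google Images.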

View File

@@ -0,0 +1,65 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { searchSearxng } from '@/lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '@/lib/outputParsers/lineOutputParser';
import { videoSearchFewShots, videoSearchPrompt } from '@/lib/prompts/media/videos';
type VideoSearchChainInput = {
chatHistory: BaseMessage[];
query: string;
};
type VideoSearchResult = {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const outputParser = new LineOutputParser({
key: 'query',
});
const searchVideos = async (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const chatPrompt = await ChatPromptTemplate.fromMessages([
new SystemMessage(videoSearchPrompt),
...videoSearchFewShots,
new HumanMessage(`<conversation>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<follow_up>\n${input.query}\n</follow_up>`),
]).formatMessages({});
const res = await llm.invoke(chatPrompt);
const query = await outputParser.invoke(res);
const searchRes = await searchSearxng(query!, {
engines: ['youtube'],
});
const videos: VideoSearchResult[] = [];
searchRes.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
};
export default searchVideos;

View File

@@ -0,0 +1,72 @@
import z from 'zod';
import { ClassifierInput, ClassifierOutput } from '../types';
import { WidgetRegistry } from '../widgets';
import { IntentRegistry } from './intents';
import { getClassifierPrompt } from '@/lib/prompts/search/classifier';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
class Classifier {
async classify(input: ClassifierInput): Promise<ClassifierOutput> {
const availableIntents = IntentRegistry.getAvailableIntents({
sources: input.enabledSources,
});
const availableWidgets = WidgetRegistry.getAll();
const classificationSchema = z.object({
skipSearch: z
.boolean()
.describe(
'Set to true to SKIP search. Skip ONLY when: (1) widgets alone fully answer the query (e.g., weather, stocks, calculator), (2) simple greetings or writing tasks (NOT questions). Set to false for ANY question or information request.',
),
standaloneFollowUp: z
.string()
.describe(
'A self-contained, context-independent reformulation of the user\'s question. Must include all necessary context from chat history, replace pronouns with specific nouns, and be clear enough to answer without seeing the conversation. Keep the same complexity as the original question.',
),
intents: z
.array(z.enum(availableIntents.map((i) => i.name) as [string, ...string[]]))
.describe(
'The intent(s) that best describe how to fulfill the user\'s query. Can include multiple intents (e.g., [\'web_search\', \'widget_response\'] for \'weather in NYC and recent news\'). Always include at least one intent when applicable.',
),
widgets: z
.array(z.union(availableWidgets.map((w) => w.schema) as [z.ZodTypeAny, z.ZodTypeAny, ...z.ZodTypeAny[]]))
.describe(
'Widgets that can display structured data to answer (fully or partially) the query. Include all applicable widgets regardless of skipSearch value.',
),
});
const classifierPrompt = getClassifierPrompt({
intentDesc: IntentRegistry.getDescriptions({
sources: input.enabledSources,
}),
widgetDesc: WidgetRegistry.getDescriptions(),
});
const res = await input.llm.generateObject<
z.infer<typeof classificationSchema>
>({
messages: [
{
role: 'system',
content: classifierPrompt,
},
{
role: 'user',
content: `<conversation>${formatChatHistoryAsString(input.chatHistory)}</conversation>\n\n<query>${input.query}</query>`,
},
],
schema: classificationSchema,
});
res.widgets = res.widgets.map((widgetConfig) => {
return {
type: widgetConfig.type,
params: widgetConfig,
};
});
return res as ClassifierOutput;
}
}
export default Classifier;
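
A sketch of how the classifier might be invoked (assuming llm implements the new BaseLLM interface and chat history uses the project's Message type):

const classifier = new Classifier();
const classification = await classifier.classify({
  llm, // any BaseLLM implementation, e.g. OllamaLLM below
  enabledSources: ['web', 'discussions'],
  query: 'What is the weather in Tokyo?',
  chatHistory: [],
});
// classification.skipSearch, .intents and .widgets then drive the pipeline.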

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const academicSearchIntent: Intent = {
name: 'academic_search',
description:
'Use this intent to find scholarly articles, research papers, and academic resources when the user is seeking credible and authoritative information on a specific topic.',
requiresSearch: true,
enabled: (config) => config.sources.includes('academic'),
};
export default academicSearchIntent;

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const discussionSearchIntent: Intent = {
name: 'discussion_search',
description:
'Use this intent to search through discussion forums, community boards, or social media platforms when the user is looking for opinions, experiences, or community-driven information on a specific topic.',
requiresSearch: true,
enabled: (config) => config.sources.includes('discussions'),
};
export default discussionSearchIntent;

View File

@@ -0,0 +1,14 @@
import academicSearchIntent from './academicSearch';
import discussionSearchIntent from './discussionSearch';
import IntentRegistry from './registry';
import webSearchIntent from './webSearch';
import widgetResponseIntent from './widgetResponse';
import writingTaskIntent from './writingTask';
IntentRegistry.register(webSearchIntent);
IntentRegistry.register(academicSearchIntent);
IntentRegistry.register(discussionSearchIntent);
IntentRegistry.register(widgetResponseIntent);
IntentRegistry.register(writingTaskIntent);
export { IntentRegistry };

View File

@@ -0,0 +1,29 @@
import { Intent, SearchSources } from '../../types';
class IntentRegistry {
private static intents = new Map<string, Intent>();
static register(intent: Intent) {
this.intents.set(intent.name, intent);
}
static get(name: string): Intent | undefined {
return this.intents.get(name);
}
static getAvailableIntents(config: { sources: SearchSources[] }): Intent[] {
return Array.from(this.intents.values()).filter((intent) =>
intent.enabled(config),
);
}
static getDescriptions(config: { sources: SearchSources[] }): string {
const availableIntents = this.getAvailableIntents(config);
return availableIntents
.map((intent) => `${intent.name}: ${intent.description}`)
.join('\n\n');
}
}
export default IntentRegistry;
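
For reference, a lookup under a web-only source configuration would resolve like this (a sketch, not part of the diff):

// academic_search and discussion_search are filtered out by enabled().
const available = IntentRegistry.getAvailableIntents({ sources: ['web'] });
// -> web_search, widget_response, writing_task
const intentDesc = IntentRegistry.getDescriptions({ sources: ['web'] });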

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const webSearchIntent: Intent = {
name: 'web_search',
description:
'Use this intent to find current information from the web when the user is asking a question or needs up-to-date information that cannot be provided by widgets or other intents.',
requiresSearch: true,
enabled: (config) => config.sources.includes('web'),
};
export default webSearchIntent;

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const widgetResponseIntent: Intent = {
name: 'widget_response',
description:
'Use this intent to respond to user queries using available widgets when the required information can be obtained from them.',
requiresSearch: false,
enabled: (config) => true,
};
export default widgetResponseIntent;

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const writingTaskIntent: Intent = {
name: 'writing_task',
description:
'Use this intent to assist users with writing tasks such as drafting emails, creating documents, or generating content based on their instructions or greetings.',
requiresSearch: false,
enabled: (config) => true,
};
export default writingTaskIntent;

View File

@@ -0,0 +1,65 @@
import { EventEmitter } from 'stream';
import z from 'zod';
import BaseLLM from '../../models/base/llm';
import BaseEmbedding from '@/lib/models/base/embedding';
export type SearchSources = 'web' | 'discussions' | 'academic';
export type SearchAgentConfig = {
sources: SearchSources[];
llm: BaseLLM<any>;
embedding: BaseEmbedding<any>;
};
export type SearchAgentInput = {
chatHistory: Message[];
followUp: string;
config: SearchAgentConfig;
};
export interface Intent {
name: string;
description: string;
requiresSearch: boolean;
enabled: (config: { sources: SearchSources[] }) => boolean;
}
export type Widget<TSchema extends z.ZodObject<any> = z.ZodObject<any>> = {
name: string;
description: string;
schema: TSchema;
execute: (
params: z.infer<TSchema>,
additionalConfig: AdditionalConfig,
) => Promise<WidgetOutput>;
};
export type WidgetConfig = {
type: string;
params: Record<string, any>;
};
export type WidgetOutput = {
type: string;
data: any;
};
export type ClassifierInput = {
llm: BaseLLM<any>;
enabledSources: SearchSources[];
query: string;
chatHistory: Message[];
};
export type ClassifierOutput = {
skipSearch: boolean;
standaloneFollowUp: string;
intents: string[];
widgets: WidgetConfig[];
};
export type AdditionalConfig = {
llm: BaseLLM<any>;
embedding: BaseEmbedding<any>;
emitter: EventEmitter;
};
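
To illustrate the Widget contract, a hypothetical widget satisfying these types could look like the sketch below (clockWidget is illustrative, not part of the diff):

const clockWidgetSchema = z.object({
  type: z.literal('clock'),
  timezone: z.string().describe('IANA timezone, e.g. "Asia/Tokyo"'),
});
const clockWidget: Widget<typeof clockWidgetSchema> = {
  name: 'clock',
  description: 'Returns the current time in a given timezone.',
  schema: clockWidgetSchema,
  execute: async (params, _config) => ({
    type: 'clock',
    data: new Date().toLocaleString('en-US', { timeZone: params.timezone }),
  }),
};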

View File

@@ -0,0 +1,6 @@
import WidgetRegistry from './registry';
import weatherWidget from './weatherWidget';
WidgetRegistry.register(weatherWidget);
export { WidgetRegistry };

View File

@@ -0,0 +1,65 @@
import {
AdditionalConfig,
SearchAgentConfig,
Widget,
WidgetConfig,
WidgetOutput,
} from '../types';
class WidgetRegistry {
private static widgets = new Map<string, Widget>();
static register(widget: Widget<any>) {
this.widgets.set(widget.name, widget);
}
static get(name: string): Widget | undefined {
return this.widgets.get(name);
}
static getAll(): Widget[] {
return Array.from(this.widgets.values());
}
static getDescriptions(): string {
return Array.from(this.widgets.values())
.map((widget) => `${widget.name}: ${widget.description}`)
.join('\n\n');
}
static async execute(
name: string,
params: any,
config: AdditionalConfig,
): Promise<WidgetOutput> {
const widget = this.get(name);
if (!widget) {
throw new Error(`Widget with name ${name} not found`);
}
return widget.execute(params, config);
}
static async executeAll(
widgets: WidgetConfig[],
additionalConfig: AdditionalConfig,
): Promise<WidgetOutput[]> {
const results: WidgetOutput[] = [];
await Promise.all(
widgets.map(async (widgetConfig) => {
const output = await this.execute(
widgetConfig.type,
widgetConfig.params,
additionalConfig,
);
results.push(output);
}),
);
return results;
}
}
export default WidgetRegistry;
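
A sketch of driving the registry from classifier output (names assumed from the types above). Note that executeAll pushes each result as it settles, so output order follows completion order, not input order:

const outputs = await WidgetRegistry.executeAll(
  classification.widgets, // WidgetConfig[] produced by the classifier
  { llm, embedding, emitter }, // AdditionalConfig
);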

View File

@@ -0,0 +1,123 @@
import z from 'zod';
import { Widget } from '../types';
const WeatherWidgetSchema = z.object({
type: z.literal('weather'),
location: z
.string()
.describe(
'Human-readable location name (e.g., "New York, NY, USA", "London, UK"). Use this OR lat/lon coordinates, never both. Leave empty string if providing coordinates.',
),
lat: z
.number()
.describe(
'Latitude coordinate in decimal degrees (e.g., 40.7128). Only use when location name is empty.',
),
lon: z
.number()
.describe(
'Longitude coordinate in decimal degrees (e.g., -74.0060). Only use when location name is empty.',
),
});
const weatherWidget = {
name: 'weather',
description:
'Provides current weather information for a specified location. It can return details such as temperature, humidity, wind speed, and weather conditions. It needs either a location name or latitude/longitude coordinates to function.',
schema: WeatherWidgetSchema,
execute: async (params, _) => {
if (
params.location === '' &&
(params.lat === undefined || params.lon === undefined)
) {
throw new Error(
'Either location name or both latitude and longitude must be provided.',
);
}
if (params.location !== '') {
const openStreetMapUrl = `https://nominatim.openstreetmap.org/search?q=${encodeURIComponent(params.location)}&format=json&limit=1`;
const locationRes = await fetch(openStreetMapUrl, {
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
});
const data = await locationRes.json();
const location = data[0];
if (!location) {
throw new Error(
`Could not find coordinates for location: ${params.location}`,
);
}
const weatherRes = await fetch(
`https://api.open-meteo.com/v1/forecast?latitude=${location.lat}&longitude=${location.lon}&current_weather=true`,
{
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
},
);
const weatherData = await weatherRes.json();
/* This is a very simple implementation just to verify the backend works; once we're on the frontend, we'll return more data. */
return {
type: 'weather',
data: {
location: params.location,
latitude: location.lat,
longitude: location.lon,
weather: weatherData.current_weather,
},
};
} else if (params.lat !== undefined && params.lon !== undefined) {
const [weatherRes, locationRes] = await Promise.all([
fetch(
`https://api.open-meteo.com/v1/forecast?latitude=${params.lat}&longitude=${params.lon}&current_weather=true`,
{
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
},
),
fetch(
`https://nominatim.openstreetmap.org/reverse?lat=${params.lat}&lon=${params.lon}&format=json`,
{
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
},
),
]);
const weatherData = await weatherRes.json();
const locationData = await locationRes.json();
return {
type: 'weather',
data: {
location: locationData.display_name,
latitude: params.lat,
longitude: params.lon,
weather: weatherData.current_weather,
},
};
}
return {
type: 'weather',
data: null,
};
},
} satisfies Widget<typeof WeatherWidgetSchema>;
export default weatherWidget;
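
A hypothetical direct invocation (note the schema declares location, lat and lon all as required, so a location-based call still passes placeholder coordinates):

const weather = await weatherWidget.execute(
  { type: 'weather', location: 'London, UK', lat: 0, lon: 0 },
  additionalConfig, // llm/embedding/emitter as configured
);
// weather.data.weather carries Open-Meteo's current_weather payload.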

View File

@@ -0,0 +1,32 @@
import ListLineOutputParser from '@/lib/outputParsers/listLineOutputParser';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { suggestionGeneratorPrompt } from '@/lib/prompts/suggestions';
type SuggestionGeneratorInput = {
chatHistory: BaseMessage[];
};
const outputParser = new ListLineOutputParser({
key: 'suggestions',
});
const generateSuggestions = async (
input: SuggestionGeneratorInput,
llm: BaseChatModel,
) => {
const chatPrompt = await ChatPromptTemplate.fromMessages([
new SystemMessage(suggestionGeneratorPrompt),
new HumanMessage(`<conversation>${formatChatHistoryAsString(input.chatHistory)}</conversation>`),
]).formatMessages({});
const res = await llm.invoke(chatPrompt);
const suggestions = await outputParser.invoke(res);
return suggestions;
};
export default generateSuggestions;
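
Usage stays a one-liner (a sketch; history is an existing BaseMessage[] and llm a loaded chat model):

const suggestions = await generateSuggestions({ chatHistory: history }, llm);
// suggestions: string[] parsed from the <suggestions> block.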

View File

@@ -1,105 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type ImageSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface ImageSearchResult {
img_src: string;
url: string;
title: string;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: ImageSearchChainInput) => {
return input.query;
},
}),
ChatPromptTemplate.fromMessages([
['system', imageSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
],
['assistant', '<query>A cat</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
],
['assistant', '<query>Car working</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images: ImageSearchResult[] = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
}),
]);
};
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const imageSearchChain = createImageSearchChain(llm);
return imageSearchChain.invoke(input);
};
export default handleImageSearch;

View File

@@ -1,55 +0,0 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestion should be relevant to the conversation that can be used by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length and are informative and relevant to the conversation.
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:
<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>
Conversation:
{chat_history}
`;
type SuggestionGeneratorInput = {
chat_history: BaseMessage[];
};
const outputParser = new ListLineOutputParser({
key: 'suggestions',
});
const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: SuggestionGeneratorInput) =>
formatChatHistoryAsString(input.chat_history),
}),
PromptTemplate.fromTemplate(suggestionGeneratorPrompt),
llm,
outputParser,
]);
};
const generateSuggestions = (
input: SuggestionGeneratorInput,
llm: BaseChatModel,
) => {
(llm as unknown as ChatOpenAI).temperature = 0;
const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
return suggestionGeneratorChain.invoke(input);
};
export default generateSuggestions;

View File

@@ -1,110 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const videoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface VideoSearchResult {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
ChatPromptTemplate.fromMessages([
['system', videoSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
],
['assistant', '<query>How does a car work?</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
],
['assistant', '<query>Theory of relativity</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos: VideoSearchResult[] = [];
res.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const videoSearchChain = createVideoSearchChain(llm);
return videoSearchChain.invoke(input);
};
export default handleVideoSearch;

View File

@@ -6,11 +6,8 @@ const getClientConfig = (key: string, defaultVal?: any) => {
export const getTheme = () => getClientConfig('theme', 'dark');
export const getAutoImageSearch = () =>
Boolean(getClientConfig('autoImageSearch', 'true'));
export const getAutoVideoSearch = () =>
Boolean(getClientConfig('autoVideoSearch', 'true'));
export const getAutoMediaSearch = () =>
getClientConfig('autoMediaSearch', 'true') === 'true';
export const getSystemInstructions = () =>
getClientConfig('systemInstructions', '');

View File

@@ -13,14 +13,15 @@ class ConfigManager {
currentConfig: Config = {
version: this.configVersion,
setupComplete: false,
general: {},
preferences: {},
personalization: {},
modelProviders: [],
search: {
searxngURL: '',
},
};
uiConfigSections: UIConfigSections = {
general: [
preferences: [
{
name: 'Theme',
key: 'theme',
@@ -40,6 +41,46 @@ class ConfigManager {
default: 'dark',
scope: 'client',
},
{
name: 'Measurement Unit',
key: 'measureUnit',
type: 'select',
options: [
{
name: 'Imperial',
value: 'Imperial',
},
{
name: 'Metric',
value: 'Metric',
},
],
required: false,
description: 'Choose between Metric and Imperial measurement unit.',
default: 'Metric',
scope: 'client',
},
{
name: 'Auto video & image search',
key: 'autoMediaSearch',
type: 'switch',
required: false,
description: 'Automatically search for relevant images and videos.',
default: true,
scope: 'client',
},
],
personalization: [
{
name: 'System Instructions',
key: 'systemInstructions',
type: 'textarea',
required: false,
description: 'Add custom behavior or tone for the model.',
placeholder:
'e.g., "Respond in a friendly and concise tone" or "Use British English and format answers as bullet points."',
scope: 'client',
},
],
modelProviders: [],
search: [
@@ -124,7 +165,7 @@ class ConfigManager {
providerConfigSections.forEach((provider) => {
const newProvider: ConfigModelProvider & { required?: string[] } = {
id: crypto.randomUUID(),
name: `${provider.name} ${Math.floor(Math.random() * 1000)}`,
name: `${provider.name}`,
type: provider.key,
chatModels: [],
embeddingModels: [],

View File

@@ -32,10 +32,23 @@ type PasswordUIConfigField = BaseUIConfigField & {
default?: string;
};
type TextareaUIConfigField = BaseUIConfigField & {
type: 'textarea';
placeholder?: string;
default?: string;
};
type SwitchUIConfigField = BaseUIConfigField & {
type: 'switch';
default?: boolean;
};
type UIConfigField =
| StringUIConfigField
| SelectUIConfigField
| PasswordUIConfigField;
| PasswordUIConfigField
| TextareaUIConfigField
| SwitchUIConfigField;
type ConfigModelProvider = {
id: string;
@@ -50,7 +63,10 @@ type ConfigModelProvider = {
type Config = {
version: number;
setupComplete: boolean;
general: {
preferences: {
[key: string]: any;
};
personalization: {
[key: string]: any;
};
modelProviders: ConfigModelProvider[];
@@ -73,7 +89,8 @@ type ModelProviderUISection = {
};
type UIConfigSections = {
general: UIConfigField[];
preferences: UIConfigField[];
personalization: UIConfigField[];
modelProviders: ModelProviderUISection[];
search: UIConfigField[];
};
@@ -87,4 +104,6 @@ export type {
StringUIConfigField,
ModelProviderUISection,
ConfigModelProvider,
TextareaUIConfigField,
SwitchUIConfigField,
};

View File

@@ -1,6 +1,6 @@
import { sql } from 'drizzle-orm';
import { text, integer, sqliteTable } from 'drizzle-orm/sqlite-core';
import { Document } from 'langchain/document';
import { Document } from '@langchain/core/documents';
export const messages = sqliteTable('messages', {
id: integer('id').primaryKey(),

View File

@@ -17,10 +17,11 @@ import {
useState,
} from 'react';
import crypto from 'crypto';
import { useSearchParams } from 'next/navigation';
import { useParams, useSearchParams } from 'next/navigation';
import { toast } from 'sonner';
import { getSuggestions } from '../actions';
import { MinimalProvider } from '../models/types';
import { getAutoMediaSearch } from '../config/clientRegistry';
export type Section = {
userMessage: UserMessage;
@@ -48,6 +49,8 @@ type ChatContext = {
messageAppeared: boolean;
isReady: boolean;
hasError: boolean;
chatModelProvider: ChatModelProvider;
embeddingModelProvider: EmbeddingModelProvider;
setOptimizationMode: (mode: string) => void;
setFocusMode: (mode: string) => void;
setFiles: (files: File[]) => void;
@@ -58,6 +61,8 @@ type ChatContext = {
rewrite?: boolean,
) => Promise<void>;
rewrite: (messageId: string) => void;
setChatModelProvider: (provider: ChatModelProvider) => void;
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void;
};
export interface File {
@@ -90,17 +95,6 @@ const checkConfig = async (
'embeddingModelProviderId',
);
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const res = await fetch(`/api/providers`, {
headers: {
'Content-Type': 'application/json',
@@ -256,25 +250,24 @@ export const chatContext = createContext<ChatContext>({
sections: [],
notFound: false,
optimizationMode: '',
chatModelProvider: { key: '', providerId: '' },
embeddingModelProvider: { key: '', providerId: '' },
rewrite: () => {},
sendMessage: async () => {},
setFileIds: () => {},
setFiles: () => {},
setFocusMode: () => {},
setOptimizationMode: () => {},
setChatModelProvider: () => {},
setEmbeddingModelProvider: () => {},
});
export const ChatProvider = ({
children,
id,
}: {
children: React.ReactNode;
id?: string;
}) => {
export const ChatProvider = ({ children }: { children: React.ReactNode }) => {
const params: { chatId: string } = useParams();
const searchParams = useSearchParams();
const initialMessage = searchParams.get('q');
const [chatId, setChatId] = useState<string | undefined>(id);
const [chatId, setChatId] = useState<string | undefined>(params.chatId);
const [newChatCreated, setNewChatCreated] = useState(false);
const [loading, setLoading] = useState(false);
@@ -443,6 +436,19 @@ export const ChatProvider = ({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
if (params.chatId && params.chatId !== chatId) {
setChatId(params.chatId);
setMessages([]);
setChatHistory([]);
setFiles([]);
setFileIds([]);
setIsMessagesLoaded(false);
setNotFound(false);
setNewChatCreated(false);
}
}, [params.chatId, chatId]);
useEffect(() => {
if (
chatId &&
@@ -466,7 +472,7 @@ export const ChatProvider = ({
setChatId(crypto.randomBytes(20).toString('hex'));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
}, [chatId, isMessagesLoaded, newChatCreated, messages.length]);
useEffect(() => {
messagesRef.current = messages;
@@ -519,7 +525,7 @@ export const ChatProvider = ({
messageId,
rewrite = false,
) => {
if (loading) return;
if (loading || !message) return;
setLoading(true);
setMessageAppeared(false);
@@ -608,16 +614,13 @@ export const ChatProvider = ({
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
const autoMediaSearch = getAutoMediaSearch();
if (autoImageSearch === 'true') {
if (autoMediaSearch) {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
@@ -743,6 +746,10 @@ export const ChatProvider = ({
setOptimizationMode,
rewrite,
sendMessage,
setChatModelProvider,
chatModelProvider,
embeddingModelProvider,
setEmbeddingModelProvider,
}}
>
{children}

View File

@@ -1,82 +0,0 @@
import { Embeddings, type EmbeddingsParams } from '@langchain/core/embeddings';
import { chunkArray } from '@langchain/core/utils/chunk_array';
export interface HuggingFaceTransformersEmbeddingsParams
extends EmbeddingsParams {
modelName: string;
model: string;
timeout?: number;
batchSize?: number;
stripNewLines?: boolean;
}
export class HuggingFaceTransformersEmbeddings
extends Embeddings
implements HuggingFaceTransformersEmbeddingsParams
{
modelName = 'Xenova/all-MiniLM-L6-v2';
model = 'Xenova/all-MiniLM-L6-v2';
batchSize = 512;
stripNewLines = true;
timeout?: number;
private pipelinePromise: Promise<any> | undefined;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});
this.modelName = fields?.model ?? fields?.modelName ?? this.model;
this.model = this.modelName;
this.stripNewLines = fields?.stripNewLines ?? this.stripNewLines;
this.timeout = fields?.timeout;
}
async embedDocuments(texts: string[]): Promise<number[][]> {
const batches = chunkArray(
this.stripNewLines ? texts.map((t) => t.replace(/\n/g, ' ')) : texts,
this.batchSize,
);
const batchRequests = batches.map((batch) => this.runEmbedding(batch));
const batchResponses = await Promise.all(batchRequests);
const embeddings: number[][] = [];
for (let i = 0; i < batchResponses.length; i += 1) {
const batchResponse = batchResponses[i];
for (let j = 0; j < batchResponse.length; j += 1) {
embeddings.push(batchResponse[j]);
}
}
return embeddings;
}
async embedQuery(text: string): Promise<number[]> {
const data = await this.runEmbedding([
this.stripNewLines ? text.replace(/\n/g, ' ') : text,
]);
return data[0];
}
private async runEmbedding(texts: string[]) {
const { pipeline } = await import('@xenova/transformers');
const pipe = await (this.pipelinePromise ??= pipeline(
'feature-extraction',
this.model,
));
return this.caller.call(async () => {
const output = await pipe(texts, { pooling: 'mean', normalize: true });
return output.tolist();
});
}
}

View File

@@ -0,0 +1,7 @@
abstract class BaseEmbedding<CONFIG> {
constructor(protected config: CONFIG) {}
abstract embedText(texts: string[]): Promise<number[][]>;
abstract embedChunks(chunks: Chunk[]): Promise<number[][]>;
}
export default BaseEmbedding;
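
For orientation, a trivial in-memory subclass under the same contract (a sketch; Chunk is assumed to be the project's global chunk type with a content field, as used by OllamaEmbedding below):

class ZeroEmbedding extends BaseEmbedding<{ dim: number }> {
  async embedText(texts: string[]): Promise<number[][]> {
    return texts.map(() => new Array(this.config.dim).fill(0));
  }
  async embedChunks(chunks: Chunk[]): Promise<number[][]> {
    return this.embedText(chunks.map((c) => c.content));
  }
}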

View File

@@ -0,0 +1,22 @@
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../types';
abstract class BaseLLM<CONFIG> {
constructor(protected config: CONFIG) {}
abstract withOptions(options: GenerateOptions): this;
abstract generateText(input: GenerateTextInput): Promise<GenerateTextOutput>;
abstract streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput>;
abstract generateObject<T>(input: GenerateObjectInput): Promise<T>;
abstract streamObject<T>(
input: GenerateObjectInput,
): AsyncGenerator<Partial<T>>;
}
export default BaseLLM;
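
A sketch of the intended call pattern against this interface (message shape assumed from the classifier above; provider is any configured model provider):

const llm = await provider.loadChatModel('model-key'); // now a BaseLLM<any>
const { content } = await llm
  .withOptions({ temperature: 0 })
  .generateText({ messages: [{ role: 'user', content: 'Hi' }] });
for await (const chunk of llm.streamText({ messages: [{ role: 'user', content: 'Hi' }] })) {
  process.stdout.write(chunk.contentChunk);
}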

View File

@@ -1,7 +1,9 @@
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import { ModelList, ProviderMetadata } from '../types';
import { UIConfigField } from '@/lib/config/types';
import BaseLLM from './llm';
import BaseEmbedding from './embedding';
abstract class BaseModelProvider<CONFIG> {
constructor(
@@ -11,8 +13,8 @@ abstract class BaseModelProvider<CONFIG> {
) {}
abstract getDefaultModels(): Promise<ModelList>;
abstract getModelList(): Promise<ModelList>;
abstract loadChatModel(modelName: string): Promise<BaseChatModel>;
abstract loadEmbeddingModel(modelName: string): Promise<Embeddings>;
abstract loadChatModel(modelName: string): Promise<BaseLLM<any>>;
abstract loadEmbeddingModel(modelName: string): Promise<BaseEmbedding<any>>;
static getProviderConfigFields(): UIConfigField[] {
throw new Error('Method not implemented.');
}

View File

@@ -0,0 +1,152 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface AimlConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your AI/ML API key',
required: true,
placeholder: 'AI/ML API Key',
env: 'AIML_API_KEY',
scope: 'server',
},
];
class AimlProvider extends BaseModelProvider<AimlConfig> {
constructor(id: string, name: string, config: AimlConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const res = await fetch('https://api.aimlapi.com/models', {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
},
});
const data = await res.json();
const chatModels: Model[] = data.data
.filter((m: any) => m.type === 'chat-completion')
.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
const embeddingModels: Model[] = data.data
.filter((m: any) => m.type === 'embedding')
.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: embeddingModels,
chat: chatModels,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to AI/ML API. Please ensure your API key is correct and the service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading AI/ML API Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
configuration: {
baseURL: 'https://api.aimlapi.com',
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading AI/ML API Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: this.config.apiKey,
model: key,
configuration: {
baseURL: 'https://api.aimlapi.com',
},
});
}
static parseAndValidate(raw: any): AimlConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'aiml',
name: 'AI/ML API',
};
}
}
export default AimlProvider;
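
The provider bootstrap pattern, repeated by every provider below (a sketch; id, name and model key are illustrative):

const config = AimlProvider.parseAndValidate({ apiKey: process.env.AIML_API_KEY });
const provider = new AimlProvider('aiml-default', 'AI/ML API', config);
const chat = await provider.loadChatModel('gpt-4o'); // throws if the key is not in getModelList()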

View File

@@ -0,0 +1,115 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatAnthropic } from '@langchain/anthropic';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface AnthropicConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Anthropic API key',
required: true,
placeholder: 'Anthropic API Key',
env: 'ANTHROPIC_API_KEY',
scope: 'server',
},
];
class AnthropicProvider extends BaseModelProvider<AnthropicConfig> {
constructor(id: string, name: string, config: AnthropicConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch('https://api.anthropic.com/v1/models?limit=999', {
method: 'GET',
headers: {
'x-api-key': this.config.apiKey,
'anthropic-version': '2023-06-01',
'Content-type': 'application/json',
},
});
if (!res.ok) {
throw new Error(`Failed to fetch Anthropic models: ${res.statusText}`);
}
const data = (await res.json()).data;
const models: Model[] = data.map((m: any) => {
return {
key: m.id,
name: m.display_name,
};
});
return {
embedding: [],
chat: models,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Anthropic Chat Model. Invalid Model Selected',
);
}
return new ChatAnthropic({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('Anthropic provider does not support embedding models.');
}
static parseAndValidate(raw: any): AnthropicConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'anthropic',
name: 'Anthropic',
};
}
}
export default AnthropicProvider;

View File

@@ -0,0 +1,107 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface DeepSeekConfig {
apiKey: string;
}
const defaultChatModels: Model[] = [
{
name: 'Deepseek Chat / DeepSeek V3.2 Exp',
key: 'deepseek-chat',
},
{
name: 'Deepseek Reasoner / DeepSeek V3.2 Exp',
key: 'deepseek-reasoner',
},
];
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your DeepSeek API key',
required: true,
placeholder: 'DeepSeek API Key',
env: 'DEEPSEEK_API_KEY',
scope: 'server',
},
];
class DeepSeekProvider extends BaseModelProvider<DeepSeekConfig> {
constructor(id: string, name: string, config: DeepSeekConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
return {
embedding: [],
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading DeepSeek Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
configuration: {
baseURL: 'https://api.deepseek.com',
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('DeepSeek provider does not support embedding models.');
}
static parseAndValidate(raw: any): DeepSeekConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'deepseek',
name: 'Deepseek AI',
};
}
}
export default DeepSeekProvider;

View File

@@ -0,0 +1,145 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface GeminiConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Google Gemini API key',
required: true,
placeholder: 'Google Gemini API Key',
env: 'GEMINI_API_KEY',
scope: 'server',
},
];
class GeminiProvider extends BaseModelProvider<GeminiConfig> {
constructor(id: string, name: string, config: GeminiConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models?key=${this.config.apiKey}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
},
);
const data = await res.json();
let defaultEmbeddingModels: Model[] = [];
let defaultChatModels: Model[] = [];
data.models.forEach((m: any) => {
if (
m.supportedGenerationMethods.some(
(genMethod: string) =>
genMethod === 'embedText' || genMethod === 'embedContent',
)
) {
defaultEmbeddingModels.push({
key: m.name,
name: m.displayName,
});
} else if (m.supportedGenerationMethods.includes('generateContent')) {
defaultChatModels.push({
key: m.name,
name: m.displayName,
});
}
});
return {
embedding: defaultEmbeddingModels,
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Chat Model. Invalid Model Selected',
);
}
return new ChatGoogleGenerativeAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Embedding Model. Invalid Model Selected.',
);
}
return new GoogleGenerativeAIEmbeddings({
apiKey: this.config.apiKey,
model: key,
});
}
static parseAndValidate(raw: any): GeminiConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'gemini',
name: 'Google Gemini',
};
}
}
export default GeminiProvider;

View File

@@ -0,0 +1,118 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatGroq } from '@langchain/groq';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface GroqConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Groq API key',
required: true,
placeholder: 'Groq API Key',
env: 'GROQ_API_KEY',
scope: 'server',
},
];
class GroqProvider extends BaseModelProvider<GroqConfig> {
constructor(id: string, name: string, config: GroqConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const res = await fetch('https://api.groq.com/openai/v1/models', {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
},
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: [],
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Groq API. Please ensure your API key is correct and the Groq service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error('Error Loading Groq Chat Model. Invalid Model Selected');
}
return new ChatGroq({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('Groq provider does not support embedding models.');
}
static parseAndValidate(raw: any): GroqConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'groq',
name: 'Groq',
};
}
}
export default GroqProvider;

View File

@@ -1,5 +1,5 @@
import { ModelProviderUISection } from '@/lib/config/types';
import { ProviderConstructor } from './baseProvider';
import { ProviderConstructor } from '../base/provider';
import OpenAIProvider from './openai';
import OllamaProvider from './ollama';

View File

@@ -0,0 +1,158 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface LemonadeConfig {
baseURL: string;
apiKey?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for Lemonade API',
required: true,
placeholder: 'https://api.lemonade.ai/v1',
env: 'LEMONADE_BASE_URL',
scope: 'server',
},
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Lemonade API key (optional)',
required: false,
placeholder: 'Lemonade API Key',
env: 'LEMONADE_API_KEY',
scope: 'server',
},
];
class LemonadeProvider extends BaseModelProvider<LemonadeConfig> {
constructor(id: string, name: string, config: LemonadeConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
};
if (this.config.apiKey) {
headers['Authorization'] = `Bearer ${this.config.apiKey}`;
}
const res = await fetch(`${this.config.baseURL}/models`, {
method: 'GET',
headers,
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Lemonade API. Please ensure the base URL is correct and the service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey || 'not-needed',
temperature: 0.7,
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: this.config.apiKey || 'not-needed',
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}
static parseAndValidate(raw: any): LemonadeConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
apiKey: raw.apiKey ? String(raw.apiKey) : undefined,
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lemonade',
name: 'Lemonade',
};
}
}
export default LemonadeProvider;

View File

@@ -0,0 +1,148 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface LMStudioConfig {
baseURL: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for LM Studio server',
required: true,
placeholder: 'http://localhost:1234',
env: 'LM_STUDIO_BASE_URL',
scope: 'server',
},
];
class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
constructor(id: string, name: string, config: LMStudioConfig) {
super(id, name, config);
}
private normalizeBaseURL(url: string): string {
const trimmed = url.trim().replace(/\/+$/, '');
return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
}
async getDefaultModels(): Promise<ModelList> {
try {
const baseURL = this.normalizeBaseURL(this.config.baseURL);
const res = await fetch(`${baseURL}/models`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: 'lm-studio',
temperature: 0.7,
model: key,
streaming: true,
configuration: {
baseURL: this.normalizeBaseURL(this.config.baseURL),
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: 'lm-studio',
model: key,
configuration: {
baseURL: this.normalizeBaseURL(this.config.baseURL),
},
});
}
static parseAndValidate(raw: any): LMStudioConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lmstudio',
name: 'LM Studio',
};
}
}
export default LMStudioProvider;

View File

@@ -1,10 +1,11 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOllama, OllamaEmbeddings } from '@langchain/ollama';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import BaseModelProvider from '../../base/provider';
import { Model, ModelList, ProviderMetadata } from '../../types';
import BaseLLM from '../../base/llm';
import BaseEmbedding from '../../base/embedding';
import OllamaLLM from './ollamaLLM';
import OllamaEmbedding from './ollamaEmbedding';
interface OllamaConfig {
baseURL: string;
@@ -76,7 +77,7 @@ class OllamaProvider extends BaseModelProvider<OllamaConfig> {
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
@@ -87,14 +88,13 @@ class OllamaProvider extends BaseModelProvider<OllamaConfig> {
       );
     }
 
-    return new ChatOllama({
-      temperature: 0.7,
+    return new OllamaLLM({
+      baseURL: this.config.baseURL,
       model: key,
-      baseUrl: this.config.baseURL,
     });
   }
 
-  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
     const modelList = await this.getModelList();
     const exists = modelList.embedding.find((m) => m.key === key);
@@ -104,9 +104,9 @@ class OllamaProvider extends BaseModelProvider<OllamaConfig> {
       );
     }
 
-    return new OllamaEmbeddings({
+    return new OllamaEmbedding({
       model: key,
-      baseUrl: this.config.baseURL,
+      baseURL: this.config.baseURL,
     });
   }


@@ -0,0 +1,39 @@
import { Ollama } from 'ollama';
import BaseEmbedding from '../../base/embedding';
type OllamaConfig = {
model: string;
baseURL?: string;
};
class OllamaEmbedding extends BaseEmbedding<OllamaConfig> {
ollamaClient: Ollama;
constructor(protected config: OllamaConfig) {
super(config);
this.ollamaClient = new Ollama({
host: this.config.baseURL || 'http://localhost:11434',
});
}
async embedText(texts: string[]): Promise<number[][]> {
const response = await this.ollamaClient.embed({
input: texts,
model: this.config.model,
});
return response.embeddings;
}
async embedChunks(chunks: Chunk[]): Promise<number[][]> {
const response = await this.ollamaClient.embed({
input: chunks.map((c) => c.content),
model: this.config.model,
});
return response.embeddings;
}
}
export default OllamaEmbedding;
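A hedged usage sketch (not part of the diff): Chunk is the global type declared in src/lib/types.ts, and the model name is illustrative.

const embedding = new OllamaEmbedding({
  model: 'nomic-embed-text', // any embedding model pulled into Ollama
  baseURL: 'http://localhost:11434',
});
const chunks: Chunk[] = [
  { content: 'Perplexica is an open-source AI search engine.', metadata: {} },
];
const vectors = await embedding.embedChunks(chunks); // number[][], one vector per chunk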


@@ -0,0 +1,151 @@
import z from 'zod';
import BaseLLM from '../../base/llm';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types';
import { Ollama } from 'ollama';
import { parse } from 'partial-json';
type OllamaConfig = {
baseURL: string;
model: string;
options?: GenerateOptions;
};
class OllamaLLM extends BaseLLM<OllamaConfig> {
ollamaClient: Ollama;
constructor(protected config: OllamaConfig) {
super(config);
this.ollamaClient = new Ollama({
host: this.config.baseURL || 'http://localhost:11434',
});
}
withOptions(options: GenerateOptions) {
this.config.options = {
...this.config.options,
...options,
};
return this;
}
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
this.withOptions(input.options || {});
const res = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
options: {
top_p: this.config.options?.topP,
temperature: this.config.options?.temperature,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
return {
content: res.message.content,
additionalInfo: {
reasoning: res.message.thinking,
},
};
}
async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
this.withOptions(input.options || {});
const stream = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
stream: true,
options: {
top_p: this.config.options?.topP,
temperature: this.config.options?.temperature,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
for await (const chunk of stream) {
yield {
contentChunk: chunk.message.content,
done: chunk.done,
additionalInfo: {
reasoning: chunk.message.thinking,
},
};
}
}
async generateObject<T>(input: GenerateObjectInput): Promise<T> {
this.withOptions(input.options || {});
const response = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
format: z.toJSONSchema(input.schema),
think: false,
options: {
top_p: this.config.options?.topP,
temperature: 0,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
try {
return input.schema.parse(JSON.parse(response.message.content)) as T;
} catch (err) {
throw new Error(`Error parsing response from Ollama: ${err}`);
}
}
async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
    let receivedObj: string = '';
this.withOptions(input.options || {});
const stream = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
format: z.toJSONSchema(input.schema),
stream: true,
think: false,
options: {
top_p: this.config.options?.topP,
temperature: this.config.options?.temperature,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
for await (const chunk of stream) {
      receivedObj += chunk.message.content;
try {
        yield parse(receivedObj) as T;
} catch (err) {
console.log('Error parsing partial object from Ollama:', err);
yield {} as T;
}
}
}
}
export default OllamaLLM;
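A sketch of the streaming-object path (an illustration, assuming a local Ollama server and an illustrative model name); partial-json lets each accumulated chunk be parsed before the JSON is complete:

import z from 'zod';

const llm = new OllamaLLM({
  baseURL: 'http://localhost:11434',
  model: 'llama3.1', // illustrative
});
const schema = z.object({ answer: z.string() });

for await (const partial of llm.streamObject<{ answer: string }>({
  schema,
  messages: [{ role: 'user', content: 'Answer with JSON like {"answer": "..."}' }],
})) {
  console.log(partial); // grows from {} toward { answer: '...' } as chunks arrive
}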


@@ -1,10 +1,13 @@
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { Model, ModelList, ProviderMetadata } from '../types';
-import BaseModelProvider from './baseProvider';
-import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
-import { Embeddings } from '@langchain/core/embeddings';
 import { UIConfigField } from '@/lib/config/types';
 import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
+import { Model, ModelList, ProviderMetadata } from '../../types';
+import OpenAIEmbedding from './openaiEmbedding';
+import BaseEmbedding from '../../base/embedding';
+import BaseModelProvider from '../../base/provider';
+import BaseLLM from '../../base/llm';
+import OpenAILLM from './openaiLLM';
 
 interface OpenAIConfig {
   apiKey: string;
@@ -145,7 +148,7 @@ class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
     };
   }
 
-  async loadChatModel(key: string): Promise<BaseChatModel> {
+  async loadChatModel(key: string): Promise<BaseLLM<any>> {
     const modelList = await this.getModelList();
     const exists = modelList.chat.find((m) => m.key === key);
@@ -156,17 +159,14 @@ class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
       );
     }
 
-    return new ChatOpenAI({
+    return new OpenAILLM({
       apiKey: this.config.apiKey,
-      temperature: 0.7,
       model: key,
-      configuration: {
-        baseURL: this.config.baseURL,
-      },
+      baseURL: this.config.baseURL,
     });
   }
 
-  async loadEmbeddingModel(key: string): Promise<Embeddings> {
+  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
     const modelList = await this.getModelList();
     const exists = modelList.embedding.find((m) => m.key === key);
@@ -176,12 +176,10 @@ class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
       );
     }
 
-    return new OpenAIEmbeddings({
+    return new OpenAIEmbedding({
       apiKey: this.config.apiKey,
       model: key,
-      configuration: {
-        baseURL: this.config.baseURL,
-      },
+      baseURL: this.config.baseURL,
     });
   }


@@ -0,0 +1,41 @@
import OpenAI from 'openai';
import BaseEmbedding from '../../base/embedding';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
};
class OpenAIEmbedding extends BaseEmbedding<OpenAIConfig> {
openAIClient: OpenAI;
constructor(protected config: OpenAIConfig) {
super(config);
this.openAIClient = new OpenAI({
apiKey: config.apiKey,
baseURL: config.baseURL,
});
}
async embedText(texts: string[]): Promise<number[][]> {
const response = await this.openAIClient.embeddings.create({
model: this.config.model,
input: texts,
});
return response.data.map((embedding) => embedding.embedding);
}
async embedChunks(chunks: Chunk[]): Promise<number[][]> {
const response = await this.openAIClient.embeddings.create({
model: this.config.model,
input: chunks.map((c) => c.content),
});
return response.data.map((embedding) => embedding.embedding);
}
}
export default OpenAIEmbedding;
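The embedText/embedChunks split mirrors the BaseEmbedding contract seen in the Ollama counterpart. A minimal sketch (the key handling and model name are assumptions, not from the diff):

const embedding = new OpenAIEmbedding({
  apiKey: process.env.OPENAI_API_KEY!,
  model: 'text-embedding-3-small', // illustrative
});
const [vector] = await embedding.embedText(['hello world']);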


@@ -0,0 +1,163 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm';
import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types';
import { parse } from 'partial-json';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
options?: GenerateOptions;
};
class OpenAILLM extends BaseLLM<OpenAIConfig> {
openAIClient: OpenAI;
constructor(protected config: OpenAIConfig) {
super(config);
this.openAIClient = new OpenAI({
apiKey: this.config.apiKey,
baseURL: this.config.baseURL || 'https://api.openai.com/v1',
});
}
withOptions(options: GenerateOptions) {
this.config.options = {
...this.config.options,
...options,
};
return this;
}
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
this.withOptions(input.options || {});
const response = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: input.messages,
      temperature: this.config.options?.temperature ?? 1.0, // ?? (not ||) so an explicit 0 is honored
top_p: this.config.options?.topP,
max_completion_tokens: this.config.options?.maxTokens,
stop: this.config.options?.stopSequences,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
});
if (response.choices && response.choices.length > 0) {
return {
content: response.choices[0].message.content!,
additionalInfo: {
finishReason: response.choices[0].finish_reason,
},
};
}
throw new Error('No response from OpenAI');
}
async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
this.withOptions(input.options || {});
const stream = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: input.messages,
      temperature: this.config.options?.temperature ?? 1.0,
top_p: this.config.options?.topP,
max_completion_tokens: this.config.options?.maxTokens,
stop: this.config.options?.stopSequences,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stream: true,
});
for await (const chunk of stream) {
if (chunk.choices && chunk.choices.length > 0) {
yield {
contentChunk: chunk.choices[0].delta.content || '',
done: chunk.choices[0].finish_reason !== null,
additionalInfo: {
finishReason: chunk.choices[0].finish_reason,
},
};
}
}
}
async generateObject<T>(input: GenerateObjectInput): Promise<T> {
this.withOptions(input.options || {});
const response = await this.openAIClient.chat.completions.parse({
messages: input.messages,
model: this.config.model,
      temperature: this.config.options?.temperature ?? 1.0,
top_p: this.config.options?.topP,
max_completion_tokens: this.config.options?.maxTokens,
stop: this.config.options?.stopSequences,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
response_format: zodResponseFormat(input.schema, 'object'),
});
if (response.choices && response.choices.length > 0) {
try {
return input.schema.parse(response.choices[0].message.parsed) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
}
throw new Error('No response from OpenAI');
}
async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
    let receivedObj: string = '';
this.withOptions(input.options || {});
    const stream = this.openAIClient.responses.stream({
      model: this.config.model,
      input: input.messages,
      temperature: this.config.options?.temperature ?? 1.0,
      top_p: this.config.options?.topP,
      // The Responses API names this max_output_tokens and does not accept
      // stop, frequency_penalty, or presence_penalty (those are chat-completions
      // parameters), so they are omitted here.
      max_output_tokens: this.config.options?.maxTokens,
      text: {
        format: zodTextFormat(input.schema, 'object'),
      },
    });
for await (const chunk of stream) {
if (chunk.type === 'response.output_text.delta' && chunk.delta) {
        receivedObj += chunk.delta;
try {
          yield parse(receivedObj) as T;
} catch (err) {
console.log('Error parsing partial object from OpenAI:', err);
yield {} as T;
}
} else if (chunk.type === 'response.output_text.done' && chunk.text) {
try {
yield parse(chunk.text) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
}
}
}
}
export default OpenAILLM;
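A hedged sketch of generateObject, which round-trips through chat.completions.parse and then re-validates with the caller's schema (model name and key handling are illustrative):

import z from 'zod';

const llm = new OpenAILLM({
  apiKey: process.env.OPENAI_API_KEY!,
  model: 'gpt-4o-mini', // illustrative
});
const result = await llm.generateObject<{ city: string }>({
  schema: z.object({ city: z.string() }),
  messages: [{ role: 'user', content: 'Name one city in France.' }],
});
console.log(result.city);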


@@ -0,0 +1,87 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import { HuggingFaceTransformersEmbeddings } from '@langchain/community/embeddings/huggingface_transformers';
interface TransformersConfig {}
const defaultEmbeddingModels: Model[] = [
{
name: 'all-MiniLM-L6-v2',
key: 'Xenova/all-MiniLM-L6-v2',
},
{
name: 'mxbai-embed-large-v1',
key: 'mixedbread-ai/mxbai-embed-large-v1',
},
{
name: 'nomic-embed-text-v1',
key: 'Xenova/nomic-embed-text-v1',
},
];
const providerConfigFields: UIConfigField[] = [];
class TransformersProvider extends BaseModelProvider<TransformersConfig> {
constructor(id: string, name: string, config: TransformersConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
return {
embedding: [...defaultEmbeddingModels],
chat: [],
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
throw new Error('Transformers Provider does not support chat models.');
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Transformers Embedding Model. Invalid Model Selected.',
);
}
return new HuggingFaceTransformersEmbeddings({
model: key,
});
}
static parseAndValidate(raw: any): TransformersConfig {
return {};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'transformers',
name: 'Transformers',
};
}
}
export default TransformersProvider;


@@ -1,3 +1,5 @@
+import z from 'zod';
+
 type Model = {
   name: string;
   key: string;
@@ -25,10 +27,59 @@ type ModelWithProvider = {
   providerId: string;
 };
 
+type GenerateOptions = {
+  temperature?: number;
+  maxTokens?: number;
+  topP?: number;
+  stopSequences?: string[];
+  frequencyPenalty?: number;
+  presencePenalty?: number;
+};
+
+type GenerateTextInput = {
+  messages: Message[];
+  options?: GenerateOptions;
+};
+
+type GenerateTextOutput = {
+  content: string;
+  additionalInfo?: Record<string, any>;
+};
+
+type StreamTextOutput = {
+  contentChunk: string;
+  additionalInfo?: Record<string, any>;
+  done?: boolean;
+};
+
+type GenerateObjectInput = {
+  schema: z.ZodTypeAny;
+  messages: Message[];
+  options?: GenerateOptions;
+};
+
+type GenerateObjectOutput<T> = {
+  object: T;
+  additionalInfo?: Record<string, any>;
+};
+
+type StreamObjectOutput<T> = {
+  objectChunk: Partial<T>;
+  additionalInfo?: Record<string, any>;
+  done?: boolean;
+};
+
 export type {
   Model,
   ModelList,
   ProviderMetadata,
   MinimalProvider,
   ModelWithProvider,
+  GenerateOptions,
+  GenerateTextInput,
+  GenerateTextOutput,
+  StreamTextOutput,
+  GenerateObjectInput,
+  GenerateObjectOutput,
+  StreamObjectOutput,
 };
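To illustrate how the streaming and non-streaming shapes relate, a small helper that drains a StreamTextOutput generator into a GenerateTextOutput (a sketch assuming these types are imported from this module; it is not part of the diff):

async function collectText(
  stream: AsyncGenerator<StreamTextOutput>,
): Promise<GenerateTextOutput> {
  let content = '';
  let additionalInfo: Record<string, any> | undefined;
  for await (const chunk of stream) {
    content += chunk.contentChunk;
    if (chunk.additionalInfo) additionalInfo = chunk.additionalInfo;
  }
  return { content, additionalInfo };
}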


@@ -0,0 +1,26 @@
import { BaseMessageLike } from "@langchain/core/messages";
export const imageSearchPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
export const imageSearchFewShots: BaseMessageLike[] = [
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
],
['assistant', '<query>A cat</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
],
['assistant', '<query>Car working</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>']
]
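A hedged sketch of how these exports are presumably assembled into a message list at the call site (chatHistory and followUp are illustrative placeholders, not from the diff):

const chatHistory = 'Human: Tell me about the aurora borealis.';
const followUp = 'Show me what it looks like';

const messages: BaseMessageLike[] = [
  ['system', imageSearchPrompt],
  ...imageSearchFewShots,
  [
    'user',
    `<conversation>\n${chatHistory}\n</conversation>\n<follow_up>\n${followUp}\n</follow_up>`,
  ],
];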


@@ -0,0 +1,25 @@
import { BaseMessageLike } from "@langchain/core/messages";
export const videoSearchPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search YouTube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
export const videoSearchFewShots: BaseMessageLike[] = [
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
],
['assistant', '<query>How does a car work?</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
],
['assistant', '<query>Theory of relativity</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
]


@@ -0,0 +1,176 @@
export const getClassifierPrompt = (input: {
intentDesc: string;
widgetDesc: string;
}) => {
return `
<role>
You are an expert query classifier for an intelligent search agent. Your task is to analyze user queries and determine the optimal way to answer them—selecting the right intent(s) and widgets.
</role>
<task>
Given a conversation history and follow-up question, you must:
1. Determine if search should be skipped (skipSearch: boolean)
2. Generate a standalone, self-contained version of the question (standaloneFollowUp: string)
3. Identify the intent(s) that describe how to fulfill the query (intent: array)
4. Select appropriate widgets (widgets: array)
</task>
<critical_decision_rule>
**THE MOST IMPORTANT RULE**: skipSearch should be TRUE only in TWO cases:
1. Widget-only queries (weather, stocks, calculator)
2. Greetings or simple writing tasks (NOT questions)
**DEFAULT TO skipSearch: false** for everything else, including:
- Any question ("what is", "how does", "explain", "tell me about")
- Any request for information or facts
- Anything you're unsure about
Ask yourself: "Is the user ASKING about something or requesting INFORMATION?"
- YES → skipSearch: false (use web_search)
- NO (just greeting or simple writing) → skipSearch: true
</critical_decision_rule>
<skip_search_decision_tree>
Follow this decision tree IN ORDER:
1. **Widget-Only Queries** → skipSearch: TRUE, intent: ['widget_response']
- Weather queries: "weather in NYC", "temperature in Paris", "is it raining in Seattle"
- Stock queries: "AAPL stock price", "how is Tesla doing", "MSFT stock"
- Calculator queries: "what is 25% of 80", "calculate 15*23", "sqrt(144)"
- These are COMPLETE answers—no search needed
2. **Writing/Greeting Tasks** → skipSearch: TRUE, intent: ['writing_task']
- ONLY for greetings and simple writing:
- Greetings: "hello", "hi", "how are you", "thanks", "goodbye"
- Simple writing needing NO facts: "write a thank you email", "draft a birthday message", "compose a poem"
- NEVER for: questions, "what is X", "how does X work", explanations, definitions, facts, code help
- If user is ASKING about something (not requesting writing), use web_search
3. **Image Display Queries** → skipSearch: FALSE, intent: ['image_preview']
- "Show me images of cats"
- "Pictures of the Eiffel Tower"
- "Visual examples of modern architecture"
- Requests for images to visualize something
4. **Widget + Additional Info** → skipSearch: FALSE, intent: ['web_search', 'widget_response']
- "weather in NYC and best things to do there"
- "AAPL stock and recent Apple news"
- "calculate my mortgage and explain how interest works"
5. **Pure Search Queries** → skipSearch: FALSE
- Default to web_search for general questions
- Use discussions_search when user explicitly mentions Reddit, forums, opinions, experiences
- Use academic_search when user explicitly mentions research, papers, studies, scientific
- Can combine multiple search intents when appropriate
6. **Fallback when web_search unavailable** → skipSearch: TRUE, intent: ['writing_task'] or []
- If no search intents are available and no widgets apply
- Set skipSearch to true and use writing_task or empty intent
</skip_search_decision_tree>
<examples>
Example 1: Widget-only query
Query: "What is the weather in New York?"
Reasoning: User wants current weather → weather widget provides this completely
Output: skipSearch: true, intent: ['widget_response'], widgets: [weather widget for New York]
Example 2: Widget-only query
Query: "AAPL stock price"
Reasoning: User wants stock price → stock_ticker widget provides this completely
Output: skipSearch: true, intent: ['widget_response'], widgets: [stock_ticker for AAPL]
Example 3: Widget + search query
Query: "What's the weather in NYC and what are some good outdoor activities?"
Reasoning: Weather widget handles weather, but outdoor activities need web search
Output: skipSearch: false, intent: ['web_search', 'widget_response'], widgets: [weather widget for NYC]
Example 4: Pure search query
Query: "What are the latest developments in AI?"
Reasoning: No widget applies, needs current web information
Output: skipSearch: false, intent: ['web_search'], widgets: []
Example 5: Writing task (greeting/simple writing only)
Query: "Write me a thank you email for a job interview"
Reasoning: Simple writing task needing no external facts → writing_task
Output: skipSearch: true, intent: ['writing_task'], widgets: []
Example 5b: Question about something - ALWAYS needs search
Query: "What is Kimi K2?"
Reasoning: User is ASKING about something → needs web search for accurate info
Output: skipSearch: false, intent: ['web_search'], widgets: []
Example 5c: Another question - needs search
Query: "Explain how photosynthesis works"
Reasoning: User is ASKING for explanation → needs web search
Output: skipSearch: false, intent: ['web_search'], widgets: []
Example 6: Image display
Query: "Show me images of cats"
Reasoning: User wants to see images → requires image search
Output: skipSearch: false, intent: ['image_preview'], widgets: []
Example 7: Multiple search sources
Query: "What does the research say about meditation benefits?"
Reasoning: Benefits from both academic papers and web articles
Output: skipSearch: false, intent: ['academic_search', 'web_search'], widgets: []
Example 8: Discussions search
Query: "What do people on Reddit think about the new iPhone?"
Reasoning: User explicitly wants forum/community opinions → discussions_search
Output: skipSearch: false, intent: ['discussions_search'], widgets: []
Example 9: Academic search only
Query: "Find scientific papers on climate change effects"
Reasoning: User explicitly wants academic/research papers
Output: skipSearch: false, intent: ['academic_search'], widgets: []
</examples>
<standalone_follow_up_guidelines>
Transform the follow-up into a self-contained question:
- Include ALL necessary context from chat history
- Replace pronouns (it, they, this, that) with specific nouns
- Replace references ("the previous one", "what you mentioned") with actual content
- Preserve the original complexity—don't over-elaborate simple questions
- The question should be answerable without seeing the conversation
</standalone_follow_up_guidelines>
<intent_selection_rules>
Available intents:
${input.intentDesc}
Rules:
- Include at least one intent when applicable
- For questions/information requests:
- Default to web_search unless user explicitly requests another source
- Use discussions_search when user mentions: Reddit, forums, opinions, experiences, "what do people think"
- Use academic_search when user mentions: research, papers, studies, scientific, scholarly
- Can combine intents (e.g., ['academic_search', 'web_search'])
- If web_search is NOT in available intents and query needs search:
- Check if discussions_search or academic_search applies
- If no search intent available and no widgets: use writing_task or empty array []
- private_search: ONLY when user provides specific URLs/documents
- widget_response: when widgets fully answer the query
- writing_task: ONLY for greetings and simple writing (never for questions)
</intent_selection_rules>
<widget_selection_rules>
Available widgets:
${input.widgetDesc}
Rules:
- Include ALL applicable widgets regardless of skipSearch value
- Each widget type can only be included once
- Widgets provide structured, real-time data that enhances any response
</widget_selection_rules>
<output_format>
Your classification must be precise and consistent:
{
"skipSearch": <true|false>,
"standaloneFollowUp": "<self-contained question>",
"intent": [<array of selected intents>],
"widgets": [<array of selected widgets>]
}
</output_format>
`;
};
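A hedged sketch of a zod schema matching the <output_format> contract above (the actual schema the classifier feeds to generateObject lives elsewhere in this PR and may type intents and widgets more strictly):

import z from 'zod';

const classifierOutputSchema = z.object({
  skipSearch: z.boolean(),
  standaloneFollowUp: z.string(),
  intent: z.array(z.string()),
  widgets: z.array(z.any()), // widget payloads vary per widget type
});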


@@ -0,0 +1,15 @@
export const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI-powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestions should be relevant to the conversation and usable by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant and helpful to the user. Keep in mind that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length, informative, and relevant to the conversation.
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:
<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>
Today's date is ${new Date().toISOString()}
`;
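The repo's LineListOutputParser presumably extracts these lines; as a hedged illustration of the expected format, a plain-regex equivalent (not part of the diff):

function parseSuggestions(output: string): string[] {
  const match = output.match(/<suggestions>([\s\S]*?)<\/suggestions>/);
  if (!match) return [];
  return match[1]
    .split('\n')
    .map((line) => line.trim())
    .filter(Boolean);
}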


@@ -16,7 +16,7 @@ import { StringOutputParser } from '@langchain/core/output_parsers';
 import LineListOutputParser from '../outputParsers/listLineOutputParser';
 import LineOutputParser from '../outputParsers/lineOutputParser';
 import { getDocumentsFromLinks } from '../utils/documents';
-import { Document } from 'langchain/document';
+import { Document } from '@langchain/core/documents';
 import { searchSearxng } from '../searxng';
 import path from 'node:path';
 import fs from 'node:fs';


@@ -39,10 +39,11 @@ export const searchSearxng = async (
     });
   }
 
-  const res = await axios.get(url.toString());
+  const res = await fetch(url);
+  const data = await res.json();
 
-  const results: SearxngSearchResult[] = res.data.results;
-  const suggestions: string[] = res.data.suggestions;
+  const results: SearxngSearchResult[] = data.results;
+  const suggestions: string[] = data.suggestions;
 
   return { results, suggestions };
 };
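One behavioral difference worth noting: axios rejects on non-2xx responses, while fetch resolves them, so a guard right after the new fetch call may be worth adding (a sketch, not part of the diff):

if (!res.ok) {
  throw new Error(`SearXNG request failed with status ${res.status}`);
}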

src/lib/session.ts

@@ -0,0 +1,45 @@
import { EventEmitter } from 'node:events';
/* todo implement history saving and better artifact typing and handling */
class SessionManager {
private static sessions = new Map<string, SessionManager>();
readonly id: string;
private artifacts = new Map<string, Artifact>();
private emitter = new EventEmitter();
  constructor() {
    this.id = crypto.randomUUID();
    // Register the session so getSession/getAllSessions can find it;
    // nothing else ever writes to the static map.
    SessionManager.sessions.set(this.id, this);
  }
static getSession(id: string): SessionManager | undefined {
return this.sessions.get(id);
}
static getAllSessions(): SessionManager[] {
return Array.from(this.sessions.values());
}
emit(event: string, data: any) {
this.emitter.emit(event, data);
}
emitArtifact(artifact: Artifact) {
this.artifacts.set(artifact.id, artifact);
this.emitter.emit('addArtifact', artifact);
}
appendToArtifact(artifactId: string, data: any) {
const artifact = this.artifacts.get(artifactId);
if (artifact) {
if (typeof artifact.data === 'string') {
artifact.data += data;
} else if (Array.isArray(artifact.data)) {
artifact.data.push(data);
} else if (typeof artifact.data === 'object') {
Object.assign(artifact.data, data);
}
this.emitter.emit('updateArtifact', artifact);
}
}
}
export default SessionManager;
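A hedged sketch of the intended flow, as far as it can be read from this file (the artifact shape comes from the global Artifact type; event consumers would subscribe via the emitter):

const session = new SessionManager();

const answer: Artifact = { id: 'answer-1', type: 'text', data: '' };
session.emitArtifact(answer); // fires 'addArtifact'
session.appendToArtifact('answer-1', 'Hello'); // string data is concatenated; fires 'updateArtifact'

const found = SessionManager.getSession(session.id); // works once sessions are registered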

src/lib/types.ts

@@ -0,0 +1,15 @@
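// Note: this file has no imports or exports, so TypeScript treats it as a
// script and these declarations are globally visible. That appears to be why
// OllamaEmbedding and SessionManager can reference Chunk and Artifact without
// importing them.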
type Message = {
role: 'user' | 'assistant' | 'system';
content: string;
};
type Chunk = {
content: string;
metadata: Record<string, any>;
};
type Artifact = {
id: string;
type: string;
data: any;
};


@@ -1,6 +1,6 @@
 import axios from 'axios';
 import { htmlToText } from 'html-to-text';
-import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
+import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
 import { Document } from '@langchain/core/documents';
 import pdfParse from 'pdf-parse';

yarn.lock

File diff suppressed because it is too large