Compare commits


35 Commits

Author  SHA1  Message  Date
ItzCrazyKns  008c7cbec0  feat(chat-window): remove debugging code  2025-03-20 09:47:32 +05:30
ItzCrazyKns  4d1ee79b8d  feat(package): migrate db when built  2025-03-20 09:47:12 +05:30
ItzCrazyKns  ea638279e5  feat(docker): use standalone build  2025-03-20 09:46:50 +05:30
ItzCrazyKns  403d13eb50  feat(package): update scripts  2025-03-19 16:34:55 +05:30
ItzCrazyKns  217736d05a  feat(app): remove backend  2025-03-19 16:23:27 +05:30
ItzCrazyKns  8a24572cd2  feat(app): add upload functionality  2025-03-19 15:32:32 +05:30
ItzCrazyKns  649c68f292  feat(ui): fix type errors  2025-03-19 13:42:28 +05:30
ItzCrazyKns  bab5dba6e1  feat(app): port history saving features  2025-03-19 13:42:15 +05:30
ItzCrazyKns  c24edac16d  feat(app): add chat functionality  2025-03-19 13:41:52 +05:30
ItzCrazyKns  3150c21f17  feat(icons): fix type errors  2025-03-19 13:41:01 +05:30
ItzCrazyKns  c46fd7a9c8  feat(utils): add files utils, remove logger, fix API url  2025-03-19 13:40:35 +05:30
ItzCrazyKns  bab32e8d70  feat(app): add suggestions route  2025-03-19 13:40:10 +05:30
ItzCrazyKns  1130746f5d  feat(app): add image & video search functionality  2025-03-19 13:38:40 +05:30
ItzCrazyKns  d1e9361665  feat(routes): add discover route  2025-03-19 13:37:54 +05:30
ItzCrazyKns  3bf2337697  feat(app): add db & schema  2025-03-19 13:37:01 +05:30
ItzCrazyKns  ee6e197ec0  feat(app): lint & beautify  2025-03-18 11:29:04 +05:30
ItzCrazyKns  32f26bb4e8  feat(app): add groq, gemini & anthropic provider  2025-03-18 11:28:47 +05:30
ItzCrazyKns  4cb20542a5  feat(config): update file path, add post endpoint  2025-03-18 10:33:32 +05:30
ItzCrazyKns  97f6196d9b  feat(app): add GET config route  2025-03-18 10:25:09 +05:30
ItzCrazyKns  6c227cab6f  feat(providers): move providers to UI  2025-03-18 10:24:51 +05:30
ItzCrazyKns  e9e34ddff9  feat(ui): add meta search agent  2025-03-18 10:24:33 +05:30
ItzCrazyKns  e29a08dc46  feat(ui): add necessary utils  2025-03-18 10:24:16 +05:30
ItzCrazyKns  5c313e9bed  feat(ui): update packages, add config, add searxng  2025-03-18 10:23:59 +05:30
ItzCrazyKns  6b5bd9d79b  feat(prompts): move to UI  2025-03-18 10:23:21 +05:30
ItzCrazyKns  64d2a467b0  Merge pull request #672 from sjiampojamarn/scrolling (Only set scrollIntoView for user msg.)  2025-03-17 12:03:05 +05:30
sjiampojamarn  9a2c4fe3b6  Only set scrollIntoView for user msg.  2025-03-16 22:15:58 -07:00
ItzCrazyKns  060c68a900  feat(message-box): lint & beautify  2025-03-14 22:05:07 +05:30
ItzCrazyKns  e6b87f89ec  feat(sample-config): add custom openai model name  2025-03-08 20:08:27 +05:30
ItzCrazyKns  89b5229ce9  Merge pull request #663 from ericdachen/master (Update Readme)  2025-03-05 11:11:07 +05:30
ItzCrazyKns  7756340dd9  Update README.md  2025-03-05 11:09:19 +05:30
ItzCrazyKns  bbd2e9c359  feat(readme): update warp banner  2025-03-05 11:05:25 +05:30
ItzCrazyKns  a32eb1dda3  feat(readme): lint & beautify, update anchor URL  2025-03-05 10:55:02 +05:30
Eric Chen  aa834f7f04  Update README.md  2025-03-04 14:45:10 -05:00
Eric Chen  064c0fbe42  Update README.md  2025-03-04 12:16:10 -05:00
Eric Chen  bf4cf8eaeb  Update README.md  2025-03-04 12:14:17 -05:00
124 changed files with 4968 additions and 8382 deletions

View File

@@ -10,9 +10,6 @@ on:
jobs:
build-and-push:
runs-on: ubuntu-latest
strategy:
matrix:
service: [backend, app]
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -36,17 +33,12 @@ jobs:
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Build and push Docker image for ${{ matrix.service }}
- name: Build and push Docker image
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica; \
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
--cache-to=type=inline \
@@ -54,17 +46,12 @@ jobs:
-t itzcrazykns1337/${IMAGE_NAME}:main \
--push .
- name: Build and push release Docker image for ${{ matrix.service }}
- name: Build and push release Docker image
if: github.event_name == 'release'
run: |
docker buildx create --use
if [[ "${{ matrix.service }}" == "backend" ]]; then \
DOCKERFILE=backend.dockerfile; \
IMAGE_NAME=perplexica-backend; \
else \
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica-frontend; \
fi
DOCKERFILE=app.dockerfile; \
IMAGE_NAME=perplexica; \
docker buildx build --platform linux/amd64,linux/arm64 \
--cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
--cache-to=type=inline \

.gitignore (vendored, 10 changes)

@@ -2,12 +2,11 @@
node_modules/
npm-debug.log
yarn-error.log
package-lock.json
# Build output
/.next/
/out/
/dist/
.next/
out/
dist/
# IDE/Editor specific
.vscode/
@@ -38,6 +37,3 @@ Thumbs.db
# Db
db.sqlite
/searxng
# Dev
docker-compose-dev.yaml

View File

@@ -6,7 +6,6 @@ const config = {
endOfLine: 'auto',
singleQuote: true,
tabWidth: 2,
semi: true,
};
module.exports = config;

View File

@@ -1,5 +1,21 @@
# 🚀 Perplexica - An AI-powered search engine 🔎 <!-- omit in toc -->
<div align="center" markdown="1">
<sup>Special thanks to:</sup>
<br>
<br>
<a href="https://www.warp.dev/perplexica">
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/775dd593-9b5f-40f1-bf48-479faff4c27b">
</a>
### [Warp, the AI Devtool that lives in your terminal](https://www.warp.dev/perplexica)
[Available for MacOS, Linux, & Windows](https://www.warp.dev/perplexica)
</div>
<hr/>
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)

View File

@@ -1,15 +1,27 @@
FROM node:20.18.0-alpine
ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
FROM node:20.18.0-alpine AS builder
WORKDIR /home/perplexica
COPY ui /home/perplexica/
COPY package.json yarn.lock ./
RUN yarn install --frozen-lockfile
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
RUN mkdir -p /home/perplexica/data
RUN yarn build
CMD ["yarn", "start"]
FROM node:20.18.0-alpine
WORKDIR /home/perplexica
COPY --from=builder /home/perplexica/public ./public
COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data
RUN mkdir /home/perplexica/uploads
CMD ["node", "server.js"]

View File

@@ -1,17 +0,0 @@
FROM node:18-slim
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build
CMD ["yarn", "start"]

View File

@@ -4,46 +4,26 @@ services:
volumes:
- ./searxng:/etc/searxng:rw
ports:
- '4000:8080'
- 4000:8080
networks:
- perplexica-network
restart: unless-stopped
perplexica-backend:
app:
image: itzcrazykns1337/perplexica:main
build:
context: .
dockerfile: backend.dockerfile
image: itzcrazykns1337/perplexica-backend:main
dockerfile: app.dockerfile
environment:
- SEARXNG_API_URL=http://searxng:8080
depends_on:
- searxng
ports:
- '3001:3001'
- 3000:3000
networks:
- perplexica-network
volumes:
- backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads
- ./config.toml:/home/perplexica/config.toml
extra_hosts:
- 'host.docker.internal:host-gateway'
networks:
- perplexica-network
restart: unless-stopped
perplexica-frontend:
build:
context: .
dockerfile: app.dockerfile
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
image: itzcrazykns1337/perplexica-frontend:main
depends_on:
- perplexica-backend
ports:
- '3000:3000'
networks:
- perplexica-network
restart: unless-stopped
networks:

View File

@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
export default defineConfig({
dialect: 'sqlite',
schema: './src/db/schema.ts',
schema: './src/lib/db/schema.ts',
out: './drizzle',
dbCredentials: {
url: './data/db.sqlite',

next-env.d.ts (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

View File

@@ -1,5 +1,6 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
output: 'standalone',
images: {
remotePatterns: [
{
@@ -7,6 +8,7 @@ const nextConfig = {
},
],
},
serverExternalPackages: ['pdf-parse'],
};
export default nextConfig;

View File

@@ -1,53 +1,62 @@
{
"name": "perplexica-backend",
"name": "perplexica-frontend",
"version": "1.10.0-rc3",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
"start": "npm run db:push && node dist/app.js",
"build": "tsc",
"dev": "nodemon --ignore uploads/ src/app.ts ",
"db:push": "drizzle-kit push sqlite",
"format": "prettier . --check",
"format:write": "prettier . --write"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.10",
"@types/cors": "^2.8.17",
"@types/express": "^4.17.21",
"@types/html-to-text": "^9.0.4",
"@types/multer": "^1.4.12",
"@types/pdf-parse": "^1.1.4",
"@types/readable-stream": "^4.0.11",
"@types/ws": "^8.5.12",
"drizzle-kit": "^0.22.7",
"nodemon": "^3.1.0",
"prettier": "^3.2.5",
"ts-node": "^10.9.2",
"typescript": "^5.4.3"
"dev": "next dev",
"build": "npm run db:push && next build",
"start": "next start",
"lint": "next lint",
"format:write": "prettier . --write",
"db:push": "drizzle-kit push"
},
"dependencies": {
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@langchain/anthropic": "^0.2.3",
"@langchain/community": "^0.2.16",
"@langchain/google-genai": "^0.0.23",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/openai": "^0.0.25",
"@xenova/transformers": "^2.17.1",
"axios": "^1.6.8",
"better-sqlite3": "^11.0.0",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"axios": "^1.8.3",
"better-sqlite3": "^11.9.1",
"clsx": "^2.1.0",
"compute-cosine-similarity": "^1.1.0",
"compute-dot": "^1.1.0",
"cors": "^2.8.5",
"dotenv": "^16.4.5",
"drizzle-orm": "^0.31.2",
"express": "^4.19.2",
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"langchain": "^0.1.30",
"mammoth": "^1.8.0",
"multer": "^1.4.5-lts.1",
"lucide-react": "^0.363.0",
"markdown-to-jsx": "^7.7.2",
"next": "^15.2.2",
"next-themes": "^0.3.0",
"pdf-parse": "^1.1.1",
"winston": "^3.13.0",
"ws": "^8.17.1",
"react": "^18",
"react-dom": "^18",
"react-text-to-speech": "^0.14.5",
"react-textarea-autosize": "^8.5.3",
"sonner": "^1.4.41",
"tailwind-merge": "^2.2.2",
"winston": "^3.17.0",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",
"@types/html-to-text": "^9.0.4",
"@types/node": "^20",
"@types/pdf-parse": "^1.1.4",
"@types/react": "^18",
"@types/react-dom": "^18",
"autoprefixer": "^10.0.1",
"drizzle-kit": "^0.30.5",
"eslint": "^8",
"eslint-config-next": "14.1.4",
"postcss": "^8",
"prettier": "^3.2.5",
"tailwindcss": "^3.3.0",
"typescript": "^5"
}
}

View File

(binary image changed: 1.3 KiB before, 1.3 KiB after)

View File

(binary image changed: 629 B before, 629 B after)

View File

@@ -3,12 +3,6 @@ PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
[SEARCH_ENGINE_BACKENDS] # "google" | "searxng" | "bing" | "brave" | "yacy"
SEARCH = "searxng"
IMAGE = "searxng"
VIDEO = "searxng"
NEWS = "searxng"
[MODELS.OPENAI]
API_KEY = ""
@@ -24,22 +18,10 @@ API_KEY = ""
[MODELS.CUSTOM_OPENAI]
API_KEY = ""
API_URL = ""
MODEL_NAME = ""
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[SEARCH_ENGINES.GOOGLE]
API_KEY = ""
CSE_ID = ""
[SEARCH_ENGINES.SEARXNG]
ENDPOINT = ""
[SEARCH_ENGINES.BING]
SUBSCRIPTION_KEY = ""
[SEARCH_ENGINES.BRAVE]
API_KEY = ""
[SEARCH_ENGINES.YACY]
ENDPOINT = ""
[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768

View File

@@ -15,5 +15,3 @@ server:
engines:
- name: wolframalpha
disabled: false
- name: qwant
disabled: true

View File

@@ -1,38 +0,0 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});

src/app/api/chat/route.ts (new file, 360 lines)

@@ -0,0 +1,360 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
const searchHandlers: Record<string, MetaSearchAgent> = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};
type Message = {
messageId: string;
chatId: string;
content: string;
};
type ChatModel = {
provider: string;
name: string;
};
type EmbeddingModel = {
provider: string;
name: string;
};
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
};
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
) => {
let receivedMessage = '';
let sources: any[] = [];
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
receivedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
sources = parsedData.data;
}
});
stream.on('end', () => {
writer.write(
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
}) + '\n',
),
);
writer.close();
db.insert(messagesSchema)
.values({
content: receivedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
});
stream.on('error', (data) => {
const parsedData = JSON.parse(data);
writer.write(
encoder.encode(
JSON.stringify({
type: 'error',
data: parsedData.data,
}),
),
);
writer.close();
});
};
const handleHistorySave = async (
message: Message,
humanMessageId: string,
focusMode: string,
files: string[],
) => {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, message.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: message.chatId,
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: message.content,
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, message.chatId),
),
)
.execute();
}
};
export const POST = async (req: Request) => {
try {
const body = (await req.json()) as Body;
const { message } = body;
if (message.content === '') {
return Response.json(
{
message: 'Please provide a message to process',
},
{ status: 400 },
);
}
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.name || Object.keys(chatModelProvider)[0]
];
const embeddingProvider =
embeddingModelProviders[
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
];
const embeddingModel =
embeddingProvider[
body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
];
let llm: BaseChatModel | undefined;
let embedding = embeddingModel.model;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
if (!embedding) {
return Response.json(
{ error: 'Invalid embedding model' },
{ status: 400 },
);
}
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const handler = searchHandlers[body.focusMode];
if (!handler) {
return Response.json(
{
message: 'Invalid focus mode',
},
{ status: 400 },
);
}
const stream = await handler.searchAndAnswer(
message.content,
history,
llm,
embedding,
body.optimizationMode,
body.files,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
headers: {
'Content-Type': 'text/event-stream',
Connection: 'keep-alive',
'Cache-Control': 'no-cache, no-transform',
},
});
} catch (err) {
console.error('An error occurred while processing chat request:', err);
return Response.json(
{ message: 'An error occurred while processing chat request' },
{ status: 500 },
);
}
};
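
The new route answers over a plain HTTP response that streams newline-delimited JSON events ('message', 'sources', 'messageEnd', 'error') in place of the removed WebSocket server. Below is a minimal client-side sketch for consuming it; the /api/chat path follows from the route's file location, while the ids and model names are placeholders, not values taken from the diff:

async function streamChat(question: string, chatId: string) {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      message: { messageId: crypto.randomUUID(), chatId, content: question },
      optimizationMode: 'balanced',
      focusMode: 'webSearch',
      history: [],
      files: [],
      // Placeholder model choices; valid names come from GET /api/models.
      chatModel: { provider: 'openai', name: 'gpt-4o-mini' },
      embeddingModel: { provider: 'openai', name: 'text-embedding-3-small' },
    }),
  });
  if (!res.ok || !res.body) throw new Error(`chat request failed: ${res.status}`);

  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    // Events are '\n'-delimited, matching the writer in handleEmitterEvents.
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? '';
    for (const line of lines) {
      if (!line.trim()) continue;
      const event = JSON.parse(line);
      if (event.type === 'message') console.log(event.data);
      else if (event.type === 'sources') console.log(`(${event.data.length} sources)`);
      else if (event.type === 'messageEnd') return;
    }
  }
}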

View File

@@ -0,0 +1,69 @@
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
export const GET = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, id),
});
return Response.json(
{
chat: chatExists,
messages: chatMessages,
},
{ status: 200 },
);
} catch (err) {
console.error('Error in getting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
export const DELETE = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
await db.delete(chats).where(eq(chats.id, id)).execute();
await db.delete(messages).where(eq(messages.chatId, id)).execute();
return Response.json(
{ message: 'Chat deleted successfully' },
{ status: 200 },
);
} catch (err) {
console.error('Error in deleting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
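
A short usage sketch for these handlers; the /api/chats/:id path is inferred from the app-router conventions used elsewhere in this diff (the exact file location is not shown here), and the id value is a placeholder:

async function removeChat(id: string) {
  const res = await fetch(`/api/chats/${id}`);
  if (res.status === 404) return; // chat does not exist
  const { chat, messages } = await res.json();
  console.log(`deleting "${chat.title}" with ${messages.length} messages`);
  await fetch(`/api/chats/${id}`, { method: 'DELETE' });
}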

View File

@@ -0,0 +1,15 @@
import db from '@/lib/db';
export const GET = async (req: Request) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return Response.json({ chats: chats }, { status: 200 });
} catch (err) {
console.error('Error in getting chats: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

src/app/api/config/route.ts (new file, 109 lines)

@@ -0,0 +1,109 @@
import {
getAnthropicApiKey,
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
getGeminiApiKey,
getGroqApiKey,
getOllamaApiEndpoint,
getOpenaiApiKey,
updateConfig,
} from '@/lib/config';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const config: Record<string, any> = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: chatModelProviders[provider][model].displayName,
};
});
}
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: embeddingModelProviders[provider][model].displayName,
};
});
}
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
return Response.json({ ...config }, { status: 200 });
} catch (err) {
console.error('An error occurred while getting config:', err);
return Response.json(
{ message: 'An error occurred while getting config' },
{ status: 500 },
);
}
};
export const POST = async (req: Request) => {
try {
const config = await req.json();
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
};
updateConfig(updatedConfig);
return Response.json({ message: 'Config updated' }, { status: 200 });
} catch (err) {
console.error('An error occurred while updating config:', err);
return Response.json(
{ message: 'An error occurred while updating config' },
{ status: 500 },
);
}
};
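
A round-trip sketch against this route, assuming the /api/config path implied by the file location; the POST body field names (openaiApiKey, ollamaApiUrl, and so on) match what the handler reads above:

async function setOllamaUrl(url: string) {
  // GET returns the key fields plus the provider lists; POST only reads the key fields.
  const config = await fetch('/api/config').then((r) => r.json());
  await fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...config, ollamaApiUrl: url }),
  });
}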

View File

@@ -0,0 +1,61 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
export const GET = async (req: Request) => {
try {
const data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
).results;
}),
])
)
.map((result) => result)
.flat()
.sort(() => Math.random() - 0.5);
return Response.json(
{
blogs: data,
},
{
status: 200,
},
);
} catch (err) {
console.error(`An error occurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',
},
{
status: 500,
},
);
}
};

View File

@@ -0,0 +1,83 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching images: ${err}`);
return Response.json(
{ message: 'An error occurred while searching images' },
{ status: 500 },
);
}
};
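
This image route (and the video route later in the diff) accept the same body shape. A minimal sketch, assuming the endpoint is mounted at /api/images; omitting chatModel falls back to the first available provider, as the handler shows:

async function searchImages(query: string) {
  const res = await fetch('/api/images', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, chatHistory: [] }),
  });
  const { images } = await res.json();
  // Result field names as produced by the image search chain (an assumption here).
  return images as { img_src: string; url: string; title: string }[];
}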

View File

@@ -0,0 +1,47 @@
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete (chatModelProviders[provider][model] as { model?: unknown })
.model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete (embeddingModelProviders[provider][model] as { model?: unknown })
.model;
});
});
return Response.json(
{
chatModelProviders,
embeddingModelProviders,
},
{
status: 200,
},
);
} catch (err) {
console.error('An error occurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',
},
{
status: 500,
},
);
}
};
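
Because the live model instances are deleted before serialization, the route returns plain metadata. A consumption sketch, assuming the /api/models path:

async function listChatModels() {
  const { chatModelProviders } = await fetch('/api/models').then((r) => r.json());
  for (const [provider, models] of Object.entries(chatModelProviders)) {
    // After the deletes above, each provider maps model keys to { displayName }.
    console.log(provider, Object.keys(models as Record<string, unknown>));
  }
}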

View File

@@ -0,0 +1,81 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
);
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error occurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error occurred while generating suggestions' },
{ status: 500 },
);
}
};
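
A usage sketch, assuming the /api/suggestions path; chatHistory entries use the role/content shape that the handler maps into LangChain messages above:

async function getSuggestions() {
  const res = await fetch('/api/suggestions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatHistory: [
        { role: 'user', content: 'Tell me about SearXNG' },
        { role: 'assistant', content: 'SearXNG is a self-hostable metasearch engine.' },
      ],
    }),
  });
  const { suggestions } = await res.json();
  return suggestions as string[];
}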

View File

@@ -0,0 +1,134 @@
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
interface FileRes {
fileName: string;
fileExtension: string;
fileId: string;
}
const uploadDir = path.join(process.cwd(), 'uploads');
if (!fs.existsSync(uploadDir)) {
fs.mkdirSync(uploadDir, { recursive: true });
}
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
export async function POST(req: Request) {
try {
const formData = await req.formData();
const files = formData.getAll('files') as File[];
const embedding_model = formData.get('embedding_model');
const embedding_model_provider = formData.get('embedding_model_provider');
if (!embedding_model || !embedding_model_provider) {
return NextResponse.json(
{ message: 'Missing embedding model or provider' },
{ status: 400 },
);
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel =
embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
let embeddingsModel =
embeddingModels[provider as string]?.[embeddingModel as string]?.model;
if (!embeddingsModel) {
return NextResponse.json(
{ message: 'Invalid embedding model selected' },
{ status: 400 },
);
}
const processedFiles: FileRes[] = [];
await Promise.all(
files.map(async (file: any) => {
const fileExtension = file.name.split('.').pop();
if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
return NextResponse.json(
{ message: 'File type not supported' },
{ status: 400 },
);
}
const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
const filePath = path.join(uploadDir, uniqueFileName);
const buffer = Buffer.from(await file.arrayBuffer());
fs.writeFileSync(filePath, new Uint8Array(buffer));
let docs: any[] = [];
if (fileExtension === 'pdf') {
const loader = new PDFLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'docx') {
const loader = new DocxLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'txt') {
const text = fs.readFileSync(filePath, 'utf-8');
docs = [
new Document({ pageContent: text, metadata: { title: file.name } }),
];
}
const splitted = await splitter.splitDocuments(docs);
const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(
extractedDataPath,
JSON.stringify({
title: file.name,
contents: splitted.map((doc) => doc.pageContent),
}),
);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsDataPath = filePath.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(
embeddingsDataPath,
JSON.stringify({
title: file.name,
embeddings,
}),
);
processedFiles.push({
fileName: file.name,
fileExtension: fileExtension,
fileId: uniqueFileName.replace(/\.\w+$/, ''),
});
}),
);
return NextResponse.json({
files: processedFiles,
});
} catch (error) {
console.error('Error uploading file:', error);
return NextResponse.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
}
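
A browser-side sketch for this handler; the form field names match the formData reads above, while the /api/uploads path and the provider/model values are assumptions (valid keys come from /api/models):

async function uploadDocuments(files: File[]) {
  const form = new FormData();
  for (const file of files) form.append('files', file);
  form.append('embedding_model_provider', 'openai'); // assumed provider key
  form.append('embedding_model', 'text-embedding-3-small'); // assumed model key
  const res = await fetch('/api/uploads', { method: 'POST', body: form });
  const { files: processed } = await res.json();
  return processed as { fileName: string; fileExtension: string; fileId: string }[];
}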

View File

@@ -0,0 +1,83 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ videos }, { status: 200 });
} catch (err) {
console.error(`An error occurred while searching videos: ${err}`);
return Response.json(
{ message: 'An error occurred while searching videos' },
{ status: 500 },
);
}
};

View File

@@ -0,0 +1,9 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
};
export default Page;

View File

@@ -19,7 +19,7 @@ const Page = () => {
useEffect(() => {
const fetchData = async () => {
try {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
const res = await fetch(`/api/discover`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

View File

(binary image changed: 25 KiB before, 25 KiB after)

View File

@@ -21,7 +21,7 @@ const Page = () => {
const fetchChats = async () => {
setLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
const res = await fetch(`/api/chats`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',

View File

@@ -23,18 +23,6 @@ interface SettingsType {
customOpenaiApiKey: string;
customOpenaiApiUrl: string;
customOpenaiModelName: string;
searchEngineBackends: {
search: string;
image: string;
video: string;
news: string;
};
searxngEndpoint: string;
googleApiKey: string;
googleCseId: string;
bingSubscriptionKey: string;
braveApiKey: string;
yacyEndpoint: string;
}
interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
@@ -124,17 +112,11 @@ const Page = () => {
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
const [searchEngineBackends, setSearchEngineBackends] = useState({
search: '',
image: '',
video: '',
news: '',
});
useEffect(() => {
const fetchConfig = async () => {
setIsLoading(true);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
const res = await fetch(`/api/config`, {
headers: {
'Content-Type': 'application/json',
},
@@ -143,16 +125,6 @@ const Page = () => {
const data = (await res.json()) as SettingsType;
setConfig(data);
// Set search engine backends if they exist in the response
if (data.searchEngineBackends) {
setSearchEngineBackends({
search: data.searchEngineBackends.search || '',
image: data.searchEngineBackends.image || '',
video: data.searchEngineBackends.video || '',
news: data.searchEngineBackends.news || '',
});
}
const chatModelProvidersKeys = Object.keys(data.chatModelProviders || {});
const embeddingModelProvidersKeys = Object.keys(
data.embeddingModelProviders || {},
@@ -215,16 +187,13 @@ const Page = () => {
[key]: value,
} as SettingsType;
const response = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/config`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(updatedConfig),
const response = await fetch(`/api/config`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify(updatedConfig),
});
if (!response.ok) {
throw new Error('Failed to update config');
@@ -236,7 +205,7 @@ const Page = () => {
key.toLowerCase().includes('api') ||
key.toLowerCase().includes('url')
) {
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
const res = await fetch(`/api/config`, {
headers: {
'Content-Type': 'application/json',
},
@@ -359,8 +328,6 @@ const Page = () => {
localStorage.setItem('embeddingModelProvider', value);
} else if (key === 'embeddingModel') {
localStorage.setItem('embeddingModel', value);
} else if (key === 'searchEngineBackends') {
localStorage.setItem('searchEngineBackends', value);
}
} catch (err) {
console.error('Failed to save:', err);
@@ -823,234 +790,6 @@ const Page = () => {
</div>
</div>
</SettingsSection>
<SettingsSection title="Search Engine Settings">
<div className="flex flex-col space-y-4">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Default Search Engine
</p>
<Select
value={searchEngineBackends.search}
onChange={(e) => {
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
search: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
search: value,
});
}}
options={[
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
{ value: 'yacy', label: 'YaCy' },
]}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Image Search Engine
</p>
<Select
value={searchEngineBackends.image}
onChange={(e) => {
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
image: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
image: value,
});
}}
options={[
{ value: '', label: 'Use Default Search Engine' },
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
]}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Video Search Engine
</p>
<Select
value={searchEngineBackends.video}
onChange={(e) => {
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
video: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
video: value,
});
}}
options={[
{ value: '', label: 'Use Default Search Engine' },
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
]}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
News Search Engine
</p>
<Select
value={searchEngineBackends.news}
onChange={(e) => {
const value = e.target.value;
setSearchEngineBackends((prev) => ({
...prev,
news: value,
}));
saveConfig('searchEngineBackends', {
...searchEngineBackends,
news: value,
});
}}
options={[
{ value: '', label: 'Use Default Search Engine' },
{ value: 'searxng', label: 'SearXNG' },
{ value: 'google', label: 'Google' },
{ value: 'bing', label: 'Bing' },
{ value: 'brave', label: 'Brave' },
]}
/>
</div>
<div className="pt-4 border-t border-light-200 dark:border-dark-200">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
SearXNG Endpoint
</p>
<Input
type="text"
placeholder="SearXNG API Endpoint"
value={config.searxngEndpoint || ''}
isSaving={savingStates['searxngEndpoint']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
searxngEndpoint: e.target.value,
}));
}}
onSave={(value) => saveConfig('searxngEndpoint', value)}
/>
</div>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Google API Key
</p>
<Input
type="text"
placeholder="Google API Key"
value={config.googleApiKey || ''}
isSaving={savingStates['googleApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
googleApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('googleApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Google CSE ID
</p>
<Input
type="text"
placeholder="Google Custom Search Engine ID"
value={config.googleCseId || ''}
isSaving={savingStates['googleCseId']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
googleCseId: e.target.value,
}));
}}
onSave={(value) => saveConfig('googleCseId', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Bing Subscription Key
</p>
<Input
type="text"
placeholder="Bing Subscription Key"
value={config.bingSubscriptionKey || ''}
isSaving={savingStates['bingSubscriptionKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
bingSubscriptionKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('bingSubscriptionKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Brave API Key
</p>
<Input
type="text"
placeholder="Brave API Key"
value={config.braveApiKey || ''}
isSaving={savingStates['braveApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
braveApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('braveApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
YaCy Endpoint
</p>
<Input
type="text"
placeholder="YaCy API Endpoint"
value={config.yacyEndpoint || ''}
isSaving={savingStates['yacyEndpoint']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
yacyEndpoint: e.target.value,
}));
}}
onSave={(value) => saveConfig('yacyEndpoint', value)}
/>
</div>
</div>
</SettingsSection>
</div>
)
)}

View File

@@ -1,171 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getImageSearchEngineBackend } from '../config';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: What is a cat?
Rephrased: A cat
2. Follow up question: What is a car? How does it works?
Rephrased: Car working
3. Follow up question: How does an AC work?
Rephrased: AC working
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
type ImageSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
async function performImageSearch(query: string) {
const searchEngine = getImageSearchEngineBackend();
let images = [];
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(query);
images = googleResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.displayLink,
};
}
})
.filter(Boolean);
break;
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['google images', 'bing images'],
pageno: 1,
});
searxResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
break;
}
case 'brave': {
const braveResult = await searchBraveAPI(query);
images = braveResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
case 'yacy': {
const yacyResult = await searchYaCy(query);
images = yacyResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
case 'bing': {
const bingResult = await searchBingAPI(query);
images = bingResult.results
.map((result) => {
if (result.img_src && result.url && result.title) {
return {
img_src: result.img_src,
url: result.url,
title: result.title,
source: result.url,
};
}
})
.filter(Boolean);
break;
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
return images;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: ImageSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const images = await performImageSearch(input);
return images.slice(0, 10);
}),
]);
};
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const imageSearchChain = createImageSearchChain(llm);
return imageSearchChain.invoke(input);
};
export default handleImageSearch;

View File

@@ -1,169 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { getVideoSearchEngineBackend } from '../config';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
const strParser = new StringOutputParser();
async function performVideoSearch(query: string) {
const searchEngine = getVideoSearchEngineBackend();
const youtubeQuery = `${query} site:youtube.com`;
let videos = [];
switch (searchEngine) {
case 'google': {
const googleResult = await searchGooglePSE(youtubeQuery);
googleResult.results.forEach((result) => {
// Use .results instead of .originalres
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
case 'searxng': {
const searxResult = await searchSearxng(query, {
engines: ['youtube'],
});
searxResult.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
break;
}
case 'brave': {
const braveResult = await searchBraveAPI(youtubeQuery);
braveResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
case 'yacy': {
console.log('Not available for yacy');
videos = [];
break;
}
case 'bing': {
const bingResult = await searchBingAPI(youtubeQuery);
bingResult.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
const videoId = new URL(result.url).searchParams.get('v');
videos.push({
img_src: result.img_src,
url: result.url,
title: result.title,
iframe_src: videoId
? `https://www.youtube.com/embed/${videoId}`
: null,
});
}
});
break;
}
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
return videos;
}
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const videos = await performVideoSearch(input);
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
};
export default handleVideoSearch;

View File

@@ -48,11 +48,17 @@ const Chat = ({
});
useEffect(() => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
};
if (messages.length === 1) {
document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
}
if (messages[messages.length - 1]?.role == 'user') {
scroll();
}
}, [messages]);
return (

View File

@@ -29,280 +29,154 @@ export interface File {
fileId: string;
}
const useSocket = (
url: string,
setIsWSReady: (ready: boolean) => void,
setError: (error: boolean) => void,
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
const wsRef = useRef<WebSocket | null>(null);
const reconnectTimeoutRef = useRef<NodeJS.Timeout>();
const retryCountRef = useRef(0);
const isCleaningUpRef = useRef(false);
const MAX_RETRIES = 3;
const INITIAL_BACKOFF = 1000; // 1 second
const isConnectionErrorRef = useRef(false);
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const getBackoffDelay = (retryCount: number) => {
return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
};
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
useEffect(() => {
const connectWs = async () => {
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
return toast.error('No chat models available');
}
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem(
'embeddingModelProvider',
);
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/models`,
{
headers: {
'Content-Type': 'application/json',
},
},
).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (
!chatModelProviders ||
Object.keys(chatModelProviders).length === 0
)
return toast.error('No chat models available');
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem(
'embeddingModelProvider',
embeddingModelProvider,
);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
const wsURL = new URL(url);
const searchParams = new URLSearchParams({});
searchParams.append('chatModel', chatModel!);
searchParams.append('chatModelProvider', chatModelProvider);
if (chatModelProvider === 'custom_openai') {
searchParams.append(
'openAIApiKey',
localStorage.getItem('openAIApiKey')!,
);
searchParams.append(
'openAIBaseURL',
localStorage.getItem('openAIBaseURL')!,
);
}
searchParams.append('embeddingModel', embeddingModel!);
searchParams.append('embeddingModelProvider', embeddingModelProvider);
wsURL.search = searchParams.toString();
const ws = new WebSocket(wsURL.toString());
wsRef.current = ws;
const timeoutId = setTimeout(() => {
if (ws.readyState !== 1) {
toast.error(
'Failed to connect to the server. Please try again later.',
);
}
}, 10000);
ws.addEventListener('message', (e) => {
const data = JSON.parse(e.data);
if (data.type === 'signal' && data.data === 'open') {
const interval = setInterval(() => {
if (ws.readyState === 1) {
setIsWSReady(true);
setError(false);
if (retryCountRef.current > 0) {
toast.success('Connection restored.');
}
retryCountRef.current = 0;
clearInterval(interval);
}
}, 5);
clearTimeout(timeoutId);
console.debug(new Date(), 'ws:connected');
}
if (data.type === 'error') {
isConnectionErrorRef.current = true;
setError(true);
toast.error(data.data);
}
});
ws.onerror = () => {
clearTimeout(timeoutId);
setIsWSReady(false);
toast.error('WebSocket connection error.');
};
ws.onclose = () => {
clearTimeout(timeoutId);
setIsWSReady(false);
console.debug(new Date(), 'ws:disconnected');
if (!isCleaningUpRef.current && !isConnectionErrorRef.current) {
toast.error('Connection lost. Attempting to reconnect...');
attemptReconnect();
}
};
} catch (error) {
console.debug(new Date(), 'ws:error', error);
setIsWSReady(false);
attemptReconnect();
}
};
const attemptReconnect = () => {
retryCountRef.current += 1;
if (retryCountRef.current > MAX_RETRIES) {
console.debug(new Date(), 'ws:max_retries');
setError(true);
toast.error(
'Unable to connect to server after multiple attempts. Please refresh the page to try again.',
);
return;
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
const backoffDelay = getBackoffDelay(retryCountRef.current);
console.debug(
new Date(),
`ws:retry attempt=${retryCountRef.current}/${MAX_RETRIES} delay=${backoffDelay}ms`,
);
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
reconnectTimeoutRef.current = setTimeout(() => {
connectWs();
}, backoffDelay);
};
connectWs();
return () => {
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current);
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.close();
isCleaningUpRef.current = true;
console.debug(new Date(), 'ws:cleanup');
}
};
}, [url, setIsWSReady, setError]);
return wsRef.current;
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
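Taken together, the fallback logic in checkConfig reduces to one rule: keep the stored provider if the server still offers it, otherwise take the first provider that exposes at least one model. A condensed sketch of that rule (hypothetical helper, not part of this change):

const pickProvider = (
  providers: Record<string, Record<string, unknown>>,
  stored: string | null,
): string | undefined => {
  // Prefer the locally stored key when the server still lists it.
  if (stored && providers[stored]) return stored;
  const keys = Object.keys(providers);
  // Otherwise fall back to the first provider that has any model at all.
  return keys.find((key) => Object.keys(providers[key]).length > 0) ?? keys[0];
};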
const loadMessages = async (
@@ -315,15 +189,12 @@ const loadMessages = async (
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
);
});
if (res.status === 404) {
setNotFound(true);
@@ -373,15 +244,32 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
const [isWSReady, setIsWSReady] = useState(false);
const ws = useSocket(
process.env.NEXT_PUBLIC_WS_URL!,
setIsWSReady,
setHasError,
);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
@@ -399,8 +287,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [notFound, setNotFound] = useState(false);
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
useEffect(() => {
if (
chatId &&
@@ -426,16 +312,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
return () => {
if (ws?.readyState === 1) {
ws.close();
console.debug(new Date(), 'ws:cleanup');
}
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const messagesRef = useRef<Message[]>([]);
useEffect(() => {
@@ -443,18 +319,18 @@ const ChatWindow = ({ id }: { id?: string }) => {
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isWSReady) {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isWSReady]);
}, [isMessagesLoaded, isConfigReady]);
const sendMessage = async (message: string, messageId?: string) => {
if (loading) return;
if (!ws || ws.readyState !== WebSocket.OPEN) {
toast.error('Cannot send message while disconnected');
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
@@ -467,21 +343,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
ws.send(
JSON.stringify({
type: 'message',
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: [...chatHistory, ['human', message]],
}),
);
setMessages((prevMessages) => [
...prevMessages,
{
@@ -493,9 +354,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
},
]);
const messageHandler = async (e: MessageEvent) => {
const data = JSON.parse(e.data);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
@@ -558,11 +417,25 @@ const ChatWindow = ({ id }: { id?: string }) => {
['assistant', recievedMessage],
]);
ws?.removeEventListener('message', messageHandler);
setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
@@ -579,21 +452,62 @@ const ChatWindow = ({ id }: { id?: string }) => {
}),
);
}
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document.getElementById('search-images')?.click();
}
if (autoVideoSearch === 'true') {
document.getElementById('search-videos')?.click();
}
}
};
ws?.addEventListener('message', messageHandler);
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
};
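Note that when a chunk ends mid-line, the catch above leaves partialChunk intact, so lines that were already handed to messageHandler can be parsed and delivered a second time on the next read. A more conventional NDJSON reader (a sketch, not part of this diff) keeps only the trailing partial line in the buffer:

const readNdjson = async (
  body: ReadableStream<Uint8Array>,
  onMessage: (data: unknown) => void,
) => {
  const reader = body.getReader();
  const decoder = new TextDecoder('utf-8');
  let buffer = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep the possibly incomplete last line
    for (const line of lines) {
      if (line.trim()) onMessage(JSON.parse(line)); // each complete line handled exactly once
    }
  }
  if (buffer.trim()) onMessage(JSON.parse(buffer)); // flush a final unterminated line
};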
const rewrite = (messageId: string) => {
@@ -614,11 +528,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
};
useEffect(() => {
if (isReady && initialMessage && ws?.readyState === 1) {
if (isReady && initialMessage && isConfigReady) {
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [ws?.readyState, isReady, initialMessage, isWSReady]);
}, [isConfigReady, isReady, initialMessage]);
if (hasError) {
return (

View File

@@ -29,15 +29,12 @@ const DeleteChat = ({
const handleDelete = async () => {
setLoading(true);
try {
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
{
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
},
const res = await fetch(`/api/chats/${chatId}`, {
method: 'DELETE',
headers: {
'Content-Type': 'application/json',
},
);
});
if (res.status != 200) {
throw new Error('Failed to delete chat');

View File

@@ -193,10 +193,12 @@ const MessageBox = ({
<SearchImages
query={history[messageIndex - 1].content}
chatHistory={history.slice(0, messageIndex - 1)}
messageId={message.messageId}
/>
<SearchVideos
chatHistory={history.slice(0, messageIndex - 1)}
query={history[messageIndex - 1].content}
messageId={message.messageId}
/>
</div>
</div>

View File

@@ -41,7 +41,7 @@ const Attach = ({
data.append('embedding_model_provider', embeddingModelProvider!);
data.append('embedding_model', embeddingModel!);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
const res = await fetch(`/api/uploads`, {
method: 'POST',
body: data,
});

View File

@@ -39,7 +39,7 @@ const AttachSmall = ({
data.append('embedding_model_provider', embeddingModelProvider!);
data.append('embedding_model', embeddingModel!);
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
const res = await fetch(`/api/uploads`, {
method: 'POST',
body: data,
});

View File

@@ -45,25 +45,13 @@ const focusModes = [
key: 'youtubeSearch',
title: 'Youtube',
description: 'Search and watch videos',
icon: (
<SiYoutube
className="h-5 w-auto mr-0.5"
onPointerEnterCapture={undefined}
onPointerLeaveCapture={undefined}
/>
),
icon: <SiYoutube className="h-5 w-auto mr-0.5" />,
},
{
key: 'redditSearch',
title: 'Reddit',
description: 'Search for discussions and opinions',
icon: (
<SiReddit
className="h-5 w-auto mr-0.5"
onPointerEnterCapture={undefined}
onPointerLeaveCapture={undefined}
/>
),
icon: <SiReddit className="h-5 w-auto mr-0.5" />,
},
];

View File

@@ -69,11 +69,15 @@ const MessageSources = ({ sources }: { sources: Document[] }) => {
<div className="flex flex-row items-center space-x-1">
{sources.slice(3, 6).map((source, i) => {
return source.metadata.url === 'File' ? (
<div className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full">
<div
key={i}
className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full"
>
<File size={12} className="text-white/70" />
</div>
) : (
<img
key={i}
src={`https://s2.googleusercontent.com/s2/favicons?domain_url=${source.metadata.url}`}
width={16}
height={16}

View File

@@ -14,9 +14,11 @@ type Image = {
const SearchImages = ({
query,
chatHistory,
messageId,
}: {
query: string;
chatHistory: Message[];
messageId: string;
}) => {
const [images, setImages] = useState<Image[] | null>(null);
const [loading, setLoading] = useState(false);
@@ -27,7 +29,7 @@ const SearchImages = ({
<>
{!loading && images === null && (
<button
id="search-images"
id={`search-images-${messageId}`}
onClick={async () => {
setLoading(true);
@@ -37,27 +39,24 @@ const SearchImages = ({
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/images`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
const res = await fetch(`/api/images`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
});
const data = await res.json();

View File

@@ -27,9 +27,11 @@ declare module 'yet-another-react-lightbox' {
const Searchvideos = ({
query,
chatHistory,
messageId,
}: {
query: string;
chatHistory: Message[];
messageId: string;
}) => {
const [videos, setVideos] = useState<Video[] | null>(null);
const [loading, setLoading] = useState(false);
@@ -42,7 +44,7 @@ const Searchvideos = ({
<>
{!loading && videos === null && (
<button
id="search-videos"
id={`search-videos-${messageId}`}
onClick={async () => {
setLoading(true);
@@ -52,27 +54,24 @@ const Searchvideos = ({
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/videos`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
const res = await fetch(`/api/videos`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
);
body: JSON.stringify({
query: query,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,
...(chatModelProvider === 'custom_openai' && {
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
},
}),
});
const data = await res.json();

View File

@@ -7,7 +7,7 @@ export const getSuggestions = async (chatHisory: Message[]) => {
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/suggestions`, {
const res = await fetch(`/api/suggestions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',

View File

@@ -0,0 +1,90 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: What is a cat?
Rephrased: A cat
2. Follow up question: What is a car? How does it work?
Rephrased: Car working
3. Follow up question: How does an AC work?
Rephrased: AC working
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
type ImageSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface ImageSearchResult {
img_src: string;
url: string;
title: string;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: ImageSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images: ImageSearchResult[] = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
}),
]);
};
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const imageSearchChain = createImageSearchChain(llm);
return imageSearchChain.invoke(input);
};
export default handleImageSearch;
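A minimal invocation of the exported handler might look like this (hypothetical wiring; the module path and model name are assumptions, and any BaseChatModel instance works):

import { ChatOpenAI } from '@langchain/openai';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import handleImageSearch from './imageSearchAgent'; // assumed module path

const llm = new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0 }); // needs OPENAI_API_KEY
const images = await handleImageSearch(
  {
    chat_history: [
      new HumanMessage('Tell me about the Eiffel Tower'),
      new AIMessage('The Eiffel Tower is a wrought-iron tower in Paris...'),
    ],
    query: 'What does it look like at night?',
  },
  llm,
);
// images: up to 10 { img_src, url, title } results from SearXNG.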

View File

@@ -1,5 +1,5 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';

View File

@@ -0,0 +1,97 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface VideoSearchResult {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos: VideoSearchResult[] = [];
res.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
};
export default handleVideoSearch;
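As with the image chain, the handler can be called directly once a chat model is available; a short sketch under the same assumptions as the image example:

const videos = await handleVideoSearch(
  { chat_history: [], query: 'How do jet engines work?' },
  llm, // any BaseChatModel instance
);
// Each of the (at most 10) results carries img_src, url, title and an embeddable iframe_src.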

View File

@@ -10,12 +10,6 @@ interface Config {
SIMILARITY_MEASURE: string;
KEEP_ALIVE: string;
};
SEARCH_ENGINE_BACKENDS: {
SEARCH: string;
IMAGE: string;
VIDEO: string;
NEWS: string;
};
MODELS: {
OPENAI: {
API_KEY: string;
@@ -38,23 +32,8 @@ interface Config {
MODEL_NAME: string;
};
};
SEARCH_ENGINES: {
GOOGLE: {
API_KEY: string;
CSE_ID: string;
};
SEARXNG: {
ENDPOINT: string;
};
BING: {
SUBSCRIPTION_KEY: string;
};
BRAVE: {
API_KEY: string;
};
YACY: {
ENDPOINT: string;
};
API_ENDPOINTS: {
SEARXNG: string;
};
}
@@ -64,7 +43,7 @@ type RecursivePartial<T> = {
const loadConfig = () =>
toml.parse(
fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
) as any as Config;
export const getPort = () => loadConfig().GENERAL.PORT;
@@ -82,32 +61,8 @@ export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.SEARCH;
export const getImageSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.IMAGE || getSearchEngineBackend();
export const getVideoSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.VIDEO || getSearchEngineBackend();
export const getNewsSearchEngineBackend = () =>
loadConfig().SEARCH_ENGINE_BACKENDS.NEWS || getSearchEngineBackend();
export const getGoogleApiKey = () => loadConfig().SEARCH_ENGINES.GOOGLE.API_KEY;
export const getGoogleCseId = () => loadConfig().SEARCH_ENGINES.GOOGLE.CSE_ID;
export const getBraveApiKey = () => loadConfig().SEARCH_ENGINES.BRAVE.API_KEY;
export const getBingSubscriptionKey = () =>
loadConfig().SEARCH_ENGINES.BING.SUBSCRIPTION_KEY;
export const getYacyJsonEndpoint = () =>
loadConfig().SEARCH_ENGINES.YACY.ENDPOINT;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().SEARCH_ENGINES.SEARXNG.ENDPOINT;
loadConfig().API_ENDPOINTS.SEARXNG || process.env.SEARXNG_API_URL;
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
@@ -154,9 +109,8 @@ const mergeConfigs = (current: any, update: any): any => {
export const updateConfig = (config: RecursivePartial<Config>) => {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(__dirname, `../${configFileName}`),
path.join(path.join(process.cwd(), `${configFileName}`)),
toml.stringify(mergedConfig),
);
};
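The switch from __dirname to process.cwd() matters for the standalone build: the compiled module no longer sits next to the project root, so __dirname-relative lookups break, while process.cwd() resolves against the directory the server is started from. Illustratively (file name assumed to match the loader above):

import path from 'path';

const configFileName = 'config.toml'; // assumption
// Old: relative to the compiled file, which moves under .next/ in a standalone build.
// path.join(__dirname, `../${configFileName}`)
// New: relative to the launch directory, where config.toml is expected to live.
const configPath = path.join(process.cwd(), configFileName);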

View File

@@ -1,8 +1,9 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';
import path from 'path';
const sqlite = new Database('data/db.sqlite');
const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
const db = drizzle(sqlite, {
schema: schema,
});

View File

@@ -1,82 +0,0 @@
import { Embeddings, type EmbeddingsParams } from '@langchain/core/embeddings';
import { chunkArray } from '@langchain/core/utils/chunk_array';
export interface HuggingFaceTransformersEmbeddingsParams
extends EmbeddingsParams {
modelName: string;
model: string;
timeout?: number;
batchSize?: number;
stripNewLines?: boolean;
}
export class HuggingFaceTransformersEmbeddings
extends Embeddings
implements HuggingFaceTransformersEmbeddingsParams
{
modelName = 'Xenova/all-MiniLM-L6-v2';
model = 'Xenova/all-MiniLM-L6-v2';
batchSize = 512;
stripNewLines = true;
timeout?: number;
private pipelinePromise: Promise<any>;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});
this.modelName = fields?.model ?? fields?.modelName ?? this.model;
this.model = this.modelName;
this.stripNewLines = fields?.stripNewLines ?? this.stripNewLines;
this.timeout = fields?.timeout;
}
async embedDocuments(texts: string[]): Promise<number[][]> {
const batches = chunkArray(
this.stripNewLines ? texts.map((t) => t.replace(/\n/g, ' ')) : texts,
this.batchSize,
);
const batchRequests = batches.map((batch) => this.runEmbedding(batch));
const batchResponses = await Promise.all(batchRequests);
const embeddings: number[][] = [];
for (let i = 0; i < batchResponses.length; i += 1) {
const batchResponse = batchResponses[i];
for (let j = 0; j < batchResponse.length; j += 1) {
embeddings.push(batchResponse[j]);
}
}
return embeddings;
}
async embedQuery(text: string): Promise<number[]> {
const data = await this.runEmbedding([
this.stripNewLines ? text.replace(/\n/g, ' ') : text,
]);
return data[0];
}
private async runEmbedding(texts: string[]) {
const { pipeline } = await import('@xenova/transformers');
const pipe = await (this.pipelinePromise ??= pipeline(
'feature-extraction',
this.model,
));
return this.caller.call(async () => {
const output = await pipe(texts, { pooling: 'mean', normalize: true });
return output.tolist();
});
}
}

View File

@@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {
constructor(args?: LineOutputParserArgs) {
super();
this.key = args.key ?? this.key;
this.key = args?.key ?? this.key;
}
static lc_name() {

View File

@@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
constructor(args?: LineListOutputParserArgs) {
super();
this.key = args.key ?? this.key;
this.key = args?.key ?? this.key;
}
static lc_name() {

View File

@@ -1,6 +1,38 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { getAnthropicApiKey } from '../../config';
import logger from '../../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
export const loadAnthropicChatModels = async () => {
const anthropicApiKey = getAnthropicApiKey();
@@ -8,52 +40,25 @@ export const loadAnthropicChatModels = async () => {
if (!anthropicApiKey) return {};
try {
const chatModels = {
'claude-3-5-sonnet-20241022': {
displayName: 'Claude 3.5 Sonnet',
model: new ChatAnthropic({
const chatModels: Record<string, ChatModel> = {};
anthropicChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: anthropicApiKey,
modelName: model.key,
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-sonnet-20241022',
}),
},
'claude-3-5-haiku-20241022': {
displayName: 'Claude 3.5 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-haiku-20241022',
}),
},
'claude-3-opus-20240229': {
displayName: 'Claude 3 Opus',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-opus-20240229',
}),
},
'claude-3-sonnet-20240229': {
displayName: 'Claude 3 Sonnet',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-sonnet-20240229',
}),
},
'claude-3-haiku-20240307': {
displayName: 'Claude 3 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-haiku-20240307',
}),
},
};
configuration: {
baseURL: 'https://api.anthropic.com/v1/',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Anthropic models: ${err}`);
console.error(`Error loading Anthropic models: ${err}`);
return {};
}
};
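The loader now instantiates ChatOpenAI against Anthropic's OpenAI-compatible endpoint instead of using @langchain/anthropic. A usage sketch (assumes a valid Anthropic API key in the config):

const anthropicModels = await loadAnthropicChatModels();
const sonnet = anthropicModels['claude-3-7-sonnet-20250219'];
if (sonnet) {
  const reply = await sonnet.model.invoke('Reply with one word: hello');
  console.log(sonnet.displayName, reply.content);
}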

View File

@@ -1,9 +1,42 @@
import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../../config';
import logger from '../../utils/logger';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Pro Experimental',
key: 'gemini-2.0-pro-exp-02-05',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Gemini Embedding',
key: 'gemini-embedding-exp',
},
];
export const loadGeminiChatModels = async () => {
const geminiApiKey = getGeminiApiKey();
@@ -11,75 +44,53 @@ export const loadGeminiChatModels = async () => {
if (!geminiApiKey) return {};
try {
const chatModels = {
'gemini-1.5-flash': {
displayName: 'Gemini 1.5 Flash',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash',
const chatModels: Record<string, ChatModel> = {};
geminiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: geminiApiKey,
modelName: model.key,
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-flash-8b': {
displayName: 'Gemini 1.5 Flash 8B',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash-8b',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-pro': {
displayName: 'Gemini 1.5 Pro',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-pro',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-exp': {
displayName: 'Gemini 2.0 Flash Exp',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-exp',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-thinking-exp-01-21': {
displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-thinking-exp-01-21',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
};
configuration: {
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
},
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Gemini models: ${err}`);
console.error(`Error loading Gemini models: ${err}`);
return {};
}
};
export const loadGeminiEmbeddingsModels = async () => {
export const loadGeminiEmbeddingModels = async () => {
const geminiApiKey = getGeminiApiKey();
if (!geminiApiKey) return {};
try {
const embeddingModels = {
'text-embedding-004': {
displayName: 'Text Embedding',
model: new GoogleGenerativeAIEmbeddings({
apiKey: geminiApiKey,
modelName: 'text-embedding-004',
}),
},
};
const embeddingModels: Record<string, EmbeddingModel> = {};
geminiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey: geminiApiKey,
modelName: model.key,
configuration: {
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
},
}) as unknown as Embeddings,
};
});
return embeddingModels;
} catch (err) {
logger.error(`Error loading Gemini embeddings model: ${err}`);
    console.error(`Error loading Gemini embeddings models: ${err}`);
return {};
}
};
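Gemini is likewise reached through its OpenAI-compatible surface. Embedding a query with the loader above could look like this (assumes a Gemini API key is configured):

const geminiEmbeddings = await loadGeminiEmbeddingModels();
const embedder = geminiEmbeddings['gemini-embedding-exp'];
if (embedder) {
  const vector = await embedder.model.embedQuery('hello world');
  console.log(vector.length); // embedding dimensionality
}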

View File

@@ -1,6 +1,82 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B SpecDec (Preview)',
key: 'deepseek-r1-distill-llama-70b-specdec',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
];
export const loadGroqChatModels = async () => {
const groqApiKey = getGroqApiKey();
@@ -8,129 +84,25 @@ export const loadGroqChatModels = async () => {
if (!groqApiKey) return {};
try {
const chatModels = {
'llama-3.3-70b-versatile': {
displayName: 'Llama 3.3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.3-70b-versatile',
temperature: 0.7,
},
{
const chatModels: Record<string, ChatModel> = {};
groqChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-3b-preview': {
displayName: 'Llama 3.2 3B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-3b-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-11b-vision-preview': {
displayName: 'Llama 3.2 11B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-11b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-90b-vision-preview': {
displayName: 'Llama 3.2 90B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-90b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.1-8b-instant': {
displayName: 'Llama 3.1 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.1-8b-instant',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-8b-8192': {
displayName: 'LLaMA3 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-70b-8192': {
displayName: 'LLaMA3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'mixtral-8x7b-32768': {
displayName: 'Mixtral 8x7B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'gemma2-9b-it': {
displayName: 'Gemma2 9B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma2-9b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading Groq models: ${err}`);
console.error(`Error loading Groq models: ${err}`);
return {};
}
};

View File

@@ -1,33 +1,49 @@
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../../config';
} from '../config';
import { ChatOpenAI } from '@langchain/openai';
import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
import { loadGroqChatModels } from './groq';
import { loadAnthropicChatModels } from './anthropic';
import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
const chatModelProviders = {
export interface ChatModel {
displayName: string;
model: BaseChatModel;
}
export interface EmbeddingModel {
displayName: string;
model: Embeddings;
}
export const chatModelProviders: Record<
string,
() => Promise<Record<string, ChatModel>>
> = {
openai: loadOpenAIChatModels,
groq: loadGroqChatModels,
ollama: loadOllamaChatModels,
groq: loadGroqChatModels,
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
};
const embeddingModelProviders = {
openai: loadOpenAIEmbeddingsModels,
local: loadTransformersEmbeddingsModels,
ollama: loadOllamaEmbeddingsModels,
gemini: loadGeminiEmbeddingsModels,
export const embeddingModelProviders: Record<
string,
() => Promise<Record<string, EmbeddingModel>>
> = {
openai: loadOpenAIEmbeddingModels,
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
};
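With these typed registries, adding a provider is a two-step change: write a loader that returns Record<string, ChatModel> (or EmbeddingModel), then register it under a key. A hedged sketch for a hypothetical OpenAI-compatible service:

const loadExampleChatModels = async (): Promise<Record<string, ChatModel>> => ({
  'example-model': {
    displayName: 'Example Model',
    model: new ChatOpenAI({
      openAIApiKey: 'sk-example', // assumption: the service speaks the OpenAI API
      modelName: 'example-model',
      temperature: 0.7,
      configuration: { baseURL: 'https://api.example.com/v1' },
    }) as unknown as BaseChatModel,
  },
});
// Registration: chatModelProviders['example'] = loadExampleChatModels;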
export const getAvailableChatModelProviders = async () => {
const models = {};
const models: Record<string, Record<string, ChatModel>> = {};
for (const provider in chatModelProviders) {
const providerModels = await chatModelProviders[provider]();
@@ -52,7 +68,7 @@ export const getAvailableChatModelProviders = async () => {
configuration: {
baseURL: customOpenAiApiUrl,
},
}),
}) as unknown as BaseChatModel,
},
}
: {}),
@@ -62,7 +78,7 @@ export const getAvailableChatModelProviders = async () => {
};
export const getAvailableEmbeddingModelProviders = async () => {
const models = {};
const models: Record<string, Record<string, EmbeddingModel>> = {};
for (const provider in embeddingModelProviders) {
const providerModels = await embeddingModelProviders[provider]();

View File

@@ -1,74 +1,73 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
export const loadOllamaChatModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
const keepAlive = getKeepAlive();
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaEndpoint) return {};
if (!ollamaApiEndpoint) return {};
try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = response.data;
const { models } = res.data;
const chatModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.model] = {
displayName: model.name,
model: new ChatOllama({
baseUrl: ollamaEndpoint,
baseUrl: ollamaApiEndpoint,
model: model.model,
temperature: 0.7,
keepAlive: keepAlive,
keepAlive: getKeepAlive(),
}),
};
return acc;
}, {});
});
return chatModels;
} catch (err) {
logger.error(`Error loading Ollama models: ${err}`);
console.error(`Error loading Ollama models: ${err}`);
return {};
}
};
export const loadOllamaEmbeddingsModels = async () => {
const ollamaEndpoint = getOllamaApiEndpoint();
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
if (!ollamaEndpoint) return {};
if (!ollamaApiEndpoint) return {};
try {
const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
headers: {
'Content-Type': 'application/json',
},
});
const { models: ollamaModels } = response.data;
const { models } = res.data;
const embeddingsModels = ollamaModels.reduce((acc, model) => {
acc[model.model] = {
const embeddingModels: Record<string, EmbeddingModel> = {};
models.forEach((model: any) => {
embeddingModels[model.model] = {
displayName: model.name,
model: new OllamaEmbeddings({
baseUrl: ollamaEndpoint,
baseUrl: ollamaApiEndpoint,
model: model.model,
}),
};
});
return acc;
}, {});
return embeddingsModels;
return embeddingModels;
} catch (err) {
logger.error(`Error loading Ollama embeddings model: ${err}`);
console.error(`Error loading Ollama embeddings models: ${err}`);
return {};
}
};
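Both Ollama loaders discover models at runtime from GET {endpoint}/api/tags, whose response has the shape { models: [{ name, model, ... }] }. A quick smoke test (assumes a reachable Ollama instance at the configured endpoint):

const ollamaChatModels = await loadOllamaChatModels();
console.log(Object.keys(ollamaChatModels)); // e.g. ['llama3:8b', 'mistral:7b']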

View File

@@ -1,89 +1,90 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';
import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
export const loadOpenAIChatModels = async () => {
const openAIApiKey = getOpenaiApiKey();
const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {};
if (!openaiApiKey) return {};
try {
const chatModels = {
'gpt-3.5-turbo': {
displayName: 'GPT-3.5 Turbo',
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-3.5-turbo',
openAIApiKey: openaiApiKey,
modelName: model.key,
temperature: 0.7,
}),
},
'gpt-4': {
displayName: 'GPT-4',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
},
'gpt-4-turbo': {
displayName: 'GPT-4 turbo',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
},
'gpt-4o': {
displayName: 'GPT-4 omni',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o',
temperature: 0.7,
}),
},
'gpt-4o-mini': {
displayName: 'GPT-4 omni mini',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o-mini',
temperature: 0.7,
}),
},
};
}) as unknown as BaseChatModel,
};
});
return chatModels;
} catch (err) {
logger.error(`Error loading OpenAI models: ${err}`);
console.error(`Error loading OpenAI models: ${err}`);
return {};
}
};
export const loadOpenAIEmbeddingsModels = async () => {
const openAIApiKey = getOpenaiApiKey();
export const loadOpenAIEmbeddingModels = async () => {
const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {};
if (!openaiApiKey) return {};
try {
const embeddingModels = {
'text-embedding-3-small': {
displayName: 'Text Embedding 3 Small',
const embeddingModels: Record<string, EmbeddingModel> = {};
openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-small',
}),
},
'text-embedding-3-large': {
displayName: 'Text Embedding 3 Large',
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
},
};
openAIApiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
});
return embeddingModels;
} catch (err) {
logger.error(`Error loading OpenAI embeddings model: ${err}`);
console.error(`Error loading OpenAI embeddings models: ${err}`);
return {};
}
};

View File

@@ -1,32 +0,0 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const loadTransformersEmbeddingsModels = async () => {
try {
const embeddingModels = {
'xenova-bge-small-en-v1.5': {
displayName: 'BGE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bge-small-en-v1.5',
}),
},
'xenova-gte-small': {
displayName: 'GTE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/gte-small',
}),
},
'xenova-bert-base-multilingual-uncased': {
displayName: 'Bert Multilingual',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bert-base-multilingual-uncased',
}),
},
};
return embeddingModels;
} catch (err) {
logger.error(`Error loading Transformers embeddings model: ${err}`);
return {};
}
};

View File

@@ -13,23 +13,17 @@ import {
} from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from 'langchain/document';
import { searchSearxng } from '../lib/searchEngines/searxng';
import { searchGooglePSE } from '../lib/searchEngines/google_pse';
import { searchBingAPI } from '../lib/searchEngines/bing';
import { searchBraveAPI } from '../lib/searchEngines/brave';
import { searchYaCy } from '../lib/searchEngines/yacy';
import { getSearchEngineBackend } from '../config';
import path from 'path';
import fs from 'fs';
import { searchSearxng } from '../searxng';
import path from 'node:path';
import fs from 'node:fs';
import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { IterableReadableStream } from '@langchain/core/utils/stream';
export interface MetaSearchAgentType {
searchAndAnswer: (
@@ -95,7 +89,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
question = 'summarize';
}
let docs = [];
let docs: Document[] = [];
const linkDocs = await getDocumentsFromLinks({ links });
@@ -137,7 +131,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
You are a web search summarizer, tasked with summarizing a piece of text retrieved from a web search. Your job is to summarize the
text into a detailed, 2-4 paragraph explanation that captures the main ideas and provides a comprehensive answer to the query.
If the query is \"summarize\", you should provide a detailed summary of the text. If the query is a specific question, you should answer it in the summary.
- **Journalistic tone**: The summary should sound professional and journalistic, not too casual or vague.
- **Thorough and detailed**: Ensure that every key point from the text is captured and that the summary directly answers the query.
- **Not too lengthy, but detailed**: The summary should be informative but not excessively long. Focus on providing detailed information in a concise format.
@@ -208,37 +202,10 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: docs };
} else {
const searchEngine = getSearchEngineBackend();
let res;
switch (searchEngine) {
case 'searxng':
res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
break;
case 'google':
res = await searchGooglePSE(question);
break;
case 'bing':
res = await searchBingAPI(question);
break;
case 'brave':
res = await searchBraveAPI(question);
break;
case 'yacy':
res = await searchYaCy(question);
break;
default:
throw new Error(`Unknown search engine ${searchEngine}`);
}
if (!res?.results) {
throw new Error(
`No results found for search engine: ${searchEngine}`,
);
}
const res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
const documents = res.results.map(
(result) =>
@@ -343,7 +310,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));
const fileSimilaritySearchObject = content.contents.map(
(c: string, i) => {
(c: string, i: number) => {
return {
fileName: content.title,
content: c,
@@ -446,6 +413,8 @@ class MetaSearchAgent implements MetaSearchAgentType {
return sortedDocs;
}
return [];
}
private processDocs(docs: Document[]) {
@@ -458,7 +427,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
}
private async handleStream(
stream: IterableReadableStream<StreamEvent>,
stream: AsyncGenerator<StreamEvent, any, any>,
emitter: eventEmitter,
) {
for await (const event of stream) {

View File

@@ -1,105 +0,0 @@
import axios from 'axios';
import { getBingSubscriptionKey } from '../../config';
interface BingAPISearchResult {
_type: string;
name: string;
url: string;
displayUrl: string;
snippet?: string;
dateLastCrawled?: string;
thumbnailUrl?: string;
contentUrl?: string;
hostPageUrl?: string;
width?: number;
height?: number;
accentColor?: string;
contentSize?: string;
datePublished?: string;
encodingFormat?: string;
hostPageDisplayUrl?: string;
id?: string;
isLicensed?: boolean;
isFamilyFriendly?: boolean;
language?: string;
mediaUrl?: string;
motionThumbnailUrl?: string;
publisher?: string;
viewCount?: number;
webSearchUrl?: string;
primaryImageOfPage?: {
thumbnailUrl?: string;
width?: number;
height?: number;
};
video?: {
allowHttpsEmbed?: boolean;
embedHtml?: string;
allowMobileEmbed?: boolean;
viewCount?: number;
duration?: string;
};
image?: {
thumbnail?: {
contentUrl?: string;
width?: number;
height?: number;
};
imageInsightsToken?: string;
imageId?: string;
};
}
export const searchBingAPI = async (query: string) => {
try {
const bingApiKey = await getBingSubscriptionKey();
const url = new URL(`https://api.cognitive.microsoft.com/bing/v7.0/search`);
url.searchParams.append('q', query);
url.searchParams.append('responseFilter', 'Webpages,Images,Videos');
const res = await axios.get(url.toString(), {
headers: {
'Ocp-Apim-Subscription-Key': bingApiKey,
Accept: 'application/json',
},
});
if (res.data.error) {
throw new Error(`Bing API Error: ${res.data.error.message}`);
}
const originalres = res.data;
// Extract web, image, and video results
const webResults = originalres.webPages?.value || [];
const imageResults = originalres.images?.value || [];
const videoResults = originalres.videos?.value || [];
const results = webResults.map((item: BingAPISearchResult) => ({
title: item.name,
url: item.url,
content: item.snippet,
img_src:
item.primaryImageOfPage?.thumbnailUrl ||
imageResults.find((img: any) => img.hostPageUrl === item.url)
?.thumbnailUrl ||
videoResults.find((vid: any) => vid.hostPageUrl === item.url)
?.thumbnailUrl,
...(item.video && {
videoData: {
duration: item.video.duration,
embedUrl: item.video.embedHtml?.match(/src="(.*?)"/)?.[1],
},
publisher: item.publisher,
datePublished: item.datePublished,
}),
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Bing API Error: ${errorMessage}`);
}
};
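For reference, the deleted helper returned both the normalized results and the raw payload; a hypothetical call site (the query string is illustrative):

const { results, originalres } = await searchBingAPI('open source ai search');
for (const r of results) {
  console.log(r.title, r.url);
}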


@@ -1,102 +0,0 @@
import axios from 'axios';
import { getBraveApiKey } from '../../config';
interface BraveSearchResult {
title: string;
url: string;
content?: string;
img_src?: string;
age?: string;
family_friendly?: boolean;
language?: string;
video?: {
embedUrl?: string;
duration?: string;
};
rating?: {
value: number;
scale: number;
};
products?: Array<{
name: string;
price?: string;
}>;
recipe?: {
ingredients?: string[];
cookTime?: string;
};
meta?: {
fetched?: string;
lastCrawled?: string;
};
}
export const searchBraveAPI = async (
query: string,
numResults: number = 20,
): Promise<{ results: BraveSearchResult[]; originalres: any }> => {
try {
const braveApiKey = await getBraveApiKey();
const url = new URL(`https://api.search.brave.com/res/v1/web/search`);
url.searchParams.append('q', query);
url.searchParams.append('count', numResults.toString());
const res = await axios.get(url.toString(), {
headers: {
'X-Subscription-Token': braveApiKey,
Accept: 'application/json',
},
});
if (res.data.error) {
throw new Error(`Brave API Error: ${res.data.error.message}`);
}
const originalres = res.data;
const webResults = originalres.web?.results || [];
const results: BraveSearchResult[] = webResults.map((item: any) => ({
title: item.title,
url: item.url,
content: item.description,
img_src: item.thumbnail?.src || item.deep_results?.images?.[0]?.src,
age: item.age,
family_friendly: item.family_friendly,
language: item.language,
video: item.video
? {
embedUrl: item.video.embed_url,
duration: item.video.duration,
}
: undefined,
rating: item.rating
? {
value: item.rating.value,
scale: item.rating.scale_max,
}
: undefined,
products: item.deep_results?.product_cluster?.map((p: any) => ({
name: p.name,
price: p.price,
})),
recipe: item.recipe
? {
ingredients: item.recipe.ingredients,
cookTime: item.recipe.cook_time,
}
: undefined,
meta: {
fetched: item.meta?.fetched,
lastCrawled: item.meta?.last_crawled,
},
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Brave API Error: ${errorMessage}`);
}
};
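The mapping above materializes nested sub-objects only when the source field exists, yielding undefined rather than partially filled objects. A self-contained sketch of that pattern with a trimmed-down item shape (the types here are illustrative, not Brave's schema):

type RawItem = { title: string; url: string; video?: { embed_url?: string; duration?: string } };
type Result = { title: string; url: string; video?: { embedUrl?: string; duration?: string } };

const toResult = (item: RawItem): Result => ({
  title: item.title,
  url: item.url,
  // Only build the sub-object when the raw field is present.
  video: item.video
    ? { embedUrl: item.video.embed_url, duration: item.video.duration }
    : undefined,
});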


@@ -1,85 +0,0 @@
import axios from 'axios';
import { getGoogleApiKey, getGoogleCseId } from '../../config';
interface GooglePSESearchResult {
kind: string;
title: string;
htmlTitle: string;
link: string;
displayLink: string;
snippet?: string;
htmlSnippet?: string;
cacheId?: string;
formattedUrl: string;
htmlFormattedUrl: string;
pagemap?: {
videoobject: any;
cse_thumbnail?: Array<{
src: string;
width: string;
height: string;
}>;
metatags?: Array<{
[key: string]: string;
author?: string;
}>;
cse_image?: Array<{
src: string;
}>;
};
fileFormat?: string;
image?: {
contextLink: string;
thumbnailLink: string;
};
mime?: string;
labels?: Array<{
name: string;
displayName: string;
}>;
}
export const searchGooglePSE = async (query: string) => {
try {
const [googleApiKey, googleCseID] = await Promise.all([
getGoogleApiKey(),
getGoogleCseId(),
]);
const url = new URL(`https://www.googleapis.com/customsearch/v1`);
url.searchParams.append('q', query);
url.searchParams.append('cx', googleCseID);
url.searchParams.append('key', googleApiKey);
const res = await axios.get(url.toString());
if (res.data.error) {
throw new Error(`Google PSE Error: ${res.data.error.message}`);
}
const originalres = res.data.items;
const results = originalres.map((item: GooglePSESearchResult) => ({
title: item.title,
url: item.link,
content: item.snippet,
img_src:
item.pagemap?.cse_image?.[0]?.src ||
item.pagemap?.cse_thumbnail?.[0]?.src ||
item.image?.thumbnailLink,
...(item.pagemap?.videoobject?.[0] && {
videoData: {
duration: item.pagemap.videoobject[0].duration,
embedUrl: item.pagemap.videoobject[0].embedurl,
},
}),
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`Google PSE Error: ${errorMessage}`);
}
};
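All of these deleted wrappers build request URLs through the WHATWG URL API rather than string concatenation, so the query is percent-encoded automatically; a standalone sketch with placeholder credentials:

const url = new URL('https://www.googleapis.com/customsearch/v1');
url.searchParams.append('q', 'open source search');
url.searchParams.append('cx', 'YOUR_CSE_ID');   // placeholder, not a real CSE id
url.searchParams.append('key', 'YOUR_API_KEY'); // placeholder, not a real key
console.log(url.toString()); // query string comes out percent-encoded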


@@ -1,79 +0,0 @@
import axios from 'axios';
import { getYacyJsonEndpoint } from '../../config';
interface YaCySearchResult {
channels: {
title: string;
description: string;
link: string;
image: {
url: string;
title: string;
link: string;
};
startIndex: string;
itemsPerPage: string;
searchTerms: string;
items: {
title: string;
link: string;
code: string;
description: string;
pubDate: string;
image?: string;
size: string;
sizename: string;
guid: string;
faviconUrl: string;
host: string;
path: string;
file: string;
urlhash: string;
ranking: string;
}[];
navigation: {
facetname: string;
displayname: string;
type: string;
min: string;
max: string;
mean: string;
elements: {
name: string;
count: string;
modifier: string;
url: string;
}[];
}[];
}[];
}
export const searchYaCy = async (query: string, numResults: number = 20) => {
try {
const yacyBaseUrl = getYacyJsonEndpoint();
const url = new URL(`${yacyBaseUrl}/yacysearch.json`);
url.searchParams.append('query', query);
url.searchParams.append('count', numResults.toString());
const res = await axios.get(url.toString());
const originalres = res.data as YaCySearchResult;
const results = originalres.channels[0].items.map((item) => ({
title: item.title,
url: item.link,
content: item.description,
img_src: item.image || null,
pubDate: item.pubDate,
host: item.host,
}));
return { results, originalres };
} catch (error) {
const errorMessage = error.response?.data
? JSON.stringify(error.response.data, null, 2)
: error.message || 'Unknown error';
throw new Error(`YaCy Error: ${errorMessage}`);
}
};
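The four deleted providers share the same catch-block shape; factored out, it would look roughly like this (assuming an axios-style error object):

const describeAxiosError = (error: any): string =>
  error.response?.data
    ? JSON.stringify(error.response.data, null, 2)
    : error.message || 'Unknown error';

// e.g. throw new Error(`YaCy Error: ${describeAxiosError(err)}`);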


@@ -1,5 +1,5 @@
import axios from 'axios';
import { getSearxngApiEndpoint } from '../../config';
import { getSearxngApiEndpoint } from './config';
interface SearxngSearchOptions {
categories?: string[];
@@ -30,11 +30,12 @@ export const searchSearxng = async (
if (opts) {
Object.keys(opts).forEach((key) => {
if (Array.isArray(opts[key])) {
url.searchParams.append(key, opts[key].join(','));
const value = opts[key as keyof SearxngSearchOptions];
if (Array.isArray(value)) {
url.searchParams.append(key, value.join(','));
return;
}
url.searchParams.append(key, opts[key]);
url.searchParams.append(key, value as string);
});
}
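The change above is needed because Object.keys() yields plain string keys, so indexing a typed options object requires narrowing the key with keyof. A minimal standalone version of the fixed loop (the Opts interface is illustrative):

interface Opts {
  categories?: string[];
  language?: string;
}

const appendParams = (url: URL, opts: Opts) => {
  Object.keys(opts).forEach((key) => {
    const value = opts[key as keyof Opts];
    if (Array.isArray(value)) {
      url.searchParams.append(key, value.join(','));
      return;
    }
    if (value !== undefined) url.searchParams.append(key, value);
  });
};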

src/lib/types/compute-dot.d.ts (vendored, new file, 5 additions)

@@ -0,0 +1,5 @@
declare function computeDot(vectorA: number[], vectorB: number[]): number;
declare module 'compute-dot' {
export default computeDot;
}
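With this ambient declaration in place, the otherwise untyped package imports with a proper signature; a usage sketch:

import computeDot from 'compute-dot';

const d: number = computeDot([1, 0, 1], [0.5, 0.5, 0.5]);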


@@ -6,7 +6,7 @@ const computeSimilarity = (x: number[], y: number[]): number => {
const similarityMeasure = getSimilarityMeasure();
if (similarityMeasure === 'cosine') {
return cosineSimilarity(x, y);
return cosineSimilarity(x, y) as number;
} else if (similarityMeasure === 'dot') {
return dot(x, y);
}
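The added cast suggests the imported cosineSimilarity helper's typings return a broader type than number. For context, cosine similarity normalizes the dot product by both vector magnitudes; a self-contained sketch, not the shipped implementation:

const cosine = (x: number[], y: number[]): number => {
  let dot = 0, nx = 0, ny = 0;
  for (let i = 0; i < x.length; i++) {
    dot += x[i] * y[i];
    nx += x[i] * x[i];
    ny += y[i] * y[i];
  }
  return dot / (Math.sqrt(nx) * Math.sqrt(ny));
};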


@@ -3,7 +3,6 @@ import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
import logger from './logger';
export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splitter = new RecursiveCharacterTextSplitter();
@@ -79,12 +78,13 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
docs.push(...linkDocs);
} catch (err) {
logger.error(
`Error at generating documents from links: ${err.message}`,
console.error(
'An error occurred while getting documents from links: ',
err,
);
docs.push(
new Document({
pageContent: `Failed to retrieve content from the link: ${err.message}`,
pageContent: `Failed to retrieve content from the link: ${err}`,
metadata: {
title: 'Failed to retrieve content',
url: link,
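The surrounding function degrades gracefully: a failed link still yields a placeholder Document so downstream ranking keeps working. A minimal sketch of that fallback pattern, assuming the same Document class and a caller-supplied fetcher:

import { Document } from '@langchain/core/documents';

const fetchOrPlaceholder = async (
  link: string,
  fetcher: (l: string) => Promise<Document[]>,
) => {
  try {
    return await fetcher(link);
  } catch (err) {
    console.error('An error occurred while getting documents from links: ', err);
    // Fall back to a stub document so the pipeline continues.
    return [
      new Document({
        pageContent: `Failed to retrieve content from the link: ${err}`,
        metadata: { title: 'Failed to retrieve content', url: link },
      }),
    ];
  }
};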


@@ -1,66 +0,0 @@
import express from 'express';
import logger from '../utils/logger';
import db from '../db/index';
import { eq } from 'drizzle-orm';
import { chats, messages } from '../db/schema';
const router = express.Router();
router.get('/', async (_, res) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return res.status(200).json({ chats: chats });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chats: ${err.message}`);
}
});
router.get('/:id', async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, req.params.id),
});
return res.status(200).json({ chat: chatExists, messages: chatMessages });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chat: ${err.message}`);
}
});
router.delete(`/:id`, async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
await db
.delete(messages)
.where(eq(messages.chatId, req.params.id))
.execute();
return res.status(200).json({ message: 'Chat deleted successfully' });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in deleting chat: ${err.message}`);
}
});
export default router;
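If these chat-history endpoints move into the Next.js app along with the rest of the removed backend, the list endpoint could look roughly like this as an App Router handler; the file location and import path below are assumptions, not code from this diff:

// app/api/chats/route.ts (assumed location)
import db from '@/lib/db'; // assumed import path

export const GET = async () => {
  try {
    let chats = await db.query.chats.findMany();
    chats = chats.reverse();
    return Response.json({ chats }, { status: 200 });
  } catch (err) {
    console.error('Error in getting chats:', err);
    return Response.json({ message: 'An error has occurred.' }, { status: 500 });
  }
};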

Some files were not shown because too many files have changed in this diff.