Compare commits

...

25 Commits

Author  SHA1  Message  Date
ItzCrazyKns  403d13eb50  feat(package): update scripts  2025-03-19 16:34:55 +05:30
ItzCrazyKns  217736d05a  feat(app): remove backend  2025-03-19 16:23:27 +05:30
ItzCrazyKns  8a24572cd2  feat(app): add upload functionality  2025-03-19 15:32:32 +05:30
ItzCrazyKns  649c68f292  feat(ui): fix type errors  2025-03-19 13:42:28 +05:30
ItzCrazyKns  bab5dba6e1  feat(app): port history saving features  2025-03-19 13:42:15 +05:30
ItzCrazyKns  c24edac16d  feat(app): add chat functionality  2025-03-19 13:41:52 +05:30
ItzCrazyKns  3150c21f17  feat(icons): fix type errors  2025-03-19 13:41:01 +05:30
ItzCrazyKns  c46fd7a9c8  feat(utils): add files utils, remove logger, fix API url  2025-03-19 13:40:35 +05:30
ItzCrazyKns  bab32e8d70  feat(app): add suggestions route  2025-03-19 13:40:10 +05:30
ItzCrazyKns  1130746f5d  feat(app): add image & video search functionality  2025-03-19 13:38:40 +05:30
ItzCrazyKns  d1e9361665  feat(routes): add discover route  2025-03-19 13:37:54 +05:30
ItzCrazyKns  3bf2337697  feat(app): add db & schema  2025-03-19 13:37:01 +05:30
ItzCrazyKns  ee6e197ec0  feat(app): lint & beautify  2025-03-18 11:29:04 +05:30
ItzCrazyKns  32f26bb4e8  feat(app): add groq, gemini & anthropic provider  2025-03-18 11:28:47 +05:30
ItzCrazyKns  4cb20542a5  feat(config): update file path, add post endpoint  2025-03-18 10:33:32 +05:30
ItzCrazyKns  97f6196d9b  feat(app): add GET config route  2025-03-18 10:25:09 +05:30
ItzCrazyKns  6c227cab6f  feat(providers): move providers to UI  2025-03-18 10:24:51 +05:30
ItzCrazyKns  e9e34ddff9  feat(ui): add meta search agent  2025-03-18 10:24:33 +05:30
ItzCrazyKns  e29a08dc46  feat(ui): add necessary utils  2025-03-18 10:24:16 +05:30
ItzCrazyKns  5c313e9bed  feat(ui): update packages, add config, add searxng  2025-03-18 10:23:59 +05:30
ItzCrazyKns  6b5bd9d79b  feat(prompts): move to UI  2025-03-18 10:23:21 +05:30
ItzCrazyKns  64d2a467b0  Merge pull request #672 from sjiampojamarn/scrolling ("Only set scrollIntoView for user msg.")  2025-03-17 12:03:05 +05:30
sjiampojamarn  9a2c4fe3b6  Only set scrollIntoView for user msg.  2025-03-16 22:15:58 -07:00
ItzCrazyKns  060c68a900  feat(message-box): lint & beautify  2025-03-14 22:05:07 +05:30
ItzCrazyKns  e6b87f89ec  feat(sample-config): add custom openai model name  2025-03-08 20:08:27 +05:30
114 changed files with 4718 additions and 7083 deletions

.gitignore

@@ -4,9 +4,9 @@ npm-debug.log
 yarn-error.log
 
 # Build output
-/.next/
-/out/
-/dist/
+.next/
+out/
+dist/
 
 # IDE/Editor specific
 .vscode/

@@ -6,7 +6,6 @@ const config = {
   endOfLine: 'auto',
   singleQuote: true,
   tabWidth: 2,
-  semi: true,
 };
 
 module.exports = config;

@@ -1,13 +1,20 @@
 FROM node:20.18.0-alpine
 
-ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
-ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
-ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
-ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
-
 WORKDIR /home/perplexica
 
-COPY ui /home/perplexica/
+COPY src /home/perplexica/src
+COPY public /home/perplexica/public
+COPY package.json /home/perplexica/package.json
+COPY yarn.lock /home/perplexica/yarn.lock
+COPY tsconfig.json /home/perplexica/tsconfig.json
+COPY next.config.mjs /home/perplexica/next.config.mjs
+COPY next-env.d.ts /home/perplexica/next-env.d.ts
+COPY postcss.config.js /home/perplexica/postcss.config.js
+COPY drizzle.config.ts /home/perplexica/drizzle.config.ts
+COPY tailwind.config.ts /home/perplexica/tailwind.config.ts
+
+RUN mkdir /home/perplexica/data
+RUN mkdir /home/perplexica/uploads
 
 RUN yarn install --frozen-lockfile
 RUN yarn build


@@ -1,17 +0,0 @@
FROM node:18-slim
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build
CMD ["yarn", "start"]

@@ -9,41 +9,20 @@ services:
       - perplexica-network
     restart: unless-stopped
 
-  perplexica-backend:
-    build:
-      context: .
-      dockerfile: backend.dockerfile
-    image: itzcrazykns1337/perplexica-backend:main
-    environment:
-      - SEARXNG_API_URL=http://searxng:8080
-    depends_on:
-      - searxng
-    ports:
-      - 3001:3001
-    volumes:
-      - backend-dbstore:/home/perplexica/data
-      - uploads:/home/perplexica/uploads
-      - ./config.toml:/home/perplexica/config.toml
-    extra_hosts:
-      - 'host.docker.internal:host-gateway'
-    networks:
-      - perplexica-network
-    restart: unless-stopped
-
-  perplexica-frontend:
+  app:
     build:
       context: .
       dockerfile: app.dockerfile
-      args:
-        - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
-        - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
-    image: itzcrazykns1337/perplexica-frontend:main
-    depends_on:
-      - perplexica-backend
+    environment:
+      - SEARXNG_API_URL=http://searxng:8080
     ports:
       - 3000:3000
     networks:
       - perplexica-network
+    volumes:
+      - backend-dbstore:/home/perplexica/data
+      - uploads:/home/perplexica/uploads
+      - ./config.toml:/home/perplexica/config.toml
     restart: unless-stopped
 
 networks:

@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
 
 export default defineConfig({
   dialect: 'sqlite',
-  schema: './src/db/schema.ts',
+  schema: './src/lib/db/schema.ts',
   out: './drizzle',
   dbCredentials: {
     url: './data/db.sqlite',

next-env.d.ts

@@ -0,0 +1,5 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

@@ -7,6 +7,7 @@ const nextConfig = {
       },
     ],
   },
+  serverExternalPackages: ['pdf-parse'],
 };
 
 export default nextConfig;

@@ -1,53 +1,62 @@
 {
-  "name": "perplexica-backend",
+  "name": "perplexica-frontend",
   "version": "1.10.0-rc3",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
-    "start": "npm run db:push && node dist/app.js",
-    "build": "tsc",
-    "dev": "nodemon --ignore uploads/ src/app.ts ",
-    "db:push": "drizzle-kit push sqlite",
-    "format": "prettier . --check",
-    "format:write": "prettier . --write"
-  },
-  "devDependencies": {
-    "@types/better-sqlite3": "^7.6.10",
-    "@types/cors": "^2.8.17",
-    "@types/express": "^4.17.21",
-    "@types/html-to-text": "^9.0.4",
-    "@types/multer": "^1.4.12",
-    "@types/pdf-parse": "^1.1.4",
-    "@types/readable-stream": "^4.0.11",
-    "@types/ws": "^8.5.12",
-    "drizzle-kit": "^0.22.7",
-    "nodemon": "^3.1.0",
-    "prettier": "^3.2.5",
-    "ts-node": "^10.9.2",
-    "typescript": "^5.4.3"
+    "dev": "next dev",
+    "build": "next build",
+    "start": "npm run db:push && next start",
+    "lint": "next lint",
+    "format:write": "prettier . --write",
+    "db:push": "drizzle-kit push"
   },
   "dependencies": {
+    "@headlessui/react": "^2.2.0",
     "@iarna/toml": "^2.2.5",
-    "@langchain/anthropic": "^0.2.3",
-    "@langchain/community": "^0.2.16",
+    "@icons-pack/react-simple-icons": "^12.3.0",
+    "@langchain/community": "^0.3.36",
+    "@langchain/core": "^0.3.42",
     "@langchain/openai": "^0.0.25",
-    "@langchain/google-genai": "^0.0.23",
-    "@xenova/transformers": "^2.17.1",
-    "axios": "^1.6.8",
-    "better-sqlite3": "^11.0.0",
+    "@langchain/textsplitters": "^0.1.0",
+    "@tailwindcss/typography": "^0.5.12",
+    "axios": "^1.8.3",
+    "better-sqlite3": "^11.9.1",
+    "clsx": "^2.1.0",
     "compute-cosine-similarity": "^1.1.0",
     "compute-dot": "^1.1.0",
-    "cors": "^2.8.5",
-    "dotenv": "^16.4.5",
-    "drizzle-orm": "^0.31.2",
-    "express": "^4.19.2",
+    "drizzle-orm": "^0.40.1",
     "html-to-text": "^9.0.5",
     "langchain": "^0.1.30",
-    "mammoth": "^1.8.0",
-    "multer": "^1.4.5-lts.1",
+    "lucide-react": "^0.363.0",
+    "markdown-to-jsx": "^7.7.2",
+    "next": "^15.2.2",
+    "next-themes": "^0.3.0",
     "pdf-parse": "^1.1.1",
-    "winston": "^3.13.0",
-    "ws": "^8.17.1",
+    "react": "^18",
+    "react-dom": "^18",
+    "react-text-to-speech": "^0.14.5",
+    "react-textarea-autosize": "^8.5.3",
+    "sonner": "^1.4.41",
+    "tailwind-merge": "^2.2.2",
+    "winston": "^3.17.0",
+    "yet-another-react-lightbox": "^3.17.2",
     "zod": "^3.22.4"
+  },
+  "devDependencies": {
+    "@types/better-sqlite3": "^7.6.12",
+    "@types/html-to-text": "^9.0.4",
+    "@types/node": "^20",
+    "@types/pdf-parse": "^1.1.4",
+    "@types/react": "^18",
+    "@types/react-dom": "^18",
+    "autoprefixer": "^10.0.1",
+    "drizzle-kit": "^0.30.5",
+    "eslint": "^8",
+    "eslint-config-next": "14.1.4",
+    "postcss": "^8",
+    "prettier": "^3.2.5",
+    "tailwindcss": "^3.3.0",
+    "typescript": "^5"
  }
 }

(binary image changed: 1.3 KiB before, 1.3 KiB after)

(binary image changed: 629 B before, 629 B after)

@@ -18,9 +18,10 @@ API_KEY = ""
 [MODELS.CUSTOM_OPENAI]
 API_KEY = ""
 API_URL = ""
+MODEL_NAME = ""
 
 [MODELS.OLLAMA]
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 
 [API_ENDPOINTS]
-SEARXNG = "http://localhost:32768" # SearxNG API URL
+SEARXNG = "" # SearxNG API URL - http://localhost:32768


@@ -1,38 +0,0 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});

src/app/api/chat/route.ts

@@ -0,0 +1,360 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
const searchHandlers: Record<string, MetaSearchAgent> = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};
type Message = {
messageId: string;
chatId: string;
content: string;
};
type ChatModel = {
provider: string;
name: string;
};
type EmbeddingModel = {
provider: string;
name: string;
};
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
};
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
) => {
let recievedMessage = '';
let sources: any[] = [];
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'message',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
recievedMessage += parsedData.data;
} else if (parsedData.type === 'sources') {
writer.write(
encoder.encode(
JSON.stringify({
type: 'sources',
data: parsedData.data,
messageId: aiMessageId,
}) + '\n',
),
);
sources = parsedData.data;
}
});
stream.on('end', () => {
writer.write(
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
}) + '\n',
),
);
writer.close();
db.insert(messagesSchema)
.values({
content: recievedMessage,
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
})
.execute();
});
stream.on('error', (data) => {
const parsedData = JSON.parse(data);
writer.write(
encoder.encode(
JSON.stringify({
type: 'error',
data: parsedData.data,
}),
),
);
writer.close();
});
};
const handleHistorySave = async (
message: Message,
humanMessageId: string,
focusMode: string,
files: string[],
) => {
const chat = await db.query.chats.findFirst({
where: eq(chats.id, message.chatId),
});
if (!chat) {
await db
.insert(chats)
.values({
id: message.chatId,
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
})
.execute();
}
const messageExists = await db.query.messages.findFirst({
where: eq(messagesSchema.messageId, humanMessageId),
});
if (!messageExists) {
await db
.insert(messagesSchema)
.values({
content: message.content,
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
})
.execute();
} else {
await db
.delete(messagesSchema)
.where(
and(
gt(messagesSchema.id, messageExists.id),
eq(messagesSchema.chatId, message.chatId),
),
)
.execute();
}
};
export const POST = async (req: Request) => {
try {
const body = (await req.json()) as Body;
const { message } = body;
if (message.content === '') {
return Response.json(
{
message: 'Please provide a message to process',
},
{ status: 400 },
);
}
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.name || Object.keys(chatModelProvider)[0]
];
const embeddingProvider =
embeddingModelProviders[
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
];
const embeddingModel =
embeddingProvider[
body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
];
let llm: BaseChatModel | undefined;
let embedding = embeddingModel.model;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
if (!embedding) {
return Response.json(
{ error: 'Invalid embedding model' },
{ status: 400 },
);
}
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const handler = searchHandlers[body.focusMode];
if (!handler) {
return Response.json(
{
message: 'Invalid focus mode',
},
{ status: 400 },
);
}
const stream = await handler.searchAndAnswer(
message.content,
history,
llm,
embedding,
body.optimizationMode,
body.files,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
headers: {
'Content-Type': 'text/event-stream',
Connection: 'keep-alive',
'Cache-Control': 'no-cache, no-transform',
},
});
} catch (err) {
console.error('An error ocurred while processing chat request:', err);
return Response.json(
{ message: 'An error ocurred while processing chat request' },
{ status: 500 },
);
}
};
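
The route above replaces the old WebSocket protocol with a single POST that streams newline-delimited JSON events ('sources', 'message', 'messageEnd', 'error'). A minimal client sketch follows; the body shape and event types come from the route itself, while the base URL, model names, and IDs are assumptions for illustration:

// Minimal sketch of a client for POST /api/chat. The body shape mirrors the
// `Body` type above; the base URL and model names are assumptions.
async function streamChat(question: string): Promise<void> {
  const res = await fetch('http://localhost:3000/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      message: { messageId: 'msg-1', chatId: 'chat-1', content: question },
      optimizationMode: 'balanced',
      focusMode: 'webSearch',
      history: [],
      files: [],
      chatModel: { provider: 'openai', name: 'gpt-4o-mini' }, // example values
      embeddingModel: { provider: 'openai', name: 'text-embedding-3-small' },
    }),
  });

  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffered = '';

  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffered += decoder.decode(value, { stream: true });

    // Events arrive as newline-delimited JSON; keep any trailing partial line buffered.
    const lines = buffered.split('\n');
    buffered = lines.pop() ?? '';
    for (const line of lines) {
      if (!line.trim()) continue;
      const event = JSON.parse(line);
      if (event.type === 'message') process.stdout.write(event.data);
      if (event.type === 'messageEnd') console.log('\n[done]');
    }
  }
}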


@@ -0,0 +1,69 @@
import db from '@/lib/db';
import { chats, messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
export const GET = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, id),
});
return Response.json(
{
chat: chatExists,
messages: chatMessages,
},
{ status: 200 },
);
} catch (err) {
console.error('Error in getting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};
export const DELETE = async (
req: Request,
{ params }: { params: Promise<{ id: string }> },
) => {
try {
const { id } = await params;
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, id),
});
if (!chatExists) {
return Response.json({ message: 'Chat not found' }, { status: 404 });
}
await db.delete(chats).where(eq(chats.id, id)).execute();
await db.delete(messages).where(eq(messages.chatId, id)).execute();
return Response.json(
{ message: 'Chat deleted successfully' },
{ status: 200 },
);
} catch (err) {
console.error('Error in deleting chat by id: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};


@@ -0,0 +1,15 @@
import db from '@/lib/db';
export const GET = async (req: Request) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return Response.json({ chats: chats }, { status: 200 });
} catch (err) {
console.error('Error in getting chats: ', err);
return Response.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
};

@@ -1,26 +1,22 @@
-import express from 'express';
 import {
+  getAnthropicApiKey,
+  getCustomOpenaiApiKey,
+  getCustomOpenaiApiUrl,
+  getCustomOpenaiModelName,
+  getGeminiApiKey,
+  getGroqApiKey,
+  getOllamaApiEndpoint,
+  getOpenaiApiKey,
+  updateConfig,
+} from '@/lib/config';
+import {
   getAvailableChatModelProviders,
   getAvailableEmbeddingModelProviders,
-} from '../lib/providers';
-import {
-  getGroqApiKey,
-  getOllamaApiEndpoint,
-  getAnthropicApiKey,
-  getGeminiApiKey,
-  getOpenaiApiKey,
-  updateConfig,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiApiKey,
-  getCustomOpenaiModelName,
-} from '../config';
-import logger from '../utils/logger';
+} from '@/lib/providers';
 
-const router = express.Router();
-
-router.get('/', async (_, res) => {
+export const GET = async (req: Request) => {
   try {
-    const config = {};
+    const config: Record<string, any> = {};
 
     const [chatModelProviders, embeddingModelProviders] = await Promise.all([
       getAvailableChatModelProviders(),
@@ -61,44 +57,53 @@ router.get('/', async (_, res) => {
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
 
-    res.status(200).json(config);
-  } catch (err: any) {
-    res.status(500).json({ message: 'An error has occurred.' });
-    logger.error(`Error getting config: ${err.message}`);
+    return Response.json({ ...config }, { status: 200 });
+  } catch (err) {
+    console.error('An error ocurred while getting config:', err);
+    return Response.json(
+      { message: 'An error ocurred while getting config' },
+      { status: 500 },
+    );
   }
-});
+};
 
-router.post('/', async (req, res) => {
-  const config = req.body;
+export const POST = async (req: Request) => {
+  try {
+    const config = await req.json();
 
-  const updatedConfig = {
-    MODELS: {
-      OPENAI: {
-        API_KEY: config.openaiApiKey,
-      },
-      GROQ: {
-        API_KEY: config.groqApiKey,
-      },
-      ANTHROPIC: {
-        API_KEY: config.anthropicApiKey,
-      },
-      GEMINI: {
-        API_KEY: config.geminiApiKey,
-      },
-      OLLAMA: {
-        API_URL: config.ollamaApiUrl,
-      },
-      CUSTOM_OPENAI: {
-        API_URL: config.customOpenaiApiUrl,
-        API_KEY: config.customOpenaiApiKey,
-        MODEL_NAME: config.customOpenaiModelName,
-      },
-    },
-  };
+    const updatedConfig = {
+      MODELS: {
+        OPENAI: {
+          API_KEY: config.openaiApiKey,
+        },
+        GROQ: {
+          API_KEY: config.groqApiKey,
+        },
+        ANTHROPIC: {
+          API_KEY: config.anthropicApiKey,
+        },
+        GEMINI: {
+          API_KEY: config.geminiApiKey,
+        },
+        OLLAMA: {
+          API_URL: config.ollamaApiUrl,
+        },
+        CUSTOM_OPENAI: {
+          API_URL: config.customOpenaiApiUrl,
+          API_KEY: config.customOpenaiApiKey,
+          MODEL_NAME: config.customOpenaiModelName,
+        },
+      },
+    };
 
-  updateConfig(updatedConfig);
+    updateConfig(updatedConfig);
 
-  res.status(200).json({ message: 'Config updated' });
-});
-
-export default router;
+    return Response.json({ message: 'Config updated' }, { status: 200 });
+  } catch (err) {
+    console.error('An error ocurred while updating config:', err);
+    return Response.json(
+      { message: 'An error ocurred while updating config' },
+      { status: 500 },
+    );
+  }
+};
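
Both config handlers now live on one route. A small sketch of a read-modify-write against it, assuming a local dev server; the flat camelCase field names are the ones the handlers above read and write:

// Sketch: read the current settings, then update one field via the new config route.
async function setOllamaUrl(url: string): Promise<void> {
  const base = 'http://localhost:3000'; // assumed dev server
  const current = await fetch(`${base}/api/config`).then((r) => r.json());

  // POST expects the flat camelCase keys that GET returns.
  await fetch(`${base}/api/config`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...current, ollamaApiUrl: url }),
  });
}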


@@ -0,0 +1,61 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
export const GET = async (req: Request) => {
try {
const data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
).results;
}),
])
)
.map((result) => result)
.flat()
.sort(() => Math.random() - 0.5);
return Response.json(
{
blogs: data,
},
{
status: 200,
},
);
} catch (err) {
console.error(`An error ocurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',
},
{
status: 500,
},
);
}
};


@@ -0,0 +1,83 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while searching images: ${err}`);
return Response.json(
{ message: 'An error ocurred while searching images' },
{ status: 500 },
);
}
};
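
A sketch of calling the image search route; per the handlers in this commit range, /api/videos accepts the same body and returns `videos` instead of `images`. The base URL is an assumption, and omitting `chatModel` selects the first available provider, as in the handler above:

// Sketch: query the image search route; /api/videos takes the same shape.
async function searchImages(query: string) {
  const res = await fetch('http://localhost:3000/api/images', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query,
      chatHistory: [], // entries shaped like { role: 'user' | 'assistant', content: string }
    }),
  });
  const { images } = await res.json();
  return images;
}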


@@ -0,0 +1,47 @@
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
export const GET = async (req: Request) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete (chatModelProviders[provider][model] as { model?: unknown })
.model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete (embeddingModelProviders[provider][model] as { model?: unknown })
.model;
});
});
return Response.json(
{
chatModelProviders,
embeddingModelProviders,
},
{
status: 200,
},
);
} catch (err) {
console.error('An error ocurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',
},
{
status: 500,
},
);
}
};
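
A sketch of listing what the models route exposes; the handler above strips the `model` instances server-side, and the remaining per-model fields are not shown in this diff, so treat them as opaque. The base URL is an assumption:

// Sketch: enumerate available chat model providers from /api/models.
async function listChatProviders(): Promise<void> {
  const providers = await fetch('http://localhost:3000/api/models').then((r) =>
    r.json(),
  );
  for (const [provider, models] of Object.entries(
    providers.chatModelProviders as Record<string, object>,
  )) {
    console.log(provider, Object.keys(models));
  }
}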


@@ -0,0 +1,81 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
);
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error ocurred while generating suggestions' },
{ status: 500 },
);
}
};
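
A sketch of requesting follow-up suggestions; the `chatHistory` role/content shape follows the mapping in the handler above, and the base URL is an assumption:

// Sketch: ask for suggestions after an exchange; the response is { suggestions }.
async function fetchSuggestions() {
  const res = await fetch('http://localhost:3000/api/suggestions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      chatHistory: [
        { role: 'user', content: 'What is SearxNG?' },
        { role: 'assistant', content: 'SearxNG is a metasearch engine.' },
      ],
    }),
  });
  const { suggestions } = await res.json();
  return suggestions;
}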


@@ -0,0 +1,134 @@
import { NextResponse } from 'next/server';
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
interface FileRes {
fileName: string;
fileExtension: string;
fileId: string;
}
const uploadDir = path.join(process.cwd(), 'uploads');
if (!fs.existsSync(uploadDir)) {
fs.mkdirSync(uploadDir, { recursive: true });
}
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
export async function POST(req: Request) {
try {
const formData = await req.formData();
const files = formData.getAll('files') as File[];
const embedding_model = formData.get('embedding_model');
const embedding_model_provider = formData.get('embedding_model_provider');
if (!embedding_model || !embedding_model_provider) {
return NextResponse.json(
{ message: 'Missing embedding model or provider' },
{ status: 400 },
);
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
const embeddingModel =
embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
let embeddingsModel =
embeddingModels[provider as string]?.[embeddingModel as string]?.model;
if (!embeddingsModel) {
return NextResponse.json(
{ message: 'Invalid embedding model selected' },
{ status: 400 },
);
}
const processedFiles: FileRes[] = [];
await Promise.all(
files.map(async (file: any) => {
const fileExtension = file.name.split('.').pop();
if (!['pdf', 'docx', 'txt'].includes(fileExtension!)) {
return NextResponse.json(
{ message: 'File type not supported' },
{ status: 400 },
);
}
const uniqueFileName = `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`;
const filePath = path.join(uploadDir, uniqueFileName);
const buffer = Buffer.from(await file.arrayBuffer());
fs.writeFileSync(filePath, new Uint8Array(buffer));
let docs: any[] = [];
if (fileExtension === 'pdf') {
const loader = new PDFLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'docx') {
const loader = new DocxLoader(filePath);
docs = await loader.load();
} else if (fileExtension === 'txt') {
const text = fs.readFileSync(filePath, 'utf-8');
docs = [
new Document({ pageContent: text, metadata: { title: file.name } }),
];
}
const splitted = await splitter.splitDocuments(docs);
const extractedDataPath = filePath.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(
extractedDataPath,
JSON.stringify({
title: file.name,
contents: splitted.map((doc) => doc.pageContent),
}),
);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsDataPath = filePath.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(
embeddingsDataPath,
JSON.stringify({
title: file.name,
embeddings,
}),
);
processedFiles.push({
fileName: file.name,
fileExtension: fileExtension,
fileId: uniqueFileName.replace(/\.\w+$/, ''),
});
}),
);
return NextResponse.json({
files: processedFiles,
});
} catch (error) {
console.error('Error uploading file:', error);
return NextResponse.json(
{ message: 'An error has occurred.' },
{ status: 500 },
);
}
}
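
The upload route reads multipart form data. A sketch from Node 18+, where `fetch`, `FormData`, and `Blob` are global; the form field names come from the handler above, while the base URL, file path, and model names are assumptions:

// Sketch: upload a PDF for retrieval; the handler expects fields `files`,
// `embedding_model_provider`, and `embedding_model`.
import { readFile } from 'fs/promises';

async function uploadPdf(path: string) {
  const form = new FormData();
  form.append('files', new Blob([await readFile(path)]), 'example.pdf');
  form.append('embedding_model_provider', 'openai'); // example provider
  form.append('embedding_model', 'text-embedding-3-small'); // example model

  const res = await fetch('http://localhost:3000/api/uploads', {
    method: 'POST',
    body: form, // multipart boundary is set automatically
  });
  return res.json(); // { files: [{ fileName, fileExtension, fileId }] }
}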


@@ -0,0 +1,83 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
chatModelProviders[
body.chatModel?.provider || Object.keys(chatModelProviders)[0]
];
const chatModel =
chatModelProvider[
body.chatModel?.model || Object.keys(chatModelProvider)[0]
];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (chatModelProvider && chatModel) {
llm = chatModel.model;
}
if (!llm) {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
);
return Response.json({ videos }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while searching videos: ${err}`);
return Response.json(
{ message: 'An error ocurred while searching videos' },
{ status: 500 },
);
}
};


@@ -0,0 +1,9 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
};
export default Page;

@@ -19,7 +19,7 @@ const Page = () => {
   useEffect(() => {
     const fetchData = async () => {
       try {
-        const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/discover`, {
+        const res = await fetch(`/api/discover`, {
           method: 'GET',
           headers: {
             'Content-Type': 'application/json',
(binary image changed: 25 KiB before, 25 KiB after)

@@ -21,7 +21,7 @@ const Page = () => {
   const fetchChats = async () => {
     setLoading(true);
 
-    const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/chats`, {
+    const res = await fetch(`/api/chats`, {
       method: 'GET',
       headers: {
         'Content-Type': 'application/json',

@@ -116,7 +116,7 @@ const Page = () => {
   useEffect(() => {
     const fetchConfig = async () => {
       setIsLoading(true);
-      const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
+      const res = await fetch(`/api/config`, {
         headers: {
           'Content-Type': 'application/json',
         },
@@ -187,16 +187,13 @@ const Page = () => {
         [key]: value,
       } as SettingsType;
 
-      const response = await fetch(
-        `${process.env.NEXT_PUBLIC_API_URL}/config`,
-        {
-          method: 'POST',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-          body: JSON.stringify(updatedConfig),
-        },
-      );
+      const response = await fetch(`/api/config`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify(updatedConfig),
+      });
 
       if (!response.ok) {
         throw new Error('Failed to update config');
@@ -208,7 +205,7 @@ const Page = () => {
           key.toLowerCase().includes('api') ||
           key.toLowerCase().includes('url')
         ) {
-          const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/config`, {
+          const res = await fetch(`/api/config`, {
             headers: {
               'Content-Type': 'application/json',
             },

@@ -48,11 +48,17 @@ const Chat = ({
   });
 
   useEffect(() => {
-    messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
+    const scroll = () => {
+      messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
+    };
 
     if (messages.length === 1) {
      document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
     }
+
+    if (messages[messages.length - 1]?.role == 'user') {
+      scroll();
+    }
   }, [messages]);
 
   return (

@@ -29,280 +29,154 @@ export interface File {
   fileId: string;
 }
 
-const useSocket = (
-  url: string,
-  setIsWSReady: (ready: boolean) => void,
-  setError: (error: boolean) => void,
-) => {
-  const wsRef = useRef<WebSocket | null>(null);
-  const reconnectTimeoutRef = useRef<NodeJS.Timeout>();
-  const retryCountRef = useRef(0);
-  const isCleaningUpRef = useRef(false);
-  const MAX_RETRIES = 3;
-  const INITIAL_BACKOFF = 1000; // 1 second
-  const isConnectionErrorRef = useRef(false);
-
-  const getBackoffDelay = (retryCount: number) => {
-    return Math.min(INITIAL_BACKOFF * Math.pow(2, retryCount), 10000); // Cap at 10 seconds
-  };
-
-  useEffect(() => {
-    const connectWs = async () => {
-      if (wsRef.current?.readyState === WebSocket.OPEN) {
-        wsRef.current.close();
-      }
-
-      try {
-        let chatModel = localStorage.getItem('chatModel');
-        let chatModelProvider = localStorage.getItem('chatModelProvider');
-        let embeddingModel = localStorage.getItem('embeddingModel');
-        let embeddingModelProvider = localStorage.getItem(
-          'embeddingModelProvider',
-        );
-
-        const autoImageSearch = localStorage.getItem('autoImageSearch');
-        const autoVideoSearch = localStorage.getItem('autoVideoSearch');
-
-        if (!autoImageSearch) {
-          localStorage.setItem('autoImageSearch', 'true');
-        }
-
-        if (!autoVideoSearch) {
-          localStorage.setItem('autoVideoSearch', 'false');
-        }
-
-        const providers = await fetch(
-          `${process.env.NEXT_PUBLIC_API_URL}/models`,
-          {
-            headers: {
-              'Content-Type': 'application/json',
-            },
-          },
-        ).then(async (res) => {
-          if (!res.ok)
-            throw new Error(
-              `Failed to fetch models: ${res.status} ${res.statusText}`,
-            );
-          return res.json();
-        });
-
-        if (
-          !chatModel ||
-          !chatModelProvider ||
-          !embeddingModel ||
-          !embeddingModelProvider
-        ) {
-          if (!chatModel || !chatModelProvider) {
-            const chatModelProviders = providers.chatModelProviders;
-
-            chatModelProvider =
-              chatModelProvider || Object.keys(chatModelProviders)[0];
-
-            chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
-
-            if (
-              !chatModelProviders ||
-              Object.keys(chatModelProviders).length === 0
-            )
-              return toast.error('No chat models available');
-          }
-
-          if (!embeddingModel || !embeddingModelProvider) {
-            const embeddingModelProviders = providers.embeddingModelProviders;
-
-            if (
-              !embeddingModelProviders ||
-              Object.keys(embeddingModelProviders).length === 0
-            )
-              return toast.error('No embedding models available');
-
-            embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
-            embeddingModel = Object.keys(
-              embeddingModelProviders[embeddingModelProvider],
-            )[0];
-          }
-
-          localStorage.setItem('chatModel', chatModel!);
-          localStorage.setItem('chatModelProvider', chatModelProvider);
-          localStorage.setItem('embeddingModel', embeddingModel!);
-          localStorage.setItem(
-            'embeddingModelProvider',
-            embeddingModelProvider,
-          );
-        } else {
-          const chatModelProviders = providers.chatModelProviders;
-          const embeddingModelProviders = providers.embeddingModelProviders;
-
-          if (
-            Object.keys(chatModelProviders).length > 0 &&
-            !chatModelProviders[chatModelProvider]
-          ) {
-            const chatModelProvidersKeys = Object.keys(chatModelProviders);
-            chatModelProvider =
-              chatModelProvidersKeys.find(
-                (key) => Object.keys(chatModelProviders[key]).length > 0,
-              ) || chatModelProvidersKeys[0];
-
-            localStorage.setItem('chatModelProvider', chatModelProvider);
-          }
-
-          if (
-            chatModelProvider &&
-            !chatModelProviders[chatModelProvider][chatModel]
-          ) {
-            chatModel = Object.keys(
-              chatModelProviders[
-                Object.keys(chatModelProviders[chatModelProvider]).length > 0
-                  ? chatModelProvider
-                  : Object.keys(chatModelProviders)[0]
-              ],
-            )[0];
-            localStorage.setItem('chatModel', chatModel);
-          }
-
-          if (
-            Object.keys(embeddingModelProviders).length > 0 &&
-            !embeddingModelProviders[embeddingModelProvider]
-          ) {
-            embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
-            localStorage.setItem(
-              'embeddingModelProvider',
-              embeddingModelProvider,
-            );
-          }
-
-          if (
-            embeddingModelProvider &&
-            !embeddingModelProviders[embeddingModelProvider][embeddingModel]
-          ) {
-            embeddingModel = Object.keys(
-              embeddingModelProviders[embeddingModelProvider],
-            )[0];
-            localStorage.setItem('embeddingModel', embeddingModel);
-          }
-        }
-
-        const wsURL = new URL(url);
-        const searchParams = new URLSearchParams({});
-
-        searchParams.append('chatModel', chatModel!);
-        searchParams.append('chatModelProvider', chatModelProvider);
-
-        if (chatModelProvider === 'custom_openai') {
-          searchParams.append(
-            'openAIApiKey',
-            localStorage.getItem('openAIApiKey')!,
-          );
-          searchParams.append(
-            'openAIBaseURL',
-            localStorage.getItem('openAIBaseURL')!,
-          );
-        }
-
-        searchParams.append('embeddingModel', embeddingModel!);
-        searchParams.append('embeddingModelProvider', embeddingModelProvider);
-
-        wsURL.search = searchParams.toString();
-
-        const ws = new WebSocket(wsURL.toString());
-        wsRef.current = ws;
-
-        const timeoutId = setTimeout(() => {
-          if (ws.readyState !== 1) {
-            toast.error(
-              'Failed to connect to the server. Please try again later.',
-            );
-          }
-        }, 10000);
-
-        ws.addEventListener('message', (e) => {
-          const data = JSON.parse(e.data);
-          if (data.type === 'signal' && data.data === 'open') {
-            const interval = setInterval(() => {
-              if (ws.readyState === 1) {
-                setIsWSReady(true);
-                setError(false);
-                if (retryCountRef.current > 0) {
-                  toast.success('Connection restored.');
-                }
-                retryCountRef.current = 0;
-                clearInterval(interval);
-              }
-            }, 5);
-            clearTimeout(timeoutId);
-            console.debug(new Date(), 'ws:connected');
-          }
-          if (data.type === 'error') {
-            isConnectionErrorRef.current = true;
-            setError(true);
-            toast.error(data.data);
-          }
-        });
-
-        ws.onerror = () => {
-          clearTimeout(timeoutId);
-          setIsWSReady(false);
-          toast.error('WebSocket connection error.');
-        };
-
-        ws.onclose = () => {
-          clearTimeout(timeoutId);
-          setIsWSReady(false);
-          console.debug(new Date(), 'ws:disconnected');
-          if (!isCleaningUpRef.current && !isConnectionErrorRef.current) {
-            toast.error('Connection lost. Attempting to reconnect...');
-            attemptReconnect();
-          }
-        };
-      } catch (error) {
-        console.debug(new Date(), 'ws:error', error);
-        setIsWSReady(false);
-        attemptReconnect();
-      }
-    };
-
-    const attemptReconnect = () => {
-      retryCountRef.current += 1;
-
-      if (retryCountRef.current > MAX_RETRIES) {
-        console.debug(new Date(), 'ws:max_retries');
-        setError(true);
-        toast.error(
-          'Unable to connect to server after multiple attempts. Please refresh the page to try again.',
-        );
-        return;
-      }
-
-      const backoffDelay = getBackoffDelay(retryCountRef.current);
-      console.debug(
-        new Date(),
-        `ws:retry attempt=${retryCountRef.current}/${MAX_RETRIES} delay=${backoffDelay}ms`,
-      );
-
-      if (reconnectTimeoutRef.current) {
-        clearTimeout(reconnectTimeoutRef.current);
-      }
-
-      reconnectTimeoutRef.current = setTimeout(() => {
-        connectWs();
-      }, backoffDelay);
-    };
-
-    connectWs();
-
-    return () => {
-      if (reconnectTimeoutRef.current) {
-        clearTimeout(reconnectTimeoutRef.current);
-      }
-      if (wsRef.current?.readyState === WebSocket.OPEN) {
-        wsRef.current.close();
-        isCleaningUpRef.current = true;
-        console.debug(new Date(), 'ws:cleanup');
-      }
-    };
-  }, [url, setIsWSReady, setError]);
-
-  return wsRef.current;
-};
+interface ChatModelProvider {
+  name: string;
+  provider: string;
+}
+
+interface EmbeddingModelProvider {
+  name: string;
+  provider: string;
+}
+
+const checkConfig = async (
+  setChatModelProvider: (provider: ChatModelProvider) => void,
+  setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
+  setIsConfigReady: (ready: boolean) => void,
+  setHasError: (hasError: boolean) => void,
+) => {
+  try {
+    let chatModel = localStorage.getItem('chatModel');
+    let chatModelProvider = localStorage.getItem('chatModelProvider');
+    let embeddingModel = localStorage.getItem('embeddingModel');
+    let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
+
+    const autoImageSearch = localStorage.getItem('autoImageSearch');
+    const autoVideoSearch = localStorage.getItem('autoVideoSearch');
+
+    if (!autoImageSearch) {
+      localStorage.setItem('autoImageSearch', 'true');
+    }
+
+    if (!autoVideoSearch) {
+      localStorage.setItem('autoVideoSearch', 'false');
+    }
+
+    const providers = await fetch(`/api/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    }).then(async (res) => {
+      if (!res.ok)
+        throw new Error(
+          `Failed to fetch models: ${res.status} ${res.statusText}`,
+        );
+      return res.json();
+    });
+
+    if (
+      !chatModel ||
+      !chatModelProvider ||
+      !embeddingModel ||
+      !embeddingModelProvider
+    ) {
+      if (!chatModel || !chatModelProvider) {
+        const chatModelProviders = providers.chatModelProviders;
+
+        chatModelProvider =
+          chatModelProvider || Object.keys(chatModelProviders)[0];
+
+        chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
+
+        if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
+          return toast.error('No chat models available');
+      }
+
+      if (!embeddingModel || !embeddingModelProvider) {
+        const embeddingModelProviders = providers.embeddingModelProviders;
+
+        if (
+          !embeddingModelProviders ||
+          Object.keys(embeddingModelProviders).length === 0
+        )
+          return toast.error('No embedding models available');
+
+        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
+        embeddingModel = Object.keys(
+          embeddingModelProviders[embeddingModelProvider],
+        )[0];
+      }
+
+      localStorage.setItem('chatModel', chatModel!);
+      localStorage.setItem('chatModelProvider', chatModelProvider);
+      localStorage.setItem('embeddingModel', embeddingModel!);
+      localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
+    } else {
+      const chatModelProviders = providers.chatModelProviders;
+      const embeddingModelProviders = providers.embeddingModelProviders;
+
+      if (
+        Object.keys(chatModelProviders).length > 0 &&
+        !chatModelProviders[chatModelProvider]
+      ) {
+        const chatModelProvidersKeys = Object.keys(chatModelProviders);
+        chatModelProvider =
+          chatModelProvidersKeys.find(
+            (key) => Object.keys(chatModelProviders[key]).length > 0,
+          ) || chatModelProvidersKeys[0];
+
+        localStorage.setItem('chatModelProvider', chatModelProvider);
+      }
+
+      if (
+        chatModelProvider &&
+        !chatModelProviders[chatModelProvider][chatModel]
+      ) {
+        chatModel = Object.keys(
+          chatModelProviders[
+            Object.keys(chatModelProviders[chatModelProvider]).length > 0
+              ? chatModelProvider
+              : Object.keys(chatModelProviders)[0]
+          ],
+        )[0];
+        localStorage.setItem('chatModel', chatModel);
+      }
+
+      if (
+        Object.keys(embeddingModelProviders).length > 0 &&
+        !embeddingModelProviders[embeddingModelProvider]
+      ) {
+        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
+        localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
+      }
+
+      if (
+        embeddingModelProvider &&
+        !embeddingModelProviders[embeddingModelProvider][embeddingModel]
+      ) {
+        embeddingModel = Object.keys(
+          embeddingModelProviders[embeddingModelProvider],
+        )[0];
+        localStorage.setItem('embeddingModel', embeddingModel);
+      }
+    }
+
+    setChatModelProvider({
+      name: chatModel!,
+      provider: chatModelProvider,
+    });
+
+    setEmbeddingModelProvider({
+      name: embeddingModel!,
+      provider: embeddingModelProvider,
+    });
+
+    setIsConfigReady(true);
+  } catch (err) {
+    console.error('An error occurred while checking the configuration:', err);
+    setIsConfigReady(false);
+    setHasError(true);
+  }
+};
 
 const loadMessages = async (
@@ -315,15 +189,12 @@ const loadMessages = async (
   setFiles: (files: File[]) => void,
   setFileIds: (fileIds: string[]) => void,
 ) => {
-  const res = await fetch(
-    `${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
-    {
-      method: 'GET',
-      headers: {
-        'Content-Type': 'application/json',
-      },
-    },
-  );
+  const res = await fetch(`/api/chats/${chatId}`, {
+    method: 'GET',
+    headers: {
+      'Content-Type': 'application/json',
+    },
+  });
 
   if (res.status === 404) {
     setNotFound(true);
@@ -373,15 +244,32 @@ const ChatWindow = ({ id }: { id?: string }) => {
   const [chatId, setChatId] = useState<string | undefined>(id);
   const [newChatCreated, setNewChatCreated] = useState(false);
 
+  const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
+    {
+      name: '',
+      provider: '',
+    },
+  );
+
+  const [embeddingModelProvider, setEmbeddingModelProvider] =
+    useState<EmbeddingModelProvider>({
+      name: '',
+      provider: '',
+    });
+
+  const [isConfigReady, setIsConfigReady] = useState(false);
   const [hasError, setHasError] = useState(false);
   const [isReady, setIsReady] = useState(false);
 
-  const [isWSReady, setIsWSReady] = useState(false);
-  const ws = useSocket(
-    process.env.NEXT_PUBLIC_WS_URL!,
-    setIsWSReady,
-    setHasError,
-  );
+  useEffect(() => {
+    checkConfig(
+      setChatModelProvider,
+      setEmbeddingModelProvider,
+      setIsConfigReady,
+      setHasError,
+    );
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, []);
 
   const [loading, setLoading] = useState(false);
   const [messageAppeared, setMessageAppeared] = useState(false);
@@ -399,8 +287,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
   const [notFound, setNotFound] = useState(false);
 
-  const [isSettingsOpen, setIsSettingsOpen] = useState(false);
-
   useEffect(() => {
     if (
       chatId &&
@@ -426,16 +312,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);
 
-  useEffect(() => {
-    return () => {
-      if (ws?.readyState === 1) {
-        ws.close();
-        console.debug(new Date(), 'ws:cleanup');
-      }
-    };
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, []);
-
   const messagesRef = useRef<Message[]>([]);
 
   useEffect(() => {
@@ -443,18 +319,18 @@ const ChatWindow = ({ id }: { id?: string }) => {
   }, [messages]);
 
   useEffect(() => {
-    if (isMessagesLoaded && isWSReady) {
+    if (isMessagesLoaded && isConfigReady) {
       setIsReady(true);
       console.debug(new Date(), 'app:ready');
     } else {
       setIsReady(false);
     }
-  }, [isMessagesLoaded, isWSReady]);
+  }, [isMessagesLoaded, isConfigReady]);
 
   const sendMessage = async (message: string, messageId?: string) => {
     if (loading) return;
-    if (!ws || ws.readyState !== WebSocket.OPEN) {
-      toast.error('Cannot send message while disconnected');
+    if (!isConfigReady) {
+      toast.error('Cannot send message before the configuration is ready');
       return;
     }
@@ -467,18 +343,27 @@ const ChatWindow = ({ id }: { id?: string }) => {
     messageId = messageId ?? crypto.randomBytes(7).toString('hex');
 
-    ws.send(
+    console.log(
       JSON.stringify({
-        type: 'message',
+        content: message,
         message: {
           messageId: messageId,
           chatId: chatId!,
           content: message,
         },
+        chatId: chatId!,
         files: fileIds,
         focusMode: focusMode,
        optimizationMode: optimizationMode,
-        history: [...chatHistory, ['human', message]],
+        history: chatHistory,
+        chatModel: {
+          name: chatModelProvider.name,
+          provider: chatModelProvider.provider,
+        },
+        embeddingModel: {
+          name: embeddingModelProvider.name,
+          provider: embeddingModelProvider.provider,
+        },
       }),
     );
@@ -493,9 +378,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
       },
     ]);
 
-    const messageHandler = async (e: MessageEvent) => {
-      const data = JSON.parse(e.data);
-
+    const messageHandler = async (data: any) => {
       if (data.type === 'error') {
         toast.error(data.data);
         setLoading(false);
@@ -558,7 +441,6 @@ const ChatWindow = ({ id }: { id?: string }) => {
           ['assistant', recievedMessage],
         ]);
 
-        ws?.removeEventListener('message', messageHandler);
         setLoading(false);
 
         const lastMsg = messagesRef.current[messagesRef.current.length - 1];
@@ -584,16 +466,72 @@ const ChatWindow = ({ id }: { id?: string }) => {
         const autoVideoSearch = localStorage.getItem('autoVideoSearch');
 
         if (autoImageSearch === 'true') {
-          document.getElementById('search-images')?.click();
+          document
+            .getElementById(`search-images-${lastMsg.messageId}`)
+            ?.click();
         }
 
         if (autoVideoSearch === 'true') {
-          document.getElementById('search-videos')?.click();
+          document
+            .getElementById(`search-videos-${lastMsg.messageId}`)
+            ?.click();
         }
       }
     };
 
-    ws?.addEventListener('message', messageHandler);
+    const res = await fetch('/api/chat', {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({
+        content: message,
+        message: {
+          messageId: messageId,
+          chatId: chatId!,
+          content: message,
+        },
+        chatId: chatId!,
+        files: fileIds,
+        focusMode: focusMode,
+        optimizationMode: optimizationMode,
+        history: chatHistory,
+        chatModel: {
+          name: chatModelProvider.name,
+          provider: chatModelProvider.provider,
+        },
+        embeddingModel: {
+          name: embeddingModelProvider.name,
+          provider: embeddingModelProvider.provider,
+        },
+      }),
+    });
+
+    if (!res.body) throw new Error('No response body');
+
+    const reader = res.body?.getReader();
+    const decoder = new TextDecoder('utf-8');
+
+    let partialChunk = '';
+
+    while (true) {
+      const { value, done } = await reader.read();
+      if (done) break;
+
+      partialChunk += decoder.decode(value, { stream: true });
+
+      try {
+        const messages = partialChunk.split('\n');
+        for (const msg of messages) {
+          if (!msg.trim()) continue;
+          const json = JSON.parse(msg);
+          messageHandler(json);
+        }
+        partialChunk = '';
+      } catch (error) {
+        console.warn('Incomplete JSON, waiting for next chunk...');
+      }
+    }
   };
 
   const rewrite = (messageId: string) => {
@@ -614,11 +552,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
   };
 
   useEffect(() => {
-    if (isReady && initialMessage && ws?.readyState === 1) {
+    if (isReady && initialMessage && isConfigReady) {
       sendMessage(initialMessage);
     }
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [ws?.readyState, isReady, initialMessage, isWSReady]);
+  }, [isConfigReady, isReady, initialMessage]);
 
   if (hasError) {
     return (

@@ -29,15 +29,12 @@ const DeleteChat = ({
   const handleDelete = async () => {
     setLoading(true);
     try {
-      const res = await fetch(
-        `${process.env.NEXT_PUBLIC_API_URL}/chats/${chatId}`,
-        {
-          method: 'DELETE',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-        },
-      );
+      const res = await fetch(`/api/chats/${chatId}`, {
+        method: 'DELETE',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+      });
 
       if (res.status != 200) {
         throw new Error('Failed to delete chat');

@@ -68,7 +68,13 @@ const MessageBox = ({
   return (
     <div>
       {message.role === 'user' && (
-        <div className={cn('w-full', messageIndex === 0 ? 'pt-16' : 'pt-8', 'break-words')}>
+        <div
+          className={cn(
+            'w-full',
+            messageIndex === 0 ? 'pt-16' : 'pt-8',
+            'break-words',
+          )}
+        >
           <h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
             {message.content}
           </h2>
@@ -187,10 +193,12 @@ const MessageBox = ({
               <SearchImages
                 query={history[messageIndex - 1].content}
                 chatHistory={history.slice(0, messageIndex - 1)}
+                messageId={message.messageId}
               />
               <SearchVideos
                 chatHistory={history.slice(0, messageIndex - 1)}
                 query={history[messageIndex - 1].content}
+                messageId={message.messageId}
               />
             </div>
           </div>

View File

@@ -41,7 +41,7 @@ const Attach = ({
       data.append('embedding_model_provider', embeddingModelProvider!);
       data.append('embedding_model', embeddingModel!);

-      const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
+      const res = await fetch(`/api/uploads`, {
         method: 'POST',
         body: data,
       });

View File

@@ -39,7 +39,7 @@ const AttachSmall = ({
       data.append('embedding_model_provider', embeddingModelProvider!);
       data.append('embedding_model', embeddingModel!);

-      const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/uploads`, {
+      const res = await fetch(`/api/uploads`, {
         method: 'POST',
         body: data,
       });

View File

@@ -45,25 +45,13 @@ const focusModes = [
     key: 'youtubeSearch',
     title: 'Youtube',
     description: 'Search and watch videos',
-    icon: (
-      <SiYoutube
-        className="h-5 w-auto mr-0.5"
-        onPointerEnterCapture={undefined}
-        onPointerLeaveCapture={undefined}
-      />
-    ),
+    icon: <SiYoutube className="h-5 w-auto mr-0.5" />,
   },
   {
     key: 'redditSearch',
     title: 'Reddit',
     description: 'Search for discussions and opinions',
-    icon: (
-      <SiReddit
-        className="h-5 w-auto mr-0.5"
-        onPointerEnterCapture={undefined}
-        onPointerLeaveCapture={undefined}
-      />
-    ),
+    icon: <SiReddit className="h-5 w-auto mr-0.5" />,
   },
 ];

View File

@@ -69,11 +69,15 @@ const MessageSources = ({ sources }: { sources: Document[] }) => {
           <div className="flex flex-row items-center space-x-1">
             {sources.slice(3, 6).map((source, i) => {
               return source.metadata.url === 'File' ? (
-                <div className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full">
+                <div
+                  key={i}
+                  className="bg-dark-200 hover:bg-dark-100 transition duration-200 flex items-center justify-center w-6 h-6 rounded-full"
+                >
                   <File size={12} className="text-white/70" />
                 </div>
               ) : (
                 <img
+                  key={i}
                   src={`https://s2.googleusercontent.com/s2/favicons?domain_url=${source.metadata.url}`}
                   width={16}
                   height={16}

View File

@@ -14,9 +14,11 @@ type Image = {
 const SearchImages = ({
   query,
   chatHistory,
+  messageId,
 }: {
   query: string;
   chatHistory: Message[];
+  messageId: string;
 }) => {
   const [images, setImages] = useState<Image[] | null>(null);
   const [loading, setLoading] = useState(false);
@@ -27,7 +29,7 @@ const SearchImages = ({
     <>
       {!loading && images === null && (
         <button
-          id="search-images"
+          id={`search-images-${messageId}`}
           onClick={async () => {
             setLoading(true);
@@ -37,27 +39,24 @@ const SearchImages = ({
             const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
             const customOpenAIKey = localStorage.getItem('openAIApiKey');

-            const res = await fetch(
-              `${process.env.NEXT_PUBLIC_API_URL}/images`,
-              {
-                method: 'POST',
-                headers: {
-                  'Content-Type': 'application/json',
-                },
-                body: JSON.stringify({
-                  query: query,
-                  chatHistory: chatHistory,
-                  chatModel: {
-                    provider: chatModelProvider,
-                    model: chatModel,
-                    ...(chatModelProvider === 'custom_openai' && {
-                      customOpenAIBaseURL: customOpenAIBaseURL,
-                      customOpenAIKey: customOpenAIKey,
-                    }),
-                  },
-                }),
-              },
-            );
+            const res = await fetch(`/api/images`, {
+              method: 'POST',
+              headers: {
+                'Content-Type': 'application/json',
+              },
+              body: JSON.stringify({
+                query: query,
+                chatHistory: chatHistory,
+                chatModel: {
+                  provider: chatModelProvider,
+                  model: chatModel,
+                  ...(chatModelProvider === 'custom_openai' && {
+                    customOpenAIBaseURL: customOpenAIBaseURL,
+                    customOpenAIKey: customOpenAIKey,
+                  }),
+                },
+              }),
+            });

             const data = await res.json();

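Note: suffixing the button id with the message id (here and in SearchVideos below) gives each message its own search trigger, which is what lets the auto-search effect in ChatWindow click the button of the newest message only. The lookup side of the pattern, in isolation (triggerImageSearch is an illustrative helper name):

const triggerImageSearch = (messageId: string): void => {
  // Each message now renders its own uniquely-identified button.
  document.getElementById(`search-images-${messageId}`)?.click();
};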
View File

@@ -27,9 +27,11 @@ declare module 'yet-another-react-lightbox' {
 const Searchvideos = ({
   query,
   chatHistory,
+  messageId,
 }: {
   query: string;
   chatHistory: Message[];
+  messageId: string;
 }) => {
   const [videos, setVideos] = useState<Video[] | null>(null);
   const [loading, setLoading] = useState(false);
@@ -42,7 +44,7 @@ const Searchvideos = ({
     <>
       {!loading && videos === null && (
         <button
-          id="search-videos"
+          id={`search-videos-${messageId}`}
           onClick={async () => {
             setLoading(true);
@@ -52,27 +54,24 @@ const Searchvideos = ({
             const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
             const customOpenAIKey = localStorage.getItem('openAIApiKey');

-            const res = await fetch(
-              `${process.env.NEXT_PUBLIC_API_URL}/videos`,
-              {
-                method: 'POST',
-                headers: {
-                  'Content-Type': 'application/json',
-                },
-                body: JSON.stringify({
-                  query: query,
-                  chatHistory: chatHistory,
-                  chatModel: {
-                    provider: chatModelProvider,
-                    model: chatModel,
-                    ...(chatModelProvider === 'custom_openai' && {
-                      customOpenAIBaseURL: customOpenAIBaseURL,
-                      customOpenAIKey: customOpenAIKey,
-                    }),
-                  },
-                }),
-              },
-            );
+            const res = await fetch(`/api/videos`, {
+              method: 'POST',
+              headers: {
+                'Content-Type': 'application/json',
+              },
+              body: JSON.stringify({
+                query: query,
+                chatHistory: chatHistory,
+                chatModel: {
+                  provider: chatModelProvider,
+                  model: chatModel,
+                  ...(chatModelProvider === 'custom_openai' && {
+                    customOpenAIBaseURL: customOpenAIBaseURL,
+                    customOpenAIKey: customOpenAIKey,
+                  }),
+                },
+              }),
+            });

             const data = await res.json();

View File

@@ -7,7 +7,7 @@ export const getSuggestions = async (chatHisory: Message[]) => {
   const customOpenAIKey = localStorage.getItem('openAIApiKey');
   const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');

-  const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/suggestions`, {
+  const res = await fetch(`/api/suggestions`, {
     method: 'POST',
     headers: {
       'Content-Type': 'application/json',

View File

@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
-import { searchSearxng } from '../lib/searxng';
+import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const imageSearchChainPrompt = `
@@ -36,6 +36,12 @@ type ImageSearchChainInput = {
   query: string;
 };

+interface ImageSearchResult {
+  img_src: string;
+  url: string;
+  title: string;
+}
+
 const strParser = new StringOutputParser();

 const createImageSearchChain = (llm: BaseChatModel) => {
@@ -56,7 +62,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
         engines: ['bing images', 'google images'],
       });

-      const images = [];
+      const images: ImageSearchResult[] = [];

       res.results.forEach((result) => {
         if (result.img_src && result.url && result.title) {

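Note: typing the accumulator is what makes this compile under the stricter UI build; an empty array literal with no annotation is implicitly any[], so nothing checks the shape of the pushed objects. A condensed illustration (the sample values are made up):

interface ImageSearchResult {
  img_src: string;
  url: string;
  title: string;
}

const images: ImageSearchResult[] = [];
// Every push is now checked against the interface fields.
images.push({ img_src: 'a.png', url: 'https://example.com', title: 'Example' });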
View File

@@ -1,5 +1,5 @@
 import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
-import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
+import ListLineOutputParser from '../outputParsers/listLineOutputParser';
 import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';

View File

@@ -7,7 +7,7 @@ import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
-import { searchSearxng } from '../lib/searxng';
+import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const VideoSearchChainPrompt = `
@@ -36,6 +36,13 @@ type VideoSearchChainInput = {
   query: string;
 };

+interface VideoSearchResult {
+  img_src: string;
+  url: string;
+  title: string;
+  iframe_src: string;
+}
+
 const strParser = new StringOutputParser();

 const createVideoSearchChain = (llm: BaseChatModel) => {
@@ -56,7 +63,7 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
         engines: ['youtube'],
       });

-      const videos = [];
+      const videos: VideoSearchResult[] = [];

       res.results.forEach((result) => {
         if (

View File

@@ -43,7 +43,7 @@ type RecursivePartial<T> = {

 const loadConfig = () =>
   toml.parse(
-    fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
+    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
   ) as any as Config;

 export const getPort = () => loadConfig().GENERAL.PORT;
@@ -62,7 +62,7 @@ export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
 export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;

 export const getSearxngApiEndpoint = () =>
-  process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
+  loadConfig().API_ENDPOINTS.SEARXNG || process.env.SEARXNG_API_URL;

 export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
@@ -109,9 +109,8 @@ const mergeConfigs = (current: any, update: any): any => {
 export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();
   const mergedConfig = mergeConfigs(currentConfig, config);
-
   fs.writeFileSync(
-    path.join(__dirname, `../${configFileName}`),
+    path.join(path.join(process.cwd(), `${configFileName}`)),
     toml.stringify(mergedConfig),
   );
 };

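Note: both path fixes swap __dirname, which points at the compiled module's directory and moves around under the Next.js build, for process.cwd(), the directory the server is launched from. A sketch of the difference, assuming a config.toml at the project root:

import path from 'node:path';

// Relative to the compiled file; under a bundler this can resolve
// somewhere inside the build output rather than the project root.
const fromDirname = path.join(__dirname, '../config.toml');

// Relative to the working directory of the process, which for
// `next start` is the project root.
const fromCwd = path.join(process.cwd(), 'config.toml');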
View File

@@ -1,8 +1,9 @@
 import { drizzle } from 'drizzle-orm/better-sqlite3';
 import Database from 'better-sqlite3';
 import * as schema from './schema';
+import path from 'path';

-const sqlite = new Database('data/db.sqlite');
+const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
 const db = drizzle(sqlite, {
   schema: schema,
 });

View File

@@ -1,82 +0,0 @@
import { Embeddings, type EmbeddingsParams } from '@langchain/core/embeddings';
import { chunkArray } from '@langchain/core/utils/chunk_array';
export interface HuggingFaceTransformersEmbeddingsParams
extends EmbeddingsParams {
modelName: string;
model: string;
timeout?: number;
batchSize?: number;
stripNewLines?: boolean;
}
export class HuggingFaceTransformersEmbeddings
extends Embeddings
implements HuggingFaceTransformersEmbeddingsParams
{
modelName = 'Xenova/all-MiniLM-L6-v2';
model = 'Xenova/all-MiniLM-L6-v2';
batchSize = 512;
stripNewLines = true;
timeout?: number;
private pipelinePromise: Promise<any>;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});
this.modelName = fields?.model ?? fields?.modelName ?? this.model;
this.model = this.modelName;
this.stripNewLines = fields?.stripNewLines ?? this.stripNewLines;
this.timeout = fields?.timeout;
}
async embedDocuments(texts: string[]): Promise<number[][]> {
const batches = chunkArray(
this.stripNewLines ? texts.map((t) => t.replace(/\n/g, ' ')) : texts,
this.batchSize,
);
const batchRequests = batches.map((batch) => this.runEmbedding(batch));
const batchResponses = await Promise.all(batchRequests);
const embeddings: number[][] = [];
for (let i = 0; i < batchResponses.length; i += 1) {
const batchResponse = batchResponses[i];
for (let j = 0; j < batchResponse.length; j += 1) {
embeddings.push(batchResponse[j]);
}
}
return embeddings;
}
async embedQuery(text: string): Promise<number[]> {
const data = await this.runEmbedding([
this.stripNewLines ? text.replace(/\n/g, ' ') : text,
]);
return data[0];
}
private async runEmbedding(texts: string[]) {
const { pipeline } = await import('@xenova/transformers');
const pipe = await (this.pipelinePromise ??= pipeline(
'feature-extraction',
this.model,
));
return this.caller.call(async () => {
const output = await pipe(texts, { pooling: 'mean', normalize: true });
return output.tolist();
});
}
}

View File

@@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {

   constructor(args?: LineOutputParserArgs) {
     super();
-    this.key = args.key ?? this.key;
+    this.key = args?.key ?? this.key;
   }

   static lc_name() {

View File

@@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {

   constructor(args?: LineListOutputParserArgs) {
     super();
-    this.key = args.key ?? this.key;
+    this.key = args?.key ?? this.key;
   }

   static lc_name() {

View File

@@ -1,6 +1,38 @@
-import { ChatAnthropic } from '@langchain/anthropic';
-import { getAnthropicApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { ChatOpenAI } from '@langchain/openai';
+import { ChatModel } from '.';
+import { getAnthropicApiKey } from '../config';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+const anthropicChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Claude 3.7 Sonnet',
+    key: 'claude-3-7-sonnet-20250219',
+  },
+  {
+    displayName: 'Claude 3.5 Haiku',
+    key: 'claude-3-5-haiku-20241022',
+  },
+  {
+    displayName: 'Claude 3.5 Sonnet v2',
+    key: 'claude-3-5-sonnet-20241022',
+  },
+  {
+    displayName: 'Claude 3.5 Sonnet',
+    key: 'claude-3-5-sonnet-20240620',
+  },
+  {
+    displayName: 'Claude 3 Opus',
+    key: 'claude-3-opus-20240229',
+  },
+  {
+    displayName: 'Claude 3 Sonnet',
+    key: 'claude-3-sonnet-20240229',
+  },
+  {
+    displayName: 'Claude 3 Haiku',
+    key: 'claude-3-haiku-20240307',
+  },
+];

 export const loadAnthropicChatModels = async () => {
   const anthropicApiKey = getAnthropicApiKey();
@@ -8,52 +40,25 @@ export const loadAnthropicChatModels = async () => {
   if (!anthropicApiKey) return {};

   try {
-    const chatModels = {
-      'claude-3-5-sonnet-20241022': {
-        displayName: 'Claude 3.5 Sonnet',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-5-sonnet-20241022',
-        }),
-      },
-      'claude-3-5-haiku-20241022': {
-        displayName: 'Claude 3.5 Haiku',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-5-haiku-20241022',
-        }),
-      },
-      'claude-3-opus-20240229': {
-        displayName: 'Claude 3 Opus',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-opus-20240229',
-        }),
-      },
-      'claude-3-sonnet-20240229': {
-        displayName: 'Claude 3 Sonnet',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-sonnet-20240229',
-        }),
-      },
-      'claude-3-haiku-20240307': {
-        displayName: 'Claude 3 Haiku',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-haiku-20240307',
-        }),
-      },
-    };
+    const chatModels: Record<string, ChatModel> = {};
+
+    anthropicChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: anthropicApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.anthropic.com/v1/',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });

     return chatModels;
   } catch (err) {
-    logger.error(`Error loading Anthropic models: ${err}`);
+    console.error(`Error loading Anthropic models: ${err}`);
     return {};
   }
 };

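Note: the rewrite drops @langchain/anthropic and talks to Claude through Anthropic's OpenAI-compatible endpoint, so every provider now flows through the one ChatOpenAI wrapper. The factory shape that all the rewritten providers share, extracted for clarity (makeChatModel is an illustrative name, not part of the diff):

import { ChatOpenAI } from '@langchain/openai';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

// An OpenAI-compatible backend differs only by base URL, key, and model name.
const makeChatModel = (
  baseURL: string,
  apiKey: string,
  modelName: string,
): BaseChatModel =>
  new ChatOpenAI({
    openAIApiKey: apiKey,
    modelName,
    temperature: 0.7,
    configuration: { baseURL },
  }) as unknown as BaseChatModel;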
View File

@@ -1,9 +1,42 @@
-import {
-  ChatGoogleGenerativeAI,
-  GoogleGenerativeAIEmbeddings,
-} from '@langchain/google-genai';
-import { getGeminiApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { getGeminiApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+const geminiChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemini 2.0 Flash',
+    key: 'gemini-2.0-flash',
+  },
+  {
+    displayName: 'Gemini 2.0 Flash-Lite',
+    key: 'gemini-2.0-flash-lite',
+  },
+  {
+    displayName: 'Gemini 2.0 Pro Experimental',
+    key: 'gemini-2.0-pro-exp-02-05',
+  },
+  {
+    displayName: 'Gemini 1.5 Flash',
+    key: 'gemini-1.5-flash',
+  },
+  {
+    displayName: 'Gemini 1.5 Flash-8B',
+    key: 'gemini-1.5-flash-8b',
+  },
+  {
+    displayName: 'Gemini 1.5 Pro',
+    key: 'gemini-1.5-pro',
+  },
+];
+
+const geminiEmbeddingModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemini Embedding',
+    key: 'gemini-embedding-exp',
+  },
+];

 export const loadGeminiChatModels = async () => {
   const geminiApiKey = getGeminiApiKey();
@@ -11,75 +44,53 @@ export const loadGeminiChatModels = async () => {
   if (!geminiApiKey) return {};

   try {
-    const chatModels = {
-      'gemini-1.5-flash': {
-        displayName: 'Gemini 1.5 Flash',
-        model: new ChatGoogleGenerativeAI({
-          modelName: 'gemini-1.5-flash',
-          temperature: 0.7,
-          apiKey: geminiApiKey,
-        }),
-      },
-      'gemini-1.5-flash-8b': {
-        displayName: 'Gemini 1.5 Flash 8B',
-        model: new ChatGoogleGenerativeAI({
-          modelName: 'gemini-1.5-flash-8b',
-          temperature: 0.7,
-          apiKey: geminiApiKey,
-        }),
-      },
-      'gemini-1.5-pro': {
-        displayName: 'Gemini 1.5 Pro',
-        model: new ChatGoogleGenerativeAI({
-          modelName: 'gemini-1.5-pro',
-          temperature: 0.7,
-          apiKey: geminiApiKey,
-        }),
-      },
-      'gemini-2.0-flash-exp': {
-        displayName: 'Gemini 2.0 Flash Exp',
-        model: new ChatGoogleGenerativeAI({
-          modelName: 'gemini-2.0-flash-exp',
-          temperature: 0.7,
-          apiKey: geminiApiKey,
-        }),
-      },
-      'gemini-2.0-flash-thinking-exp-01-21': {
-        displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
-        model: new ChatGoogleGenerativeAI({
-          modelName: 'gemini-2.0-flash-thinking-exp-01-21',
-          temperature: 0.7,
-          apiKey: geminiApiKey,
-        }),
-      },
-    };
+    const chatModels: Record<string, ChatModel> = {};
+
+    geminiChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: geminiApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });

     return chatModels;
   } catch (err) {
-    logger.error(`Error loading Gemini models: ${err}`);
+    console.error(`Error loading Gemini models: ${err}`);
     return {};
   }
 };

-export const loadGeminiEmbeddingsModels = async () => {
+export const loadGeminiEmbeddingModels = async () => {
   const geminiApiKey = getGeminiApiKey();

   if (!geminiApiKey) return {};

   try {
-    const embeddingModels = {
-      'text-embedding-004': {
-        displayName: 'Text Embedding',
-        model: new GoogleGenerativeAIEmbeddings({
-          apiKey: geminiApiKey,
-          modelName: 'text-embedding-004',
-        }),
-      },
-    };
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    geminiEmbeddingModels.forEach((model) => {
+      embeddingModels[model.key] = {
+        displayName: model.displayName,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: geminiApiKey,
+          modelName: model.key,
+          configuration: {
+            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
+          },
+        }) as unknown as Embeddings,
+      };
+    });

     return embeddingModels;
   } catch (err) {
-    logger.error(`Error loading Gemini embeddings model: ${err}`);
+    console.error(`Error loading OpenAI embeddings models: ${err}`);
     return {};
   }
 };

View File

@@ -1,6 +1,82 @@
 import { ChatOpenAI } from '@langchain/openai';
-import { getGroqApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { getGroqApiKey } from '../config';
+import { ChatModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+const groqChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Gemma2 9B IT',
+    key: 'gemma2-9b-it',
+  },
+  {
+    displayName: 'Llama 3.3 70B Versatile',
+    key: 'llama-3.3-70b-versatile',
+  },
+  {
+    displayName: 'Llama 3.1 8B Instant',
+    key: 'llama-3.1-8b-instant',
+  },
+  {
+    displayName: 'Llama3 70B 8192',
+    key: 'llama3-70b-8192',
+  },
+  {
+    displayName: 'Llama3 8B 8192',
+    key: 'llama3-8b-8192',
+  },
+  {
+    displayName: 'Mixtral 8x7B 32768',
+    key: 'mixtral-8x7b-32768',
+  },
+  {
+    displayName: 'Qwen QWQ 32B (Preview)',
+    key: 'qwen-qwq-32b',
+  },
+  {
+    displayName: 'Mistral Saba 24B (Preview)',
+    key: 'mistral-saba-24b',
+  },
+  {
+    displayName: 'Qwen 2.5 Coder 32B (Preview)',
+    key: 'qwen-2.5-coder-32b',
+  },
+  {
+    displayName: 'Qwen 2.5 32B (Preview)',
+    key: 'qwen-2.5-32b',
+  },
+  {
+    displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
+    key: 'deepseek-r1-distill-qwen-32b',
+  },
+  {
+    displayName: 'DeepSeek R1 Distill Llama 70B SpecDec (Preview)',
+    key: 'deepseek-r1-distill-llama-70b-specdec',
+  },
+  {
+    displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
+    key: 'deepseek-r1-distill-llama-70b',
+  },
+  {
+    displayName: 'Llama 3.3 70B SpecDec (Preview)',
+    key: 'llama-3.3-70b-specdec',
+  },
+  {
+    displayName: 'Llama 3.2 1B Preview (Preview)',
+    key: 'llama-3.2-1b-preview',
+  },
+  {
+    displayName: 'Llama 3.2 3B Preview (Preview)',
+    key: 'llama-3.2-3b-preview',
+  },
+  {
+    displayName: 'Llama 3.2 11B Vision Preview (Preview)',
+    key: 'llama-3.2-11b-vision-preview',
+  },
+  {
+    displayName: 'Llama 3.2 90B Vision Preview (Preview)',
+    key: 'llama-3.2-90b-vision-preview',
+  },
+];

 export const loadGroqChatModels = async () => {
   const groqApiKey = getGroqApiKey();
@@ -8,129 +84,25 @@ export const loadGroqChatModels = async () => {
   if (!groqApiKey) return {};

   try {
-    const chatModels = {
-      'llama-3.3-70b-versatile': {
-        displayName: 'Llama 3.3 70B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama-3.3-70b-versatile',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'llama-3.2-3b-preview': {
-        displayName: 'Llama 3.2 3B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama-3.2-3b-preview',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'llama-3.2-11b-vision-preview': {
-        displayName: 'Llama 3.2 11B Vision',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama-3.2-11b-vision-preview',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'llama-3.2-90b-vision-preview': {
-        displayName: 'Llama 3.2 90B Vision',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama-3.2-90b-vision-preview',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'llama-3.1-8b-instant': {
-        displayName: 'Llama 3.1 8B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama-3.1-8b-instant',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'llama3-8b-8192': {
-        displayName: 'LLaMA3 8B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama3-8b-8192',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'llama3-70b-8192': {
-        displayName: 'LLaMA3 70B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama3-70b-8192',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'mixtral-8x7b-32768': {
-        displayName: 'Mixtral 8x7B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'mixtral-8x7b-32768',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'gemma2-9b-it': {
-        displayName: 'Gemma2 9B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'gemma2-9b-it',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-    };
+    const chatModels: Record<string, ChatModel> = {};
+
+    groqChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: groqApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });

     return chatModels;
   } catch (err) {
-    logger.error(`Error loading Groq models: ${err}`);
+    console.error(`Error loading Groq models: ${err}`);
     return {};
   }
 };

View File

@@ -1,33 +1,49 @@
-import { loadGroqChatModels } from './groq';
-import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { Embeddings } from '@langchain/core/embeddings';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
-} from '../../config';
+} from '../config';
 import { ChatOpenAI } from '@langchain/openai';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
+import { loadGroqChatModels } from './groq';
+import { loadAnthropicChatModels } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';

-const chatModelProviders = {
+export interface ChatModel {
+  displayName: string;
+  model: BaseChatModel;
+}
+
+export interface EmbeddingModel {
+  displayName: string;
+  model: Embeddings;
+}
+
+export const chatModelProviders: Record<
+  string,
+  () => Promise<Record<string, ChatModel>>
+> = {
   openai: loadOpenAIChatModels,
-  groq: loadGroqChatModels,
   ollama: loadOllamaChatModels,
+  groq: loadGroqChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
 };

-const embeddingModelProviders = {
-  openai: loadOpenAIEmbeddingsModels,
-  local: loadTransformersEmbeddingsModels,
-  ollama: loadOllamaEmbeddingsModels,
-  gemini: loadGeminiEmbeddingsModels,
+export const embeddingModelProviders: Record<
+  string,
+  () => Promise<Record<string, EmbeddingModel>>
+> = {
+  openai: loadOpenAIEmbeddingModels,
+  ollama: loadOllamaEmbeddingModels,
+  gemini: loadGeminiEmbeddingModels,
 };

 export const getAvailableChatModelProviders = async () => {
-  const models = {};
+  const models: Record<string, Record<string, ChatModel>> = {};

   for (const provider in chatModelProviders) {
     const providerModels = await chatModelProviders[provider]();
@@ -52,7 +68,7 @@ export const getAvailableChatModelProviders = async () => {
           configuration: {
             baseURL: customOpenAiApiUrl,
           },
-        }),
+        }) as unknown as BaseChatModel,
       },
     }
       : {}),
@@ -62,7 +78,7 @@ export const getAvailableChatModelProviders = async () => {
 };

 export const getAvailableEmbeddingModelProviders = async () => {
-  const models = {};
+  const models: Record<string, Record<string, EmbeddingModel>> = {};

   for (const provider in embeddingModelProviders) {
     const providerModels = await embeddingModelProviders[provider]();

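Note: with the loaders typed as a Record of async factories, callers can enumerate every available model with a plain double loop. A usage sketch (the import path here is hypothetical):

import { getAvailableChatModelProviders } from '@/lib/providers';

const listChatModels = async (): Promise<string[]> => {
  const providers = await getAvailableChatModelProviders();
  const names: string[] = [];
  // Outer loop over providers, inner loop over that provider's models.
  for (const [provider, models] of Object.entries(providers)) {
    for (const key of Object.keys(models)) {
      names.push(`${provider}/${key}`);
    }
  }
  return names;
};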
View File

@@ -1,74 +1,73 @@
-import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
-import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
-import logger from '../../utils/logger';
-import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import axios from 'axios';
+import { getKeepAlive, getOllamaApiEndpoint } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
+import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';

 export const loadOllamaChatModels = async () => {
-  const ollamaEndpoint = getOllamaApiEndpoint();
-  const keepAlive = getKeepAlive();
+  const ollamaApiEndpoint = getOllamaApiEndpoint();

-  if (!ollamaEndpoint) return {};
+  if (!ollamaApiEndpoint) return {};

   try {
-    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
+    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
       headers: {
         'Content-Type': 'application/json',
       },
     });

-    const { models: ollamaModels } = response.data;
+    const { models } = res.data;

-    const chatModels = ollamaModels.reduce((acc, model) => {
-      acc[model.model] = {
+    const chatModels: Record<string, ChatModel> = {};
+
+    models.forEach((model: any) => {
+      chatModels[model.model] = {
         displayName: model.name,
         model: new ChatOllama({
-          baseUrl: ollamaEndpoint,
+          baseUrl: ollamaApiEndpoint,
           model: model.model,
           temperature: 0.7,
-          keepAlive: keepAlive,
+          keepAlive: getKeepAlive(),
         }),
       };
-
-      return acc;
-    }, {});
+    });

     return chatModels;
   } catch (err) {
-    logger.error(`Error loading Ollama models: ${err}`);
+    console.error(`Error loading Ollama models: ${err}`);
     return {};
   }
 };

-export const loadOllamaEmbeddingsModels = async () => {
-  const ollamaEndpoint = getOllamaApiEndpoint();
+export const loadOllamaEmbeddingModels = async () => {
+  const ollamaApiEndpoint = getOllamaApiEndpoint();

-  if (!ollamaEndpoint) return {};
+  if (!ollamaApiEndpoint) return {};

   try {
-    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
+    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
       headers: {
         'Content-Type': 'application/json',
       },
     });

-    const { models: ollamaModels } = response.data;
+    const { models } = res.data;

-    const embeddingsModels = ollamaModels.reduce((acc, model) => {
-      acc[model.model] = {
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    models.forEach((model: any) => {
+      embeddingModels[model.model] = {
         displayName: model.name,
         model: new OllamaEmbeddings({
-          baseUrl: ollamaEndpoint,
+          baseUrl: ollamaApiEndpoint,
           model: model.model,
         }),
       };
-
-      return acc;
-    }, {});
+    });

-    return embeddingsModels;
+    return embeddingModels;
   } catch (err) {
-    logger.error(`Error loading Ollama embeddings model: ${err}`);
+    console.error(`Error loading Ollama embeddings models: ${err}`);
     return {};
   }
 };

View File

@@ -1,89 +1,90 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
-import { getOpenaiApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { getOpenaiApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+const openaiChatModels: Record<string, string>[] = [
+  {
+    displayName: 'GPT-3.5 Turbo',
+    key: 'gpt-3.5-turbo',
+  },
+  {
+    displayName: 'GPT-4',
+    key: 'gpt-4',
+  },
+  {
+    displayName: 'GPT-4 turbo',
+    key: 'gpt-4-turbo',
+  },
+  {
+    displayName: 'GPT-4 omni',
+    key: 'gpt-4o',
+  },
+  {
+    displayName: 'GPT-4 omni mini',
+    key: 'gpt-4o-mini',
+  },
+];
+
+const openaiEmbeddingModels: Record<string, string>[] = [
+  {
+    displayName: 'Text Embedding 3 Small',
+    key: 'text-embedding-3-small',
+  },
+  {
+    displayName: 'Text Embedding 3 Large',
+    key: 'text-embedding-3-large',
+  },
+];

 export const loadOpenAIChatModels = async () => {
-  const openAIApiKey = getOpenaiApiKey();
+  const openaiApiKey = getOpenaiApiKey();

-  if (!openAIApiKey) return {};
+  if (!openaiApiKey) return {};

   try {
-    const chatModels = {
-      'gpt-3.5-turbo': {
-        displayName: 'GPT-3.5 Turbo',
-        model: new ChatOpenAI({
-          openAIApiKey,
-          modelName: 'gpt-3.5-turbo',
-          temperature: 0.7,
-        }),
-      },
-      'gpt-4': {
-        displayName: 'GPT-4',
-        model: new ChatOpenAI({
-          openAIApiKey,
-          modelName: 'gpt-4',
-          temperature: 0.7,
-        }),
-      },
-      'gpt-4-turbo': {
-        displayName: 'GPT-4 turbo',
-        model: new ChatOpenAI({
-          openAIApiKey,
-          modelName: 'gpt-4-turbo',
-          temperature: 0.7,
-        }),
-      },
-      'gpt-4o': {
-        displayName: 'GPT-4 omni',
-        model: new ChatOpenAI({
-          openAIApiKey,
-          modelName: 'gpt-4o',
-          temperature: 0.7,
-        }),
-      },
-      'gpt-4o-mini': {
-        displayName: 'GPT-4 omni mini',
-        model: new ChatOpenAI({
-          openAIApiKey,
-          modelName: 'gpt-4o-mini',
-          temperature: 0.7,
-        }),
-      },
-    };
+    const chatModels: Record<string, ChatModel> = {};
+
+    openaiChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: openaiApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+        }) as unknown as BaseChatModel,
+      };
+    });

     return chatModels;
   } catch (err) {
-    logger.error(`Error loading OpenAI models: ${err}`);
+    console.error(`Error loading OpenAI models: ${err}`);
     return {};
   }
 };

-export const loadOpenAIEmbeddingsModels = async () => {
-  const openAIApiKey = getOpenaiApiKey();
+export const loadOpenAIEmbeddingModels = async () => {
+  const openaiApiKey = getOpenaiApiKey();

-  if (!openAIApiKey) return {};
+  if (!openaiApiKey) return {};

   try {
-    const embeddingModels = {
-      'text-embedding-3-small': {
-        displayName: 'Text Embedding 3 Small',
-        model: new OpenAIEmbeddings({
-          openAIApiKey,
-          modelName: 'text-embedding-3-small',
-        }),
-      },
-      'text-embedding-3-large': {
-        displayName: 'Text Embedding 3 Large',
-        model: new OpenAIEmbeddings({
-          openAIApiKey,
-          modelName: 'text-embedding-3-large',
-        }),
-      },
-    };
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    openaiEmbeddingModels.forEach((model) => {
+      embeddingModels[model.key] = {
+        displayName: model.displayName,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: openaiApiKey,
+          modelName: model.key,
+        }) as unknown as Embeddings,
+      };
+    });

     return embeddingModels;
   } catch (err) {
-    logger.error(`Error loading OpenAI embeddings model: ${err}`);
+    console.error(`Error loading OpenAI embeddings models: ${err}`);
     return {};
   }
 };

View File

@@ -1,32 +0,0 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const loadTransformersEmbeddingsModels = async () => {
try {
const embeddingModels = {
'xenova-bge-small-en-v1.5': {
displayName: 'BGE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bge-small-en-v1.5',
}),
},
'xenova-gte-small': {
displayName: 'GTE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/gte-small',
}),
},
'xenova-bert-base-multilingual-uncased': {
displayName: 'Bert Multilingual',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bert-base-multilingual-uncased',
}),
},
};
return embeddingModels;
} catch (err) {
logger.error(`Error loading Transformers embeddings model: ${err}`);
return {};
}
};

View File

@@ -13,18 +13,17 @@ import {
 } from '@langchain/core/runnables';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
-import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
-import LineOutputParser from '../lib/outputParsers/lineOutputParser';
+import LineListOutputParser from '../outputParsers/listLineOutputParser';
+import LineOutputParser from '../outputParsers/lineOutputParser';
 import { getDocumentsFromLinks } from '../utils/documents';
 import { Document } from 'langchain/document';
-import { searchSearxng } from '../lib/searxng';
-import path from 'path';
-import fs from 'fs';
+import { searchSearxng } from '../searxng';
+import path from 'node:path';
+import fs from 'node:fs';
 import computeSimilarity from '../utils/computeSimilarity';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import eventEmitter from 'events';
 import { StreamEvent } from '@langchain/core/tracers/log_stream';
-import { IterableReadableStream } from '@langchain/core/utils/stream';

 export interface MetaSearchAgentType {
   searchAndAnswer: (
@@ -90,7 +89,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
         question = 'summarize';
       }

-      let docs = [];
+      let docs: Document[] = [];

       const linkDocs = await getDocumentsFromLinks({ links });

@@ -311,7 +310,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
         const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));

         const fileSimilaritySearchObject = content.contents.map(
-          (c: string, i) => {
+          (c: string, i: number) => {
             return {
               fileName: content.title,
               content: c,
@@ -414,6 +413,8 @@ class MetaSearchAgent implements MetaSearchAgentType {

       return sortedDocs;
     }
+
+    return [];
   }

   private processDocs(docs: Document[]) {
@@ -426,7 +427,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
   }

   private async handleStream(
-    stream: IterableReadableStream<StreamEvent>,
+    stream: AsyncGenerator<StreamEvent, any, any>,
     emitter: eventEmitter,
   ) {
     for await (const event of stream) {

View File

@@ -1,5 +1,5 @@
 import axios from 'axios';
-import { getSearxngApiEndpoint } from '../config';
+import { getSearxngApiEndpoint } from './config';

 interface SearxngSearchOptions {
   categories?: string[];
@@ -30,11 +30,12 @@ export const searchSearxng = async (
   if (opts) {
     Object.keys(opts).forEach((key) => {
-      if (Array.isArray(opts[key])) {
-        url.searchParams.append(key, opts[key].join(','));
+      const value = opts[key as keyof SearxngSearchOptions];
+      if (Array.isArray(value)) {
+        url.searchParams.append(key, value.join(','));
         return;
       }
-      url.searchParams.append(key, opts[key]);
+      url.searchParams.append(key, value as string);
     });
   }

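Note: Object.keys() is typed as string[], so indexing the options object directly fails under strict compiler settings; the keyof cast is the standard workaround. The same pattern in isolation (SearchOptions and toParams are illustrative names):

interface SearchOptions {
  categories?: string[];
  language?: string;
}

const toParams = (opts: SearchOptions): URLSearchParams => {
  const params = new URLSearchParams();
  Object.keys(opts).forEach((key) => {
    // `key` is just `string`, so a keyof cast is needed to index the interface.
    const value = opts[key as keyof SearchOptions];
    if (Array.isArray(value)) {
      params.append(key, value.join(','));
      return;
    }
    if (value !== undefined) params.append(key, value as string);
  });
  return params;
};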
src/lib/types/compute-dot.d.ts (new file)
View File

@@ -0,0 +1,5 @@
declare function computeDot(vectorA: number[], vectorB: number[]): number;
declare module 'compute-dot' {
export default computeDot;
}

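Note: compute-dot ships no type definitions, so the new .d.ts above supplies an ambient declaration for it. The general recipe for any untyped CommonJS dependency looks like this (hypothetical example, not part of the diff):

// some-untyped-lib.d.ts
declare module 'some-untyped-lib' {
  export default function compute(a: number[], b: number[]): number;
}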
View File

@@ -6,7 +6,7 @@ const computeSimilarity = (x: number[], y: number[]): number => {
   const similarityMeasure = getSimilarityMeasure();

   if (similarityMeasure === 'cosine') {
-    return cosineSimilarity(x, y);
+    return cosineSimilarity(x, y) as number;
   } else if (similarityMeasure === 'dot') {
     return dot(x, y);
   }

View File

@@ -3,7 +3,6 @@ import { htmlToText } from 'html-to-text';
 import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
 import { Document } from '@langchain/core/documents';
 import pdfParse from 'pdf-parse';
-import logger from './logger';

 export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
   const splitter = new RecursiveCharacterTextSplitter();
@@ -79,12 +78,13 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
       docs.push(...linkDocs);
     } catch (err) {
-      logger.error(
-        `Error at generating documents from links: ${err.message}`,
+      console.error(
+        'An error occurred while getting documents from links: ',
+        err,
       );
       docs.push(
         new Document({
-          pageContent: `Failed to retrieve content from the link: ${err.message}`,
+          pageContent: `Failed to retrieve content from the link: ${err}`,
           metadata: {
             title: 'Failed to retrieve content',
             url: link,

View File

@@ -1,66 +0,0 @@
import express from 'express';
import logger from '../utils/logger';
import db from '../db/index';
import { eq } from 'drizzle-orm';
import { chats, messages } from '../db/schema';
const router = express.Router();
router.get('/', async (_, res) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return res.status(200).json({ chats: chats });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chats: ${err.message}`);
}
});
router.get('/:id', async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, req.params.id),
});
return res.status(200).json({ chat: chatExists, messages: chatMessages });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chat: ${err.message}`);
}
});
router.delete(`/:id`, async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
await db
.delete(messages)
.where(eq(messages.chatId, req.params.id))
.execute();
return res.status(200).json({ message: 'Chat deleted successfully' });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in deleting chat: ${err.message}`);
}
});
export default router;

View File

@@ -1,48 +0,0 @@
import express from 'express';
import { searchSearxng } from '../lib/searxng';
import logger from '../utils/logger';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const data = (
await Promise.all([
searchSearxng('site:businessinsider.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:www.exchangewire.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:businessinsider.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:www.exchangewire.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com tech', {
engines: ['bing news'],
pageno: 1,
}),
])
)
.map((result) => result.results)
.flat()
.sort(() => Math.random() - 0.5);
return res.json({ blogs: data });
} catch (err: any) {
logger.error(`Error in discover route: ${err.message}`);
return res.status(500).json({ message: 'An error has occurred' });
}
});
export default router;

View File

@@ -1,82 +0,0 @@
import express from 'express';
import handleImageSearch from '../chains/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: ImageSearchBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const images = await handleImageSearch(
{ query: body.query, chat_history: chatHistory },
llm,
);
res.status(200).json({ images });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in image search: ${err.message}`);
}
});
export default router;

View File

@@ -1,24 +0,0 @@
import express from 'express';
import imagesRouter from './images';
import videosRouter from './videos';
import configRouter from './config';
import modelsRouter from './models';
import suggestionsRouter from './suggestions';
import chatsRouter from './chats';
import searchRouter from './search';
import discoverRouter from './discover';
import uploadsRouter from './uploads';
const router = express.Router();
router.use('/images', imagesRouter);
router.use('/videos', videosRouter);
router.use('/config', configRouter);
router.use('/models', modelsRouter);
router.use('/suggestions', suggestionsRouter);
router.use('/chats', chatsRouter);
router.use('/search', searchRouter);
router.use('/discover', discoverRouter);
router.use('/uploads', uploadsRouter);
export default router;

View File

@@ -1,36 +0,0 @@
import express from 'express';
import logger from '../utils/logger';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete chatModelProviders[provider][model].model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete embeddingModelProviders[provider][model].model;
});
});
res.status(200).json({ chatModelProviders, embeddingModelProviders });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(err.message);
}
});
export default router;

View File

@@ -1,158 +0,0 @@
import express from 'express';
import logger from '../utils/logger';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import { ChatOpenAI } from '@langchain/openai';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import { searchHandlers } from '../websocket/messageHandler';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { MetaSearchAgentType } from '../search/metaSearchAgent';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface chatModel {
provider: string;
model: string;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
}
interface embeddingModel {
provider: string;
model: string;
}
interface ChatRequestBody {
optimizationMode: 'speed' | 'balanced';
focusMode: string;
chatModel?: chatModel;
embeddingModel?: embeddingModel;
query: string;
history: Array<[string, string]>;
}
router.post('/', async (req, res) => {
try {
const body: ChatRequestBody = req.body;
if (!body.focusMode || !body.query) {
return res.status(400).json({ message: 'Missing focus mode or query' });
}
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
const embeddingModelProvider =
body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
const embeddingModel =
body.embeddingModel?.model ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.model || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (
embeddingModelProviders[embeddingModelProvider] &&
embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {
return res.status(400).json({ message: 'Invalid focus mode' });
}
const emitter = await searchHandler.searchAndAnswer(
body.query,
history,
llm,
embeddings,
body.optimizationMode,
[],
);
let message = '';
let sources = [];
emitter.on('data', (data) => {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
message += parsedData.data;
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
}
});
emitter.on('end', () => {
res.status(200).json({ message, sources });
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
res.status(500).json({ message: parsedData.data });
});
} catch (err: any) {
logger.error(`Error in getting search results: ${err.message}`);
res.status(500).json({ message: 'An error has occurred.' });
}
});
export default router;

View File

@@ -1,81 +0,0 @@
import express from 'express';
import generateSuggestions from '../chains/suggestionGeneratorAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface SuggestionsBody {
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: SuggestionsBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const suggestions = await generateSuggestions(
{ chat_history: chatHistory },
llm,
);
res.status(200).json({ suggestions: suggestions });
  } catch (err: any) {
    logger.error(`Error in generating suggestions: ${err.message}`);
    res.status(500).json({ message: 'An error has occurred.' });
  }
});
export default router;
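
A hedged usage sketch for the suggestions route; the /api/suggestions path is an assumption, while the role strings mirror the mapping in the handler above:

async function fetchSuggestions() {
  const res = await fetch('http://127.0.0.1:3001/api/suggestions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      // chatModel is optional; omitting it falls back to the first provider.
      chatHistory: [
        { role: 'user', content: 'Tell me about SearxNG' },
        { role: 'assistant', content: 'SearxNG is a metasearch engine.' },
      ],
    }),
  });
  const { suggestions } = (await res.json()) as { suggestions: string[] };
  return suggestions;
}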

View File

@@ -1,151 +0,0 @@
import express from 'express';
import logger from '../utils/logger';
import multer from 'multer';
import path from 'path';
import crypto from 'crypto';
import fs from 'fs';
import { Embeddings } from '@langchain/core/embeddings';
import { getAvailableEmbeddingModelProviders } from '../lib/providers';
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
import { Document } from 'langchain/document';
const router = express.Router();
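// 500-character chunks with a 100-character overlap keep each embedding small
// while preserving context across chunk boundaries.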
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 100,
});
const storage = multer.diskStorage({
destination: (req, file, cb) => {
cb(null, path.join(process.cwd(), './uploads'));
},
filename: (req, file, cb) => {
    const splitFileName = file.originalname.split('.');
    const fileExtension = splitFileName[splitFileName.length - 1];
if (!['pdf', 'docx', 'txt'].includes(fileExtension)) {
return cb(new Error('File type is not supported'), '');
}
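    // Random 16-byte hex names prevent collisions and strip any
    // path-traversal characters from user-supplied filenames.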
cb(null, `${crypto.randomBytes(16).toString('hex')}.${fileExtension}`);
},
});
const upload = multer({ storage });
router.post(
'/',
upload.fields([
{ name: 'files' },
{ name: 'embedding_model', maxCount: 1 },
{ name: 'embedding_model_provider', maxCount: 1 },
]),
async (req, res) => {
try {
const { embedding_model, embedding_model_provider } = req.body;
if (!embedding_model || !embedding_model_provider) {
res
.status(400)
.json({ message: 'Missing embedding model or provider' });
return;
}
const embeddingModels = await getAvailableEmbeddingModelProviders();
const provider =
embedding_model_provider ?? Object.keys(embeddingModels)[0];
      const embeddingModel: string =
        embedding_model ?? Object.keys(embeddingModels[provider])[0];
let embeddingsModel: Embeddings | undefined;
if (
embeddingModels[provider] &&
embeddingModels[provider][embeddingModel]
) {
embeddingsModel = embeddingModels[provider][embeddingModel].model as
| Embeddings
| undefined;
}
if (!embeddingsModel) {
        res.status(400).json({ message: 'Invalid embedding model selected' });
return;
}
      const files = (req.files as Record<string, Express.Multer.File[]>)[
        'files'
      ];
if (!files || files.length === 0) {
res.status(400).json({ message: 'No files uploaded' });
return;
}
await Promise.all(
files.map(async (file) => {
let docs: Document[] = [];
if (file.mimetype === 'application/pdf') {
const loader = new PDFLoader(file.path);
docs = await loader.load();
} else if (
file.mimetype ===
'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
) {
const loader = new DocxLoader(file.path);
docs = await loader.load();
} else if (file.mimetype === 'text/plain') {
const text = fs.readFileSync(file.path, 'utf-8');
docs = [
new Document({
pageContent: text,
metadata: {
title: file.originalname,
},
}),
];
}
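        // Persist the chunk text and its embeddings as JSON next to the
        // upload so later queries can reuse them without re-embedding.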
const splitted = await splitter.splitDocuments(docs);
const json = JSON.stringify({
title: file.originalname,
contents: splitted.map((doc) => doc.pageContent),
});
const pathToSave = file.path.replace(/\.\w+$/, '-extracted.json');
fs.writeFileSync(pathToSave, json);
const embeddings = await embeddingsModel.embedDocuments(
splitted.map((doc) => doc.pageContent),
);
const embeddingsJSON = JSON.stringify({
title: file.originalname,
embeddings: embeddings,
});
const pathToSaveEmbeddings = file.path.replace(
/\.\w+$/,
'-embeddings.json',
);
fs.writeFileSync(pathToSaveEmbeddings, embeddingsJSON);
}),
);
res.status(200).json({
files: files.map((file) => {
return {
fileName: file.originalname,
fileExtension: file.filename.split('.').pop(),
fileId: file.filename.replace(/\.\w+$/, ''),
};
}),
});
} catch (err: any) {
logger.error(`Error in uploading file results: ${err.message}`);
res.status(500).json({ message: 'An error has occurred.' });
}
},
);
export default router;
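
A sketch of the multipart request this upload route expected. The field names come from upload.fields() above; the URL and the model identifiers are assumptions:

async function uploadFiles(files: File[]) {
  const form = new FormData();
  for (const f of files) form.append('files', f); // pdf, docx or txt only
  form.append('embedding_model', 'text-embedding-3-small'); // assumed id
  form.append('embedding_model_provider', 'openai'); // assumed provider key
  const res = await fetch('http://127.0.0.1:3001/api/uploads', {
    method: 'POST',
    body: form, // fetch sets the multipart boundary header automatically
  });
  return (await res.json()) as {
    files: { fileName: string; fileExtension: string; fileId: string }[];
  };
}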

View File

@@ -1,82 +0,0 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../chains/videoSearchAgent';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
    const body: VideoSearchBody = req.body;
    const chatHistory = body.chatHistory
      .map((msg: any) => {
        if (msg.role === 'user') {
          return new HumanMessage(msg.content);
        } else if (msg.role === 'assistant') {
          return new AIMessage(msg.content);
        }
      })
      // Drop entries with unrecognized roles instead of passing undefined on.
      .filter((msg): msg is HumanMessage | AIMessage => msg !== undefined);
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const videos = await handleVideoSearch(
{ chat_history: chatHistory, query: body.query },
llm,
);
res.status(200).json({ videos });
  } catch (err: any) {
    logger.error(`Error in video search: ${err.message}`);
    res.status(500).json({ message: 'An error has occurred.' });
  }
});
export default router;
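
This handler mirrors the suggestions route but adds a query field. A minimal call sketch, with the /api/videos path assumed:

async function searchVideos(query: string) {
  const res = await fetch('http://127.0.0.1:3001/api/videos', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, chatHistory: [] }),
  });
  const { videos } = await res.json();
  return videos;
}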

View File

@@ -1,22 +0,0 @@
import winston from 'winston';
const logger = winston.createLogger({
level: 'info',
transports: [
new winston.transports.Console({
format: winston.format.combine(
winston.format.colorize(),
winston.format.simple(),
),
}),
new winston.transports.File({
filename: 'app.log',
format: winston.format.combine(
winston.format.timestamp(),
winston.format.json(),
),
}),
],
});
export default logger;
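
The logger wrote colorized output to the console and timestamped JSON lines to app.log; typical usage in the routes above looked like this:

import logger from '../utils/logger'; // path as used by the routes above

logger.info('Backend started'); // colorized on the console, JSON in app.log
logger.error('Error in video search: request timed out');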

Some files were not shown because too many files have changed in this diff