feat(app): remove backend

Author: ItzCrazyKns
Date: 2025-03-19 16:23:27 +05:30
parent 8a24572cd2
commit 217736d05a
145 changed files with 3546 additions and 10516 deletions

.gitignore

@@ -4,9 +4,9 @@ npm-debug.log
yarn-error.log

# Build output
-/.next/
-/out/
-/dist/
+.next/
+out/
+dist/

# IDE/Editor specific
.vscode/


@@ -6,7 +6,6 @@ const config = {
  endOfLine: 'auto',
  singleQuote: true,
  tabWidth: 2,
-  semi: true,
};

module.exports = config;


@@ -1,13 +1,20 @@
FROM node:20.18.0-alpine

-ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
-ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
-ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
-ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}

WORKDIR /home/perplexica

-COPY ui /home/perplexica/
+COPY src /home/perplexica/src
+COPY public /home/perplexica/public
+COPY package.json /home/perplexica/package.json
+COPY yarn.lock /home/perplexica/yarn.lock
+COPY tsconfig.json /home/perplexica/tsconfig.json
+COPY next.config.mjs /home/perplexica/next.config.mjs
+COPY next-env.d.ts /home/perplexica/next-env.d.ts
+COPY postcss.config.js /home/perplexica/postcss.config.js
+COPY drizzle.config.ts /home/perplexica/drizzle.config.ts
+COPY tailwind.config.ts /home/perplexica/tailwind.config.ts
+RUN mkdir /home/perplexica/data
+RUN mkdir /home/perplexica/uploads

RUN yarn install --frozen-lockfile
RUN yarn build


@@ -1,17 +0,0 @@
FROM node:18-slim
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
RUN mkdir /home/perplexica/data
RUN mkdir /home/perplexica/uploads
RUN yarn install --frozen-lockfile --network-timeout 600000
RUN yarn build
CMD ["yarn", "start"]


@@ -9,41 +9,20 @@ services:
    networks:
      - perplexica-network
    restart: unless-stopped

-  perplexica-backend:
-    build:
-      context: .
-      dockerfile: backend.dockerfile
-    image: itzcrazykns1337/perplexica-backend:main
-    environment:
-      - SEARXNG_API_URL=http://searxng:8080
-    depends_on:
-      - searxng
-    ports:
-      - 3001:3001
-    volumes:
-      - backend-dbstore:/home/perplexica/data
-      - uploads:/home/perplexica/uploads
-      - ./config.toml:/home/perplexica/config.toml
-    extra_hosts:
-      - 'host.docker.internal:host-gateway'
-    networks:
-      - perplexica-network
-    restart: unless-stopped
-
-  perplexica-frontend:
+  app:
    build:
      context: .
      dockerfile: app.dockerfile
-      args:
-        - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
-        - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
-    image: itzcrazykns1337/perplexica-frontend:main
-    depends_on:
-      - perplexica-backend
+    environment:
+      - SEARXNG_API_URL=http://searxng:8080
    ports:
      - 3000:3000
    networks:
      - perplexica-network
+    volumes:
+      - backend-dbstore:/home/perplexica/data
+      - uploads:/home/perplexica/uploads
+      - ./config.toml:/home/perplexica/config.toml
    restart: unless-stopped

networks:


@@ -2,7 +2,7 @@ import { defineConfig } from 'drizzle-kit';
export default defineConfig({
  dialect: 'sqlite',
-  schema: './src/db/schema.ts',
+  schema: './src/lib/db/schema.ts',
  out: './drizzle',
  dbCredentials: {
    url: './data/db.sqlite',

next-env.d.ts (new file)

@@ -0,0 +1,5 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.


@@ -1,53 +1,62 @@
{
-  "name": "perplexica-backend",
+  "name": "perplexica-frontend",
  "version": "1.10.0-rc3",
  "license": "MIT",
  "author": "ItzCrazyKns",
  "scripts": {
-    "start": "npm run db:push && node dist/app.js",
-    "build": "tsc",
-    "dev": "nodemon --ignore uploads/ src/app.ts ",
-    "db:push": "drizzle-kit push sqlite",
-    "format": "prettier . --check",
-    "format:write": "prettier . --write"
-  },
-  "devDependencies": {
-    "@types/better-sqlite3": "^7.6.10",
-    "@types/cors": "^2.8.17",
-    "@types/express": "^4.17.21",
-    "@types/html-to-text": "^9.0.4",
-    "@types/multer": "^1.4.12",
-    "@types/pdf-parse": "^1.1.4",
-    "@types/readable-stream": "^4.0.11",
-    "@types/ws": "^8.5.12",
-    "drizzle-kit": "^0.22.7",
-    "nodemon": "^3.1.0",
-    "prettier": "^3.2.5",
-    "ts-node": "^10.9.2",
-    "typescript": "^5.4.3"
+    "dev": "next dev",
+    "build": "next build",
+    "start": "next start",
+    "lint": "next lint",
+    "format:write": "prettier . --write",
+    "db:push": "drizzle-kit push sqlite"
  },
  "dependencies": {
+    "@headlessui/react": "^2.2.0",
    "@iarna/toml": "^2.2.5",
-    "@langchain/anthropic": "^0.2.3",
-    "@langchain/community": "^0.2.16",
+    "@icons-pack/react-simple-icons": "^12.3.0",
+    "@langchain/community": "^0.3.36",
+    "@langchain/core": "^0.3.42",
    "@langchain/openai": "^0.0.25",
-    "@langchain/google-genai": "^0.0.23",
-    "@xenova/transformers": "^2.17.1",
-    "axios": "^1.6.8",
-    "better-sqlite3": "^11.0.0",
+    "@langchain/textsplitters": "^0.1.0",
+    "@tailwindcss/typography": "^0.5.12",
+    "axios": "^1.8.3",
+    "better-sqlite3": "^11.9.1",
+    "clsx": "^2.1.0",
    "compute-cosine-similarity": "^1.1.0",
    "compute-dot": "^1.1.0",
-    "cors": "^2.8.5",
-    "dotenv": "^16.4.5",
-    "drizzle-orm": "^0.31.2",
-    "express": "^4.19.2",
+    "drizzle-orm": "^0.40.1",
    "html-to-text": "^9.0.5",
    "langchain": "^0.1.30",
-    "mammoth": "^1.8.0",
-    "multer": "^1.4.5-lts.1",
+    "lucide-react": "^0.363.0",
+    "markdown-to-jsx": "^7.7.2",
+    "next": "^15.2.2",
+    "next-themes": "^0.3.0",
    "pdf-parse": "^1.1.1",
-    "winston": "^3.13.0",
-    "ws": "^8.17.1",
+    "react": "^18",
+    "react-dom": "^18",
+    "react-text-to-speech": "^0.14.5",
+    "react-textarea-autosize": "^8.5.3",
+    "sonner": "^1.4.41",
+    "tailwind-merge": "^2.2.2",
+    "winston": "^3.17.0",
+    "yet-another-react-lightbox": "^3.17.2",
    "zod": "^3.22.4"
+  },
+  "devDependencies": {
+    "@types/better-sqlite3": "^7.6.12",
+    "@types/html-to-text": "^9.0.4",
+    "@types/node": "^20",
+    "@types/pdf-parse": "^1.1.4",
+    "@types/react": "^18",
+    "@types/react-dom": "^18",
+    "autoprefixer": "^10.0.1",
+    "drizzle-kit": "^0.30.5",
+    "eslint": "^8",
+    "eslint-config-next": "14.1.4",
+    "postcss": "^8",
+    "prettier": "^3.2.5",
+    "tailwindcss": "^3.3.0",
+    "typescript": "^5"
  }
}

(binary image changed; 1.3 KiB before and after)

(binary image changed; 629 B before and after)


@@ -24,4 +24,4 @@ MODEL_NAME = ""
API_URL = "" # Ollama API URL - http://host.docker.internal:11434 API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[API_ENDPOINTS] [API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL SEARXNG = "" # SearxNG API URL - http://localhost:32768


@@ -1,38 +0,0 @@
import { startWebSocketServer } from './websocket';
import express from 'express';
import cors from 'cors';
import http from 'http';
import routes from './routes';
import { getPort } from './config';
import logger from './utils/logger';
const port = getPort();
const app = express();
const server = http.createServer(app);
const corsOptions = {
origin: '*',
};
app.use(cors(corsOptions));
app.use(express.json());
app.use('/api', routes);
app.get('/api', (_, res) => {
res.status(200).json({ status: 'ok' });
});
server.listen(port, () => {
logger.info(`Server is running on port ${port}`);
});
startWebSocketServer(server);
process.on('uncaughtException', (err, origin) => {
logger.error(`Uncaught Exception at ${origin}: ${err}`);
});
process.on('unhandledRejection', (reason, promise) => {
logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
});


@@ -3,16 +3,28 @@ import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
-import { chatModelProviders, embeddingModelProviders } from '@/lib/providers';
+import {
+  chatModelProviders,
+  embeddingModelProviders,
+  getAvailableChatModelProviders,
+  getAvailableEmbeddingModelProviders,
+} from '@/lib/providers';
import db from '@/lib/db';
import { chats, messages as messagesSchema } from '@/lib/db/schema';
import { and, eq, gt } from 'drizzle-orm';
import { getFileDetails } from '@/lib/utils/files';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { ChatOpenAI } from '@langchain/openai';
+import {
+  getCustomOpenaiApiKey,
+  getCustomOpenaiApiUrl,
+  getCustomOpenaiModelName,
+} from '@/lib/config';

export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';

-export const searchHandlers: Record<string, MetaSearchAgent> = {
+const searchHandlers: Record<string, MetaSearchAgent> = {
  webSearch: new MetaSearchAgent({
    activeEngines: [],
    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
@@ -227,7 +239,7 @@ const handleHistorySave = async (
export const POST = async (req: Request) => {
  try {
    const body = (await req.json()) as Body;
-    const { message, chatModel, embeddingModel } = body;
+    const { message } = body;

    if (message.content === '') {
      return Response.json(
@@ -238,50 +250,52 @@ export const POST = async (req: Request) => {
      );
    }

-    const getProviderChatModels = chatModelProviders[chatModel.provider];
-
-    if (!getProviderChatModels) {
-      return Response.json(
-        {
-          message: 'Invalid chat model provider',
-        },
-        { status: 400 },
-      );
-    }
-
-    const chatModels = await getProviderChatModels();
-    const llm = chatModels[chatModel.name].model;
+    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
+      getAvailableChatModelProviders(),
+      getAvailableEmbeddingModelProviders(),
+    ]);
+
+    const chatModelProvider =
+      chatModelProviders[
+        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
+      ];
+    const chatModel =
+      chatModelProvider[
+        body.chatModel?.name || Object.keys(chatModelProvider)[0]
+      ];
+
+    const embeddingProvider =
+      embeddingModelProviders[
+        body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
+      ];
+    const embeddingModel =
+      embeddingProvider[
+        body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
+      ];
+
+    let llm: BaseChatModel | undefined;
+    let embedding = embeddingModel.model;
+
+    if (body.chatModel?.provider === 'custom_openai') {
+      llm = new ChatOpenAI({
+        openAIApiKey: getCustomOpenaiApiKey(),
+        modelName: getCustomOpenaiModelName(),
+        temperature: 0.7,
+        configuration: {
+          baseURL: getCustomOpenaiApiUrl(),
+        },
+      }) as unknown as BaseChatModel;
+    } else if (chatModelProvider && chatModel) {
+      llm = chatModel.model;
+    }

    if (!llm) {
-      return Response.json(
-        {
-          message: 'Invalid chat model',
-        },
-        { status: 400 },
-      );
+      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
    }

-    const getProviderEmbeddingModels =
-      embeddingModelProviders[embeddingModel.provider];
-
-    if (!getProviderEmbeddingModels) {
-      return Response.json(
-        {
-          message: 'Invalid embedding model provider',
-        },
-        { status: 400 },
-      );
-    }
-
-    const embeddingModels = await getProviderEmbeddingModels();
-    const embedding = embeddingModels[embeddingModel.name].model;
-
    if (!embedding) {
      return Response.json(
-        {
-          message: 'Invalid embedding model',
-        },
+        { error: 'Invalid embedding model' },
        { status: 400 },
      );
    }
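Example (not part of this commit): a minimal sketch of how a client might call the consolidated chat route after this change. The route path and the full Body/message shape are defined elsewhere in the file and are assumptions here; only message.content and the optional chatModel/embeddingModel selectors are visible in this hunk.

// Hedged sketch: '/api/chat' and any message fields beyond `content` are assumptions.
const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    message: { content: 'What is Perplexica?' },
    // Optional: when omitted, the route now falls back to the first provider
    // and model returned by getAvailableChatModelProviders().
    chatModel: { provider: 'openai', name: 'gpt-4o-mini' },
    embeddingModel: { provider: 'openai', name: 'text-embedding-3-small' },
  }),
});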


@@ -6,7 +6,7 @@ import {
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { AIMessage, HumanMessage } from '@langchain/core/messages';
+import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';

interface ChatModel {
@@ -32,7 +32,7 @@ export const POST = async (req: Request) => {
        return new AIMessage(msg.content);
      }
    })
-    .filter((msg) => msg !== undefined);
+    .filter((msg) => msg !== undefined) as BaseMessage[];

  const chatModelProviders = await getAvailableChatModelProviders();
@@ -55,7 +55,7 @@ export const POST = async (req: Request) => {
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
-      });
+      }) as unknown as BaseChatModel;
    } else if (chatModelProvider && chatModel) {
      llm = chatModel.model;
    }


@@ -6,7 +6,7 @@ import {
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { AIMessage, HumanMessage } from '@langchain/core/messages';
+import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';

interface ChatModel {
@@ -31,7 +31,7 @@ export const POST = async (req: Request) => {
        return new AIMessage(msg.content);
      }
    })
-    .filter((msg) => msg !== undefined);
+    .filter((msg) => msg !== undefined) as BaseMessage[];

  const chatModelProviders = await getAvailableChatModelProviders();
@@ -54,7 +54,7 @@ export const POST = async (req: Request) => {
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
-      });
+      }) as unknown as BaseChatModel;
    } else if (chatModelProvider && chatModel) {
      llm = chatModel.model;
    }


@@ -6,7 +6,7 @@ import {
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { AIMessage, HumanMessage } from '@langchain/core/messages';
+import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';

interface ChatModel {
@@ -32,7 +32,7 @@ export const POST = async (req: Request) => {
        return new AIMessage(msg.content);
      }
    })
-    .filter((msg) => msg !== undefined);
+    .filter((msg) => msg !== undefined) as BaseMessage[];

  const chatModelProviders = await getAvailableChatModelProviders();
@@ -55,7 +55,7 @@ export const POST = async (req: Request) => {
        configuration: {
          baseURL: getCustomOpenaiApiUrl(),
        },
-      });
+      }) as unknown as BaseChatModel;
    } else if (chatModelProvider && chatModel) {
      llm = chatModel.model;
    }

(binary image changed; 25 KiB before and after)


@@ -1,84 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: What is a cat?
Rephrased: A cat
2. Follow up question: What is a car? How does it works?
Rephrased: Car working
3. Follow up question: How does an AC work?
Rephrased: AC working
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
type ImageSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: ImageSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
}),
]);
};
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const imageSearchChain = createImageSearchChain(llm);
return imageSearchChain.invoke(input);
};
export default handleImageSearch;


@@ -1,55 +0,0 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../lib/outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestion should be relevant to the conversation that can be used by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length and are informative and relevant to the conversation.
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:
<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>
Conversation:
{chat_history}
`;
type SuggestionGeneratorInput = {
chat_history: BaseMessage[];
};
const outputParser = new ListLineOutputParser({
key: 'suggestions',
});
const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: SuggestionGeneratorInput) =>
formatChatHistoryAsString(input.chat_history),
}),
PromptTemplate.fromTemplate(suggestionGeneratorPrompt),
llm,
outputParser,
]);
};
const generateSuggestions = (
input: SuggestionGeneratorInput,
llm: BaseChatModel,
) => {
(llm as unknown as ChatOpenAI).temperature = 0;
const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
return suggestionGeneratorChain.invoke(input);
};
export default generateSuggestions;


@@ -1,90 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos = [];
res.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
};
export default handleVideoSearch;


@@ -45,159 +45,138 @@ const checkConfig = async (
  setIsConfigReady: (ready: boolean) => void,
  setHasError: (hasError: boolean) => void,
) => {
-  useEffect(() => {
-    const checkConfig = async () => {
-      try {
+  try {
    let chatModel = localStorage.getItem('chatModel');
    let chatModelProvider = localStorage.getItem('chatModelProvider');
    let embeddingModel = localStorage.getItem('embeddingModel');
    let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');

    const autoImageSearch = localStorage.getItem('autoImageSearch');
    const autoVideoSearch = localStorage.getItem('autoVideoSearch');

    if (!autoImageSearch) {
      localStorage.setItem('autoImageSearch', 'true');
    }

    if (!autoVideoSearch) {
      localStorage.setItem('autoVideoSearch', 'false');
    }

    const providers = await fetch(`/api/models`, {
      headers: {
        'Content-Type': 'application/json',
      },
    }).then(async (res) => {
      if (!res.ok)
        throw new Error(
          `Failed to fetch models: ${res.status} ${res.statusText}`,
        );
      return res.json();
    });

    if (
      !chatModel ||
      !chatModelProvider ||
      !embeddingModel ||
      !embeddingModelProvider
    ) {
      if (!chatModel || !chatModelProvider) {
        const chatModelProviders = providers.chatModelProviders;

        chatModelProvider =
          chatModelProvider || Object.keys(chatModelProviders)[0];

        chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];

        if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
          return toast.error('No chat models available');
      }

      if (!embeddingModel || !embeddingModelProvider) {
        const embeddingModelProviders = providers.embeddingModelProviders;

        if (
          !embeddingModelProviders ||
          Object.keys(embeddingModelProviders).length === 0
        )
          return toast.error('No embedding models available');

        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
        embeddingModel = Object.keys(
          embeddingModelProviders[embeddingModelProvider],
        )[0];
      }

      localStorage.setItem('chatModel', chatModel!);
      localStorage.setItem('chatModelProvider', chatModelProvider);
      localStorage.setItem('embeddingModel', embeddingModel!);
      localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
    } else {
      const chatModelProviders = providers.chatModelProviders;
      const embeddingModelProviders = providers.embeddingModelProviders;

      if (
        Object.keys(chatModelProviders).length > 0 &&
        !chatModelProviders[chatModelProvider]
      ) {
        const chatModelProvidersKeys = Object.keys(chatModelProviders);
        chatModelProvider =
          chatModelProvidersKeys.find(
            (key) => Object.keys(chatModelProviders[key]).length > 0,
          ) || chatModelProvidersKeys[0];

        localStorage.setItem('chatModelProvider', chatModelProvider);
      }

      if (
        chatModelProvider &&
        !chatModelProviders[chatModelProvider][chatModel]
      ) {
        chatModel = Object.keys(
          chatModelProviders[
            Object.keys(chatModelProviders[chatModelProvider]).length > 0
              ? chatModelProvider
              : Object.keys(chatModelProviders)[0]
          ],
        )[0];

        localStorage.setItem('chatModel', chatModel);
      }

      if (
        Object.keys(embeddingModelProviders).length > 0 &&
        !embeddingModelProviders[embeddingModelProvider]
      ) {
        embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
        localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
      }

      if (
        embeddingModelProvider &&
        !embeddingModelProviders[embeddingModelProvider][embeddingModel]
      ) {
        embeddingModel = Object.keys(
          embeddingModelProviders[embeddingModelProvider],
        )[0];
        localStorage.setItem('embeddingModel', embeddingModel);
      }
    }

    setChatModelProvider({
      name: chatModel!,
      provider: chatModelProvider,
    });

    setEmbeddingModelProvider({
      name: embeddingModel!,
      provider: embeddingModelProvider,
    });

    setIsConfigReady(true);
-      } catch (err) {
-        console.error(
-          'An error occurred while checking the configuration:',
-          err,
-        );
-        setIsConfigReady(false);
-        setHasError(true);
-      }
-    };
-
-    checkConfig();
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, []);
+  } catch (err) {
+    console.error('An error occurred while checking the configuration:', err);
+    setIsConfigReady(false);
+    setHasError(true);
+  }
};
const loadMessages = async ( const loadMessages = async (
@@ -282,12 +261,15 @@ const ChatWindow = ({ id }: { id?: string }) => {
  const [hasError, setHasError] = useState(false);
  const [isReady, setIsReady] = useState(false);

-  checkConfig(
-    setChatModelProvider,
-    setEmbeddingModelProvider,
-    setIsConfigReady,
-    setHasError,
-  );
+  useEffect(() => {
+    checkConfig(
+      setChatModelProvider,
+      setEmbeddingModelProvider,
+      setIsConfigReady,
+      setHasError,
+    );
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, []);

  const [loading, setLoading] = useState(false);
  const [messageAppeared, setMessageAppeared] = useState(false);


@@ -1,117 +0,0 @@
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';
const configFileName = 'config.toml';
interface Config {
GENERAL: {
PORT: number;
SIMILARITY_MEASURE: string;
KEEP_ALIVE: string;
};
MODELS: {
OPENAI: {
API_KEY: string;
};
GROQ: {
API_KEY: string;
};
ANTHROPIC: {
API_KEY: string;
};
GEMINI: {
API_KEY: string;
};
OLLAMA: {
API_URL: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
MODEL_NAME: string;
};
};
API_ENDPOINTS: {
SEARXNG: string;
};
}
type RecursivePartial<T> = {
[P in keyof T]?: RecursivePartial<T[P]>;
};
const loadConfig = () =>
toml.parse(
fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
) as any as Config;
export const getPort = () => loadConfig().GENERAL.PORT;
export const getSimilarityMeasure = () =>
loadConfig().GENERAL.SIMILARITY_MEASURE;
export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;
export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;
export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
export const getSearxngApiEndpoint = () =>
process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
export const getCustomOpenaiApiUrl = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_URL;
export const getCustomOpenaiModelName = () =>
loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
}
if (typeof current !== 'object' || current === null) {
return update;
}
const result = { ...current };
for (const key in update) {
if (Object.prototype.hasOwnProperty.call(update, key)) {
const updateValue = update[key];
if (
typeof updateValue === 'object' &&
updateValue !== null &&
typeof result[key] === 'object' &&
result[key] !== null
) {
result[key] = mergeConfigs(result[key], updateValue);
} else if (updateValue !== undefined) {
result[key] = updateValue;
}
}
}
return result;
};
export const updateConfig = (config: RecursivePartial<Config>) => {
const currentConfig = loadConfig();
const mergedConfig = mergeConfigs(currentConfig, config);
fs.writeFileSync(
path.join(__dirname, `../${configFileName}`),
toml.stringify(mergedConfig),
);
};


@@ -1,10 +0,0 @@
import { drizzle } from 'drizzle-orm/better-sqlite3';
import Database from 'better-sqlite3';
import * as schema from './schema';
const sqlite = new Database('data/db.sqlite');
const db = drizzle(sqlite, {
schema: schema,
});
export default db;


@@ -62,7 +62,7 @@ export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;

export const getSearxngApiEndpoint = () =>
-  process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
+  loadConfig().API_ENDPOINTS.SEARXNG || process.env.SEARXNG_API_URL;

export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;


@@ -1,82 +0,0 @@
import { Embeddings, type EmbeddingsParams } from '@langchain/core/embeddings';
import { chunkArray } from '@langchain/core/utils/chunk_array';
export interface HuggingFaceTransformersEmbeddingsParams
extends EmbeddingsParams {
modelName: string;
model: string;
timeout?: number;
batchSize?: number;
stripNewLines?: boolean;
}
export class HuggingFaceTransformersEmbeddings
extends Embeddings
implements HuggingFaceTransformersEmbeddingsParams
{
modelName = 'Xenova/all-MiniLM-L6-v2';
model = 'Xenova/all-MiniLM-L6-v2';
batchSize = 512;
stripNewLines = true;
timeout?: number;
private pipelinePromise: Promise<any>;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
super(fields ?? {});
this.modelName = fields?.model ?? fields?.modelName ?? this.model;
this.model = this.modelName;
this.stripNewLines = fields?.stripNewLines ?? this.stripNewLines;
this.timeout = fields?.timeout;
}
async embedDocuments(texts: string[]): Promise<number[][]> {
const batches = chunkArray(
this.stripNewLines ? texts.map((t) => t.replace(/\n/g, ' ')) : texts,
this.batchSize,
);
const batchRequests = batches.map((batch) => this.runEmbedding(batch));
const batchResponses = await Promise.all(batchRequests);
const embeddings: number[][] = [];
for (let i = 0; i < batchResponses.length; i += 1) {
const batchResponse = batchResponses[i];
for (let j = 0; j < batchResponse.length; j += 1) {
embeddings.push(batchResponse[j]);
}
}
return embeddings;
}
async embedQuery(text: string): Promise<number[]> {
const data = await this.runEmbedding([
this.stripNewLines ? text.replace(/\n/g, ' ') : text,
]);
return data[0];
}
private async runEmbedding(texts: string[]) {
const { pipeline } = await import('@xenova/transformers');
const pipe = await (this.pipelinePromise ??= pipeline(
'feature-extraction',
this.model,
));
return this.caller.call(async () => {
const output = await pipe(texts, { pooling: 'mean', normalize: true });
return output.tolist();
});
}
}


@@ -9,7 +9,7 @@ class LineOutputParser extends BaseOutputParser<string> {
  constructor(args?: LineOutputParserArgs) {
    super();
-    this.key = args.key ?? this.key;
+    this.key = args?.key ?? this.key;
  }

  static lc_name() {


@@ -9,7 +9,7 @@ class LineListOutputParser extends BaseOutputParser<string[]> {
  constructor(args?: LineListOutputParserArgs) {
    super();
-    this.key = args.key ?? this.key;
+    this.key = args?.key ?? this.key;
  }

  static lc_name() {


@@ -1,6 +1,38 @@
-import { ChatAnthropic } from '@langchain/anthropic';
-import { getAnthropicApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { ChatOpenAI } from '@langchain/openai';
+import { ChatModel } from '.';
+import { getAnthropicApiKey } from '../config';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',
},
{
displayName: 'Claude 3.5 Haiku',
key: 'claude-3-5-haiku-20241022',
},
{
displayName: 'Claude 3.5 Sonnet v2',
key: 'claude-3-5-sonnet-20241022',
},
{
displayName: 'Claude 3.5 Sonnet',
key: 'claude-3-5-sonnet-20240620',
},
{
displayName: 'Claude 3 Opus',
key: 'claude-3-opus-20240229',
},
{
displayName: 'Claude 3 Sonnet',
key: 'claude-3-sonnet-20240229',
},
{
displayName: 'Claude 3 Haiku',
key: 'claude-3-haiku-20240307',
},
];
export const loadAnthropicChatModels = async () => {
  const anthropicApiKey = getAnthropicApiKey();
@@ -8,52 +40,25 @@ export const loadAnthropicChatModels = async () => {
  if (!anthropicApiKey) return {};

  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    anthropicChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: anthropicApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.anthropic.com/v1/',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
-    const chatModels = {
-      'claude-3-5-sonnet-20241022': {
-        displayName: 'Claude 3.5 Sonnet',
-        model: new ChatAnthropic({
-          temperature: 0.7,
-          anthropicApiKey: anthropicApiKey,
-          model: 'claude-3-5-sonnet-20241022',
-        }),
-      },
-      'claude-3-5-haiku-20241022': {
-        displayName: 'Claude 3.5 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-5-haiku-20241022',
}),
},
'claude-3-opus-20240229': {
displayName: 'Claude 3 Opus',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-opus-20240229',
}),
},
'claude-3-sonnet-20240229': {
displayName: 'Claude 3 Sonnet',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-sonnet-20240229',
}),
},
'claude-3-haiku-20240307': {
displayName: 'Claude 3 Haiku',
model: new ChatAnthropic({
temperature: 0.7,
anthropicApiKey: anthropicApiKey,
model: 'claude-3-haiku-20240307',
}),
},
};
    return chatModels;
  } catch (err) {
-    logger.error(`Error loading Anthropic models: ${err}`);
+    console.error(`Error loading Anthropic models: ${err}`);
    return {};
  }
};


@@ -1,9 +1,42 @@
-import {
-  ChatGoogleGenerativeAI,
-  GoogleGenerativeAIEmbeddings,
-} from '@langchain/google-genai';
-import { getGeminiApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { getGeminiApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
},
{
displayName: 'Gemini 2.0 Flash-Lite',
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Pro Experimental',
key: 'gemini-2.0-pro-exp-02-05',
},
{
displayName: 'Gemini 1.5 Flash',
key: 'gemini-1.5-flash',
},
{
displayName: 'Gemini 1.5 Flash-8B',
key: 'gemini-1.5-flash-8b',
},
{
displayName: 'Gemini 1.5 Pro',
key: 'gemini-1.5-pro',
},
];
const geminiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Gemini Embedding',
key: 'gemini-embedding-exp',
},
];
export const loadGeminiChatModels = async () => {
  const geminiApiKey = getGeminiApiKey();
@@ -11,75 +44,53 @@ export const loadGeminiChatModels = async () => {
  if (!geminiApiKey) return {};

  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    geminiChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: geminiApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
-    const chatModels = {
-      'gemini-1.5-flash': {
-        displayName: 'Gemini 1.5 Flash',
-        model: new ChatGoogleGenerativeAI({
-          modelName: 'gemini-1.5-flash',
-          temperature: 0.7,
-          apiKey: geminiApiKey,
-        }),
-      },
-      'gemini-1.5-flash-8b': {
-        displayName: 'Gemini 1.5 Flash 8B',
-        model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash-8b',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-1.5-pro': {
displayName: 'Gemini 1.5 Pro',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-pro',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-exp': {
displayName: 'Gemini 2.0 Flash Exp',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-exp',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
'gemini-2.0-flash-thinking-exp-01-21': {
displayName: 'Gemini 2.0 Flash Thinking Exp 01-21',
model: new ChatGoogleGenerativeAI({
modelName: 'gemini-2.0-flash-thinking-exp-01-21',
temperature: 0.7,
apiKey: geminiApiKey,
}),
},
};
    return chatModels;
  } catch (err) {
-    logger.error(`Error loading Gemini models: ${err}`);
+    console.error(`Error loading Gemini models: ${err}`);
    return {};
  }
};

-export const loadGeminiEmbeddingsModels = async () => {
+export const loadGeminiEmbeddingModels = async () => {
  const geminiApiKey = getGeminiApiKey();

  if (!geminiApiKey) return {};

  try {
-    const embeddingModels = {
-      'text-embedding-004': {
-        displayName: 'Text Embedding',
-        model: new GoogleGenerativeAIEmbeddings({
-          apiKey: geminiApiKey,
-          modelName: 'text-embedding-004',
-        }),
-      },
-    };
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    geminiEmbeddingModels.forEach((model) => {
+      embeddingModels[model.key] = {
+        displayName: model.displayName,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: geminiApiKey,
+          modelName: model.key,
+          configuration: {
+            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
+          },
+        }) as unknown as Embeddings,
+      };
+    });

    return embeddingModels;
  } catch (err) {
-    logger.error(`Error loading Gemini embeddings model: ${err}`);
+    console.error(`Error loading OpenAI embeddings models: ${err}`);
    return {};
  }
};


@@ -1,6 +1,82 @@
import { ChatOpenAI } from '@langchain/openai';
-import { getGroqApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { getGroqApiKey } from '../config';
+import { ChatModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const groqChatModels: Record<string, string>[] = [
{
displayName: 'Gemma2 9B IT',
key: 'gemma2-9b-it',
},
{
displayName: 'Llama 3.3 70B Versatile',
key: 'llama-3.3-70b-versatile',
},
{
displayName: 'Llama 3.1 8B Instant',
key: 'llama-3.1-8b-instant',
},
{
displayName: 'Llama3 70B 8192',
key: 'llama3-70b-8192',
},
{
displayName: 'Llama3 8B 8192',
key: 'llama3-8b-8192',
},
{
displayName: 'Mixtral 8x7B 32768',
key: 'mixtral-8x7b-32768',
},
{
displayName: 'Qwen QWQ 32B (Preview)',
key: 'qwen-qwq-32b',
},
{
displayName: 'Mistral Saba 24B (Preview)',
key: 'mistral-saba-24b',
},
{
displayName: 'Qwen 2.5 Coder 32B (Preview)',
key: 'qwen-2.5-coder-32b',
},
{
displayName: 'Qwen 2.5 32B (Preview)',
key: 'qwen-2.5-32b',
},
{
displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
key: 'deepseek-r1-distill-qwen-32b',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B SpecDec (Preview)',
key: 'deepseek-r1-distill-llama-70b-specdec',
},
{
displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
key: 'deepseek-r1-distill-llama-70b',
},
{
displayName: 'Llama 3.3 70B SpecDec (Preview)',
key: 'llama-3.3-70b-specdec',
},
{
displayName: 'Llama 3.2 1B Preview (Preview)',
key: 'llama-3.2-1b-preview',
},
{
displayName: 'Llama 3.2 3B Preview (Preview)',
key: 'llama-3.2-3b-preview',
},
{
displayName: 'Llama 3.2 11B Vision Preview (Preview)',
key: 'llama-3.2-11b-vision-preview',
},
{
displayName: 'Llama 3.2 90B Vision Preview (Preview)',
key: 'llama-3.2-90b-vision-preview',
},
];
export const loadGroqChatModels = async () => {
  const groqApiKey = getGroqApiKey();
@@ -8,129 +84,25 @@ export const loadGroqChatModels = async () => {
  if (!groqApiKey) return {};

  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    groqChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: groqApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
-    const chatModels = {
-      'llama-3.3-70b-versatile': {
-        displayName: 'Llama 3.3 70B',
-        model: new ChatOpenAI(
-          {
-            openAIApiKey: groqApiKey,
-            modelName: 'llama-3.3-70b-versatile',
-            temperature: 0.7,
-          },
-          {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
-        ),
-      },
-      'llama-3.2-3b-preview': {
displayName: 'Llama 3.2 3B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-3b-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-11b-vision-preview': {
displayName: 'Llama 3.2 11B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-11b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.2-90b-vision-preview': {
displayName: 'Llama 3.2 90B Vision',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.2-90b-vision-preview',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama-3.1-8b-instant': {
displayName: 'Llama 3.1 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama-3.1-8b-instant',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-8b-8192': {
displayName: 'LLaMA3 8B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-8b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'llama3-70b-8192': {
displayName: 'LLaMA3 70B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'llama3-70b-8192',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'mixtral-8x7b-32768': {
displayName: 'Mixtral 8x7B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'mixtral-8x7b-32768',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
'gemma2-9b-it': {
displayName: 'Gemma2 9B',
model: new ChatOpenAI(
{
openAIApiKey: groqApiKey,
modelName: 'gemma2-9b-it',
temperature: 0.7,
},
{
baseURL: 'https://api.groq.com/openai/v1',
},
),
},
};
    return chatModels;
  } catch (err) {
-    logger.error(`Error loading Groq models: ${err}`);
+    console.error(`Error loading Groq models: ${err}`);
    return {};
  }
};


@@ -1,33 +1,49 @@
-import { loadGroqChatModels } from './groq';
-import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { Embeddings } from '@langchain/core/embeddings';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
import {
  getCustomOpenaiApiKey,
  getCustomOpenaiApiUrl,
  getCustomOpenaiModelName,
-} from '../../config';
+} from '../config';
import { ChatOpenAI } from '@langchain/openai';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
+import { loadGroqChatModels } from './groq';
+import { loadAnthropicChatModels } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';

-const chatModelProviders = {
+export interface ChatModel {
+  displayName: string;
+  model: BaseChatModel;
+}
+
+export interface EmbeddingModel {
+  displayName: string;
+  model: Embeddings;
+}
+
+export const chatModelProviders: Record<
+  string,
+  () => Promise<Record<string, ChatModel>>
+> = {
  openai: loadOpenAIChatModels,
-  groq: loadGroqChatModels,
  ollama: loadOllamaChatModels,
+  groq: loadGroqChatModels,
  anthropic: loadAnthropicChatModels,
  gemini: loadGeminiChatModels,
};

-const embeddingModelProviders = {
-  openai: loadOpenAIEmbeddingsModels,
-  local: loadTransformersEmbeddingsModels,
-  ollama: loadOllamaEmbeddingsModels,
-  gemini: loadGeminiEmbeddingsModels,
+export const embeddingModelProviders: Record<
+  string,
+  () => Promise<Record<string, EmbeddingModel>>
+> = {
+  openai: loadOpenAIEmbeddingModels,
+  ollama: loadOllamaEmbeddingModels,
+  gemini: loadGeminiEmbeddingModels,
};

export const getAvailableChatModelProviders = async () => {
-  const models = {};
+  const models: Record<string, Record<string, ChatModel>> = {};
  for (const provider in chatModelProviders) {
    const providerModels = await chatModelProviders[provider]();
@@ -52,7 +68,7 @@ export const getAvailableChatModelProviders = async () => {
            configuration: {
              baseURL: customOpenAiApiUrl,
            },
-          }),
+          }) as unknown as BaseChatModel,
        },
      }
      : {}),
@@ -62,7 +78,7 @@ export const getAvailableChatModelProviders = async () => {
};

export const getAvailableEmbeddingModelProviders = async () => {
-  const models = {};
+  const models: Record<string, Record<string, EmbeddingModel>> = {};
  for (const provider in embeddingModelProviders) {
    const providerModels = await embeddingModelProviders[provider]();
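For reference (not part of the diff), a short sketch of how the typed registry above is meant to be consumed; the '@/lib/providers' alias is the same path the chat route imports from, and everything else here is illustrative.

// Hedged sketch built only on the ChatModel/EmbeddingModel interfaces shown above.
import { getAvailableChatModelProviders } from '@/lib/providers';

const providers = await getAvailableChatModelProviders();
// Pick a provider key (e.g. 'openai'), falling back to the first one available.
const provider = providers['openai'] ?? providers[Object.keys(providers)[0]];
const chatModel = provider[Object.keys(provider)[0]];
const reply = await chatModel.model.invoke('Hello from Perplexica!');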


@@ -1,74 +1,73 @@
-import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
-import { getKeepAlive, getOllamaApiEndpoint } from '../../config';
-import logger from '../../utils/logger';
-import { ChatOllama } from '@langchain/community/chat_models/ollama';
import axios from 'axios';
+import { getKeepAlive, getOllamaApiEndpoint } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
+import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';

export const loadOllamaChatModels = async () => {
-  const ollamaEndpoint = getOllamaApiEndpoint();
-  const keepAlive = getKeepAlive();
+  const ollamaApiEndpoint = getOllamaApiEndpoint();

-  if (!ollamaEndpoint) return {};
+  if (!ollamaApiEndpoint) return {};

  try {
-    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
+    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

-    const { models: ollamaModels } = response.data;
+    const { models } = res.data;

-    const chatModels = ollamaModels.reduce((acc, model) => {
-      acc[model.model] = {
+    const chatModels: Record<string, ChatModel> = {};
+
+    models.forEach((model: any) => {
+      chatModels[model.model] = {
        displayName: model.name,
        model: new ChatOllama({
-          baseUrl: ollamaEndpoint,
+          baseUrl: ollamaApiEndpoint,
          model: model.model,
          temperature: 0.7,
-          keepAlive: keepAlive,
+          keepAlive: getKeepAlive(),
        }),
      };
-
-      return acc;
-    }, {});
+    });

    return chatModels;
  } catch (err) {
-    logger.error(`Error loading Ollama models: ${err}`);
+    console.error(`Error loading Ollama models: ${err}`);
    return {};
  }
};

-export const loadOllamaEmbeddingsModels = async () => {
-  const ollamaEndpoint = getOllamaApiEndpoint();
+export const loadOllamaEmbeddingModels = async () => {
+  const ollamaApiEndpoint = getOllamaApiEndpoint();

-  if (!ollamaEndpoint) return {};
+  if (!ollamaApiEndpoint) return {};

  try {
-    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
+    const res = await axios.get(`${ollamaApiEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

-    const { models: ollamaModels } = response.data;
+    const { models } = res.data;

-    const embeddingsModels = ollamaModels.reduce((acc, model) => {
-      acc[model.model] = {
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    models.forEach((model: any) => {
+      embeddingModels[model.model] = {
        displayName: model.name,
        model: new OllamaEmbeddings({
-          baseUrl: ollamaEndpoint,
+          baseUrl: ollamaApiEndpoint,
          model: model.model,
        }),
      };
+    });

-      return acc;
-    }, {});
-
-    return embeddingsModels;
+    return embeddingModels;
  } catch (err) {
-    logger.error(`Error loading Ollama embeddings model: ${err}`);
+    console.error(`Error loading Ollama embeddings models: ${err}`);
    return {};
  }
};


@@ -1,89 +1,90 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
-import { getOpenaiApiKey } from '../../config';
-import logger from '../../utils/logger';
+import { getOpenaiApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
const openaiChatModels: Record<string, string>[] = [
{
displayName: 'GPT-3.5 Turbo',
key: 'gpt-3.5-turbo',
},
{
displayName: 'GPT-4',
key: 'gpt-4',
},
{
displayName: 'GPT-4 turbo',
key: 'gpt-4-turbo',
},
{
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
{
displayName: 'Text Embedding 3 Small',
key: 'text-embedding-3-small',
},
{
displayName: 'Text Embedding 3 Large',
key: 'text-embedding-3-large',
},
];
export const loadOpenAIChatModels = async () => {
-  const openAIApiKey = getOpenaiApiKey();
+  const openaiApiKey = getOpenaiApiKey();

-  if (!openAIApiKey) return {};
+  if (!openaiApiKey) return {};

  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    openaiChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          openAIApiKey: openaiApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+        }) as unknown as BaseChatModel,
+      };
+    });
-    const chatModels = {
-      'gpt-3.5-turbo': {
-        displayName: 'GPT-3.5 Turbo',
-        model: new ChatOpenAI({
-          openAIApiKey,
-          modelName: 'gpt-3.5-turbo',
-          temperature: 0.7,
-        }),
-      },
-      'gpt-4': {
displayName: 'GPT-4',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4',
temperature: 0.7,
}),
},
'gpt-4-turbo': {
displayName: 'GPT-4 turbo',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4-turbo',
temperature: 0.7,
}),
},
'gpt-4o': {
displayName: 'GPT-4 omni',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o',
temperature: 0.7,
}),
},
'gpt-4o-mini': {
displayName: 'GPT-4 omni mini',
model: new ChatOpenAI({
openAIApiKey,
modelName: 'gpt-4o-mini',
temperature: 0.7,
}),
},
};
return chatModels; return chatModels;
} catch (err) { } catch (err) {
logger.error(`Error loading OpenAI models: ${err}`); console.error(`Error loading OpenAI models: ${err}`);
return {}; return {};
} }
}; };
export const loadOpenAIEmbeddingsModels = async () => { export const loadOpenAIEmbeddingModels = async () => {
const openAIApiKey = getOpenaiApiKey(); const openaiApiKey = getOpenaiApiKey();
if (!openAIApiKey) return {}; if (!openaiApiKey) return {};
try { try {
const embeddingModels = { const embeddingModels: Record<string, EmbeddingModel> = {};
'text-embedding-3-small': {
displayName: 'Text Embedding 3 Small', openaiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({ model: new OpenAIEmbeddings({
openAIApiKey, openAIApiKey: openaiApiKey,
modelName: 'text-embedding-3-small', modelName: model.key,
}), }) as unknown as Embeddings,
}, };
'text-embedding-3-large': { });
displayName: 'Text Embedding 3 Large',
model: new OpenAIEmbeddings({
openAIApiKey,
modelName: 'text-embedding-3-large',
}),
},
};
return embeddingModels; return embeddingModels;
} catch (err) { } catch (err) {
logger.error(`Error loading OpenAI embeddings model: ${err}`); console.error(`Error loading OpenAI embeddings models: ${err}`);
return {}; return {};
} }
}; };

View File

@@ -1,32 +0,0 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const loadTransformersEmbeddingsModels = async () => {
try {
const embeddingModels = {
'xenova-bge-small-en-v1.5': {
displayName: 'BGE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bge-small-en-v1.5',
}),
},
'xenova-gte-small': {
displayName: 'GTE Small',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/gte-small',
}),
},
'xenova-bert-base-multilingual-uncased': {
displayName: 'Bert Multilingual',
model: new HuggingFaceTransformersEmbeddings({
modelName: 'Xenova/bert-base-multilingual-uncased',
}),
},
};
return embeddingModels;
} catch (err) {
logger.error(`Error loading Transformers embeddings model: ${err}`);
return {};
}
};

View File

@@ -1,5 +1,5 @@
 import axios from 'axios';
-import { getSearxngApiEndpoint } from '../config';
+import { getSearxngApiEndpoint } from './config';

 interface SearxngSearchOptions {
   categories?: string[];
@@ -30,11 +30,12 @@ export const searchSearxng = async (
   if (opts) {
     Object.keys(opts).forEach((key) => {
-      if (Array.isArray(opts[key])) {
-        url.searchParams.append(key, opts[key].join(','));
+      const value = opts[key as keyof SearxngSearchOptions];
+
+      if (Array.isArray(value)) {
+        url.searchParams.append(key, value.join(','));
         return;
       }

-      url.searchParams.append(key, opts[key]);
+      url.searchParams.append(key, value as string);
     });
   }

View File

@@ -1,66 +0,0 @@
import express from 'express';
import logger from '../utils/logger';
import db from '../db/index';
import { eq } from 'drizzle-orm';
import { chats, messages } from '../db/schema';
const router = express.Router();
router.get('/', async (_, res) => {
try {
let chats = await db.query.chats.findMany();
chats = chats.reverse();
return res.status(200).json({ chats: chats });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chats: ${err.message}`);
}
});
router.get('/:id', async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
const chatMessages = await db.query.messages.findMany({
where: eq(messages.chatId, req.params.id),
});
return res.status(200).json({ chat: chatExists, messages: chatMessages });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in getting chat: ${err.message}`);
}
});
router.delete(`/:id`, async (req, res) => {
try {
const chatExists = await db.query.chats.findFirst({
where: eq(chats.id, req.params.id),
});
if (!chatExists) {
return res.status(404).json({ message: 'Chat not found' });
}
await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
await db
.delete(messages)
.where(eq(messages.chatId, req.params.id))
.execute();
return res.status(200).json({ message: 'Chat deleted successfully' });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in deleting chat: ${err.message}`);
}
});
export default router;

View File

@@ -1,104 +0,0 @@
import express from 'express';
import {
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '../lib/providers';
import {
getGroqApiKey,
getOllamaApiEndpoint,
getAnthropicApiKey,
getGeminiApiKey,
getOpenaiApiKey,
updateConfig,
getCustomOpenaiApiUrl,
getCustomOpenaiApiKey,
getCustomOpenaiModelName,
} from '../config';
import logger from '../utils/logger';
const router = express.Router();
router.get('/', async (_, res) => {
try {
const config = {};
const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(),
]);
config['chatModelProviders'] = {};
config['embeddingModelProviders'] = {};
for (const provider in chatModelProviders) {
config['chatModelProviders'][provider] = Object.keys(
chatModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: chatModelProviders[provider][model].displayName,
};
});
}
for (const provider in embeddingModelProviders) {
config['embeddingModelProviders'][provider] = Object.keys(
embeddingModelProviders[provider],
).map((model) => {
return {
name: model,
displayName: embeddingModelProviders[provider][model].displayName,
};
});
}
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
res.status(200).json(config);
} catch (err: any) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error getting config: ${err.message}`);
}
});
router.post('/', async (req, res) => {
const config = req.body;
const updatedConfig = {
MODELS: {
OPENAI: {
API_KEY: config.openaiApiKey,
},
GROQ: {
API_KEY: config.groqApiKey,
},
ANTHROPIC: {
API_KEY: config.anthropicApiKey,
},
GEMINI: {
API_KEY: config.geminiApiKey,
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
MODEL_NAME: config.customOpenaiModelName,
},
},
};
updateConfig(updatedConfig);
res.status(200).json({ message: 'Config updated' });
});
export default router;

View File

@@ -1,48 +0,0 @@
import express from 'express';
import { searchSearxng } from '../lib/searxng';
import logger from '../utils/logger';
const router = express.Router();
router.get('/', async (req, res) => {
try {
const data = (
await Promise.all([
searchSearxng('site:businessinsider.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:www.exchangewire.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com AI', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:businessinsider.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:www.exchangewire.com tech', {
engines: ['bing news'],
pageno: 1,
}),
searchSearxng('site:yahoo.com tech', {
engines: ['bing news'],
pageno: 1,
}),
])
)
.map((result) => result.results)
.flat()
.sort(() => Math.random() - 0.5);
return res.json({ blogs: data });
} catch (err: any) {
logger.error(`Error in discover route: ${err.message}`);
return res.status(500).json({ message: 'An error has occurred' });
}
});
export default router;

View File

@@ -1,82 +0,0 @@
import express from 'express';
import handleImageSearch from '../chains/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
}
interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
}
router.post('/', async (req, res) => {
try {
let body: ImageSearchBody = req.body;
const chatHistory = body.chatHistory.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
const chatModelProviders = await getAvailableChatModelProviders();
const chatModelProvider =
body.chatModel?.provider || Object.keys(chatModelProviders)[0];
const chatModel =
body.chatModel?.model ||
Object.keys(chatModelProviders[chatModelProvider])[0];
let llm: BaseChatModel | undefined;
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: getCustomOpenaiModelName(),
openAIApiKey: getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
chatModelProviders[chatModelProvider] &&
chatModelProviders[chatModelProvider][chatModel]
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
}
if (!llm) {
return res.status(400).json({ message: 'Invalid model selected' });
}
const images = await handleImageSearch(
{ query: body.query, chat_history: chatHistory },
llm,
);
res.status(200).json({ images });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in image search: ${err.message}`);
}
});
export default router;

View File

@@ -1,24 +0,0 @@
import express from 'express';
import imagesRouter from './images';
import videosRouter from './videos';
import configRouter from './config';
import modelsRouter from './models';
import suggestionsRouter from './suggestions';
import chatsRouter from './chats';
import searchRouter from './search';
import discoverRouter from './discover';
import uploadsRouter from './uploads';
const router = express.Router();
router.use('/images', imagesRouter);
router.use('/videos', videosRouter);
router.use('/config', configRouter);
router.use('/models', modelsRouter);
router.use('/suggestions', suggestionsRouter);
router.use('/chats', chatsRouter);
router.use('/search', searchRouter);
router.use('/discover', discoverRouter);
router.use('/uploads', uploadsRouter);
export default router;

Some files were not shown because too many files have changed in this diff.