Compare commits

...

53 Commits

Author SHA1 Message Date
ItzCrazyKns
d6c364fdcb feat(models): remove old providers 2025-11-22 22:23:10 +05:30
ItzCrazyKns
8d04f636d0 Delete index.ts 2025-11-22 22:22:43 +05:30
ItzCrazyKns
9ac2da3607 feat(app): remove old search agent 2025-11-22 22:22:34 +05:30
ItzCrazyKns
55cf88822d feat(package): add modules 2025-11-21 23:58:04 +05:30
ItzCrazyKns
c4acc83fd5 feat(agents): add search agent 2025-11-21 23:57:50 +05:30
ItzCrazyKns
08feb18197 feat(search-agent): add researcher, research actions 2025-11-21 23:57:29 +05:30
ItzCrazyKns
0df0114e76 feat(prompts): add researcher prompt 2025-11-21 23:54:30 +05:30
ItzCrazyKns
4016b21bdf Update formatHistory.ts 2025-11-21 23:54:16 +05:30
ItzCrazyKns
f7a43b3cb9 feat(session): use blocks, use rfc6902 for stream with patching 2025-11-21 23:52:55 +05:30
ItzCrazyKns
70bcd8c6f1 feat(types): add artifact to block, add more blocks 2025-11-21 23:51:09 +05:30
ItzCrazyKns
2568088341 feat(db): add new migration files 2025-11-21 23:49:52 +05:30
ItzCrazyKns
a494d4c329 feat(app): fix migration errors 2025-11-21 23:49:27 +05:30
ItzCrazyKns
9b85c63a80 feat(db): migrate schema 2025-11-21 23:49:14 +05:30
ItzCrazyKns
1614cfa5e5 feat(app): add widgets 2025-11-20 14:55:50 +05:30
ItzCrazyKns
036b44611f feat(search): add classifier 2025-11-20 14:55:24 +05:30
ItzCrazyKns
8b515201f3 feat(app): add search types 2025-11-20 14:53:03 +05:30
ItzCrazyKns
cbcb03c7ac feat(llm): update return type to partial 2025-11-20 14:52:41 +05:30
ItzCrazyKns
afc68ca91f feat(ollamaLLM): disable thinking in obj mode 2025-11-20 14:52:24 +05:30
ItzCrazyKns
3cc8882b28 feat(prompts): add classifier prompt 2025-11-20 14:51:49 +05:30
ItzCrazyKns
c3830795cb feat(app): add new session manager 2025-11-20 14:51:17 +05:30
ItzCrazyKns
f44ad973aa feat(types): add llm types 2025-11-18 14:39:43 +05:30
ItzCrazyKns
4bcbdad6cb feat(providers): implement custom classes 2025-11-18 14:39:04 +05:30
ItzCrazyKns
5272c7fd3e feat(models): add new base classes 2025-11-18 14:38:12 +05:30
ItzCrazyKns
657a577ec8 feat(app): enhance UI 2025-11-18 14:37:41 +05:30
ItzCrazyKns
f6dac43d7a feat(types): add message & chunk type 2025-11-18 01:17:19 +05:30
ItzCrazyKns
a00f2231d4 feat(chat-window): remove loading state 2025-11-14 23:17:41 +05:30
ItzCrazyKns
1da9b7655c Merge branch 'canary' into feat/improve-search-architecture 2025-11-14 14:38:58 +05:30
ItzCrazyKns
2edef888a3 Merge branch 'master' into canary 2025-11-14 13:29:22 +05:30
ItzCrazyKns
2dc8078848 Update Exa sponsor image and README styling 2025-11-14 13:23:50 +05:30
ItzCrazyKns
8df81c20cf Update README.md 2025-11-14 13:19:49 +05:30
ItzCrazyKns
34bd02236d Update README.md 2025-11-14 13:17:52 +05:30
ItzCrazyKns
2430376a0c feat(readme): update sponsers 2025-11-14 13:15:59 +05:30
ItzCrazyKns
bd5628b390 feat(package): bump langchain package 2025-11-14 11:45:48 +05:30
ItzCrazyKns
3d5d04eda0 Merge branch 'canary' into feat/improve-search-architecture 2025-11-13 11:54:24 +05:30
ItzCrazyKns
07a17925b1 feat(media-search): supply full history 2025-11-13 11:53:53 +05:30
ItzCrazyKns
3bcf646af1 feat(search-route): handle history processing after llm validation 2025-11-13 11:52:12 +05:30
ItzCrazyKns
e499c0b96e feat(app): migrate video search chain 2025-11-13 11:51:25 +05:30
ItzCrazyKns
33b736e1e8 feat(app): migrate image search chain 2025-11-13 11:51:13 +05:30
Kushagra Srivastava
5e1746f646 Merge pull request #928 from ItzCrazyKns/master
Merge master into canary
2025-11-13 11:49:42 +05:30
ItzCrazyKns
41fe009847 feat(app): migrate suggestion chain 2025-11-13 11:47:28 +05:30
ItzCrazyKns
1a8889c71c feat(app): add new agents directory 2025-11-10 16:45:48 +05:30
ItzCrazyKns
70c1f7230c feat(assets): remove old preview 2025-11-08 21:31:56 +05:30
ItzCrazyKns
c0771095a6 feat(app): lint & beautify 2025-10-30 17:21:48 +05:30
ItzCrazyKns
0856896aff feat(settings): fix text size, enhance UI 2025-10-30 17:21:40 +05:30
ItzCrazyKns
3da53aed03 Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-10-30 11:36:30 +05:30
ItzCrazyKns
244675759c feat(config): add getAutoMediaSearch, update uses 2025-10-30 11:29:14 +05:30
ItzCrazyKns
ce6a37aaff feat(settingsFields): add switch field 2025-10-30 11:28:15 +05:30
ItzCrazyKns
c3abba8462 feat(settings): separate personalization & preferences 2025-10-29 23:13:51 +05:30
ItzCrazyKns
f709aa8224 feat(config): add new switch config field 2025-10-29 23:12:09 +05:30
Kushagra Srivastava
22695f4ef6 Merge pull request #916 from skoved/gemini-embedding-fix
fix: list all available gemini embedding models
2025-10-28 21:56:44 +05:30
skoved
75ef2e0282 fix: list all available gemini embedding models
The new settings window does not list all available Gemini embedding models. This happens because some Gemini embedding models have `embedContent` instead of `embedText`.
2025-10-28 11:31:41 -04:00
ItzCrazyKns
2e736613c5 Merge branch 'master' into canary 2025-10-27 11:43:18 +05:30
ItzCrazyKns
046daf442a feat(docker): update searxng build script 2025-10-23 19:06:27 +05:30
88 changed files with 2677 additions and 2339 deletions

Binary file not shown (before: 16 MiB).

BIN .assets/sponsers/exa.png (new file): binary file not shown (after: 6.5 KiB).

View File

@@ -49,10 +49,29 @@ Perplexica's development is powered by the generous support of our sponsors. The
<img alt="Warp Terminal" src=".assets/sponsers/warp.png" width="100%">
</a>
**[Warp](https://www.warp.dev/perplexica)** - The AI-powered terminal revolutionizing development workflows
### **✨ [Try Warp - The AI-Powered Terminal →](https://www.warp.dev/perplexica)**
Warp is revolutionizing development workflows with AI-powered features, modern UX, and blazing-fast performance. Used by developers at top companies worldwide.
</div>
---
We'd also like to thank the following partners for their generous support:
<table>
<tr>
<td>
<a href="https://dashboard.exa.ai" target="_blank">
<img src=".assets/sponsers/exa.png" alt="Exa" style="max-width: 8rem; max-height: 8rem; border-radius: .75rem;" />
</a>
</td>
<td>
<a href="https://dashboard.exa.ai">Exa</a> • The Perfect Web Search API for LLMs - web search, crawling, deep research, and answer APIs
</td>
</tr>
</table>
## Installation
There are mainly 2 ways of installing Perplexica - With Docker, Without Docker. Using Docker is highly recommended.

View File

@@ -0,0 +1,15 @@
PRAGMA foreign_keys=OFF;--> statement-breakpoint
CREATE TABLE `__new_messages` (
`id` integer PRIMARY KEY NOT NULL,
`messageId` text NOT NULL,
`chatId` text NOT NULL,
`backendId` text NOT NULL,
`query` text NOT NULL,
`createdAt` text NOT NULL,
`responseBlocks` text DEFAULT '[]',
`status` text DEFAULT 'answering'
);
--> statement-breakpoint
DROP TABLE `messages`;--> statement-breakpoint
ALTER TABLE `__new_messages` RENAME TO `messages`;--> statement-breakpoint
PRAGMA foreign_keys=ON;
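SQLite's limited ALTER TABLE support means a column change like this is applied with the standard rebuild pattern: create a replacement table, drop the original, then rename. A minimal sketch of reading the new JSON-backed columns, assuming better-sqlite3 (suggested by the @types devDependency in the package diff below); the database path and messageId are illustrative:

// responseBlocks is stored as JSON text with a '[]' default, so reads decode it.
import Database from 'better-sqlite3';

const db = new Database('perplexica.db');
const row = db
  .prepare('SELECT responseBlocks, status FROM messages WHERE messageId = ?')
  .get('example-id') as
  | { responseBlocks: string | null; status: string | null }
  | undefined;

const blocks: unknown[] = row ? JSON.parse(row.responseBlocks ?? '[]') : [];
console.log(blocks.length, row?.status ?? 'no row');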

View File

@@ -0,0 +1,132 @@
{
"version": "6",
"dialect": "sqlite",
"id": "1c5eb804-d6b4-48ec-9a8f-75fb729c8e52",
"prevId": "6dedf55f-0e44-478f-82cf-14a21ac686f8",
"tables": {
"chats": {
"name": "chats",
"columns": {
"id": {
"name": "id",
"type": "text",
"primaryKey": true,
"notNull": true,
"autoincrement": false
},
"title": {
"name": "title",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"createdAt": {
"name": "createdAt",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"focusMode": {
"name": "focusMode",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"files": {
"name": "files",
"type": "text",
"primaryKey": false,
"notNull": false,
"autoincrement": false,
"default": "'[]'"
}
},
"indexes": {},
"foreignKeys": {},
"compositePrimaryKeys": {},
"uniqueConstraints": {},
"checkConstraints": {}
},
"messages": {
"name": "messages",
"columns": {
"id": {
"name": "id",
"type": "integer",
"primaryKey": true,
"notNull": true,
"autoincrement": false
},
"messageId": {
"name": "messageId",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"chatId": {
"name": "chatId",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"backendId": {
"name": "backendId",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"query": {
"name": "query",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"createdAt": {
"name": "createdAt",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"responseBlocks": {
"name": "responseBlocks",
"type": "text",
"primaryKey": false,
"notNull": false,
"autoincrement": false,
"default": "'[]'"
},
"status": {
"name": "status",
"type": "text",
"primaryKey": false,
"notNull": false,
"autoincrement": false,
"default": "'answering'"
}
},
"indexes": {},
"foreignKeys": {},
"compositePrimaryKeys": {},
"uniqueConstraints": {},
"checkConstraints": {}
}
},
"views": {},
"enums": {},
"_meta": {
"schemas": {},
"tables": {},
"columns": {}
},
"internal": {
"indexes": {}
}
}

View File

@@ -15,6 +15,13 @@
"when": 1758863991284,
"tag": "0001_wise_rockslide",
"breakpoints": true
},
{
"idx": 2,
"version": "6",
"when": 1763732708332,
"tag": "0002_daffy_wrecker",
"breakpoints": true
}
]
}

View File

@@ -16,13 +16,14 @@
"@huggingface/transformers": "^3.7.5",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^1.0.0",
"@langchain/community": "^1.0.0",
"@langchain/core": "^1.0.1",
"@langchain/google-genai": "^1.0.0",
"@langchain/groq": "^1.0.0",
"@langchain/ollama": "^1.0.0",
"@langchain/openai": "^1.0.0",
"@langchain/anthropic": "^1.0.1",
"@langchain/community": "^1.0.3",
"@langchain/core": "^1.0.5",
"@langchain/google-genai": "^1.0.1",
"@langchain/groq": "^1.0.1",
"@langchain/langgraph": "^1.0.1",
"@langchain/ollama": "^1.0.1",
"@langchain/openai": "^1.1.1",
"@langchain/textsplitters": "^1.0.0",
"@tailwindcss/typography": "^0.5.12",
"axios": "^1.8.3",
@@ -33,22 +34,26 @@
"framer-motion": "^12.23.24",
"html-to-text": "^9.0.5",
"jspdf": "^3.0.1",
"langchain": "^1.0.1",
"langchain": "^1.0.4",
"lucide-react": "^0.363.0",
"mammoth": "^1.9.1",
"markdown-to-jsx": "^7.7.2",
"next": "^15.2.2",
"next-themes": "^0.3.0",
"ollama": "^0.6.3",
"openai": "^6.9.0",
"partial-json": "^0.1.7",
"pdf-parse": "^1.1.1",
"react": "^18",
"react-dom": "^18",
"react-text-to-speech": "^0.14.5",
"react-textarea-autosize": "^8.5.3",
"rfc6902": "^5.1.2",
"sonner": "^1.4.41",
"tailwind-merge": "^2.2.2",
"winston": "^3.17.0",
"yet-another-react-lightbox": "^3.17.2",
"zod": "^3.22.4"
"zod": "^4.1.12"
},
"devDependencies": {
"@types/better-sqlite3": "^7.6.12",

View File

@@ -1,4 +1,4 @@
import handleImageSearch from '@/lib/chains/imageSearchAgent';
import searchImages from '@/lib/agents/media/image';
import ModelRegistry from '@/lib/models/registry';
import { ModelWithProvider } from '@/lib/models/types';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
@@ -13,6 +13,13 @@ export const POST = async (req: Request) => {
try {
const body: ImageSearchBody = await req.json();
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
@@ -23,16 +30,9 @@ export const POST = async (req: Request) => {
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const images = await handleImageSearch(
const images = await searchImages(
{
chat_history: chatHistory,
chatHistory: chatHistory,
query: body.query,
},
llm,

View File

@@ -30,12 +30,6 @@ export const POST = async (req: Request) => {
body.optimizationMode = body.optimizationMode || 'balanced';
body.stream = body.stream || false;
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const registry = new ModelRegistry();
const [llm, embeddings] = await Promise.all([
@@ -46,6 +40,12 @@ export const POST = async (req: Request) => {
),
]);
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
? new HumanMessage({ content: msg[1] })
: new AIMessage({ content: msg[1] });
});
const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
if (!searchHandler) {

View File

@@ -1,7 +1,6 @@
import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
import generateSuggestions from '@/lib/agents/suggestions';
import ModelRegistry from '@/lib/models/registry';
import { ModelWithProvider } from '@/lib/models/types';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
interface SuggestionsGenerationBody {
@@ -13,6 +12,13 @@ export const POST = async (req: Request) => {
try {
const body: SuggestionsGenerationBody = await req.json();
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
@@ -23,16 +29,9 @@ export const POST = async (req: Request) => {
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
chatHistory,
},
llm,
);

View File

@@ -1,4 +1,4 @@
import handleVideoSearch from '@/lib/chains/videoSearchAgent';
import handleVideoSearch from '@/lib/agents/media/video';
import ModelRegistry from '@/lib/models/registry';
import { ModelWithProvider } from '@/lib/models/types';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
@@ -13,6 +13,13 @@ export const POST = async (req: Request) => {
try {
const body: VideoSearchBody = await req.json();
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const chatHistory = body.chatHistory
.map((msg: any) => {
if (msg.role === 'user') {
@@ -23,16 +30,9 @@ export const POST = async (req: Request) => {
})
.filter((msg) => msg !== undefined) as BaseMessage[];
const registry = new ModelRegistry();
const llm = await registry.loadChatModel(
body.chatModel.providerId,
body.chatModel.key,
);
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
chatHistory: chatHistory,
query: body.query,
},
llm,

View File

@@ -8,7 +8,6 @@ import { Settings } from 'lucide-react';
import Link from 'next/link';
import NextError from 'next/error';
import { useChat } from '@/lib/hooks/useChat';
import Loader from './ui/Loader';
import SettingsButtonMobile from './Settings/SettingsButtonMobile';
export interface BaseMessage {
@@ -52,7 +51,7 @@ export interface File {
}
const ChatWindow = () => {
const { hasError, isReady, notFound, messages } = useChat();
const { hasError, notFound, messages } = useChat();
if (hasError) {
return (
<div className="relative">
@@ -68,8 +67,7 @@ const ChatWindow = () => {
);
}
return isReady ? (
notFound ? (
return notFound ? (
<NextError statusCode={404} />
) : (
<div>
@@ -82,11 +80,6 @@ const ChatWindow = () => {
<EmptyChat />
)}
</div>
)
) : (
<div className="flex flex-row items-center justify-center min-h-screen">
<Loader />
</div>
);
};

View File

@@ -20,9 +20,9 @@ const Copy = ({
setCopied(true);
setTimeout(() => setCopied(false), 1000);
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
className="p-2 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{copied ? <Check size={18} /> : <ClipboardList size={18} />}
{copied ? <Check size={16} /> : <ClipboardList size={16} />}
</button>
);
};

View File

@@ -1,4 +1,4 @@
import { ArrowLeftRight } from 'lucide-react';
import { ArrowLeftRight, Repeat } from 'lucide-react';
const Rewrite = ({
rewrite,
@@ -10,12 +10,11 @@ const Rewrite = ({
return (
<button
onClick={() => rewrite(messageId)}
className="py-2 px-3 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white flex flex-row items-center space-x-1"
className="p-2 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white flex flex-row items-center space-x-1"
>
<ArrowLeftRight size={18} />
<p className="text-xs font-medium">Rewrite</p>
<Repeat size={16} />
</button>
);
};
1;
export default Rewrite;

View File

@@ -10,6 +10,7 @@ import {
StopCircle,
Layers3,
Plus,
CornerDownRight,
} from 'lucide-react';
import Markdown, { MarkdownToJSX } from 'markdown-to-jsx';
import Copy from './MessageActions/Copy';
@@ -122,14 +123,14 @@ const MessageBox = ({
</Markdown>
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
<div className="flex flex-row items-center space-x-1">
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4">
<div className="flex flex-row items-center -ml-2">
<Rewrite
rewrite={rewrite}
messageId={section.assistantMessage.messageId}
/>
</div>
<div className="flex flex-row items-center space-x-1">
<div className="flex flex-row items-center -mr-2">
<Copy
initialMessage={section.assistantMessage.content}
section={section}
@@ -142,12 +143,12 @@ const MessageBox = ({
start();
}
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
className="p-2 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{speechStatus === 'started' ? (
<StopCircle size={18} />
<StopCircle size={16} />
) : (
<Volume2 size={18} />
<Volume2 size={16} />
)}
</button>
</div>
@@ -159,7 +160,7 @@ const MessageBox = ({
section.suggestions.length > 0 &&
section.assistantMessage &&
!loading && (
<div className="mt-8 pt-6 border-t border-light-200/50 dark:border-dark-200/50">
<div className="mt-6">
<div className="flex flex-row items-center space-x-2 mb-4">
<Layers3
className="text-black dark:text-white"
@@ -173,20 +174,24 @@ const MessageBox = ({
{section.suggestions.map(
(suggestion: string, i: number) => (
<div key={i}>
{i > 0 && (
<div className="h-px bg-light-200/40 dark:bg-dark-200/40 mx-3" />
)}
<div className="h-px bg-light-200/40 dark:bg-dark-200/40" />
<button
onClick={() => sendMessage(suggestion)}
className="group w-full px-3 py-4 text-left transition-colors duration-200"
className="group w-full py-4 text-left transition-colors duration-200"
>
<div className="flex items-center justify-between gap-3">
<p className="text-sm text-black/70 dark:text-white/70 group-hover:text-[#24A0ED] transition-colors duration-200 leading-relaxed">
<div className="flex flex-row space-x-3 items-center ">
<CornerDownRight
size={17}
className="group-hover:text-sky-400 transition-colors duration-200"
/>
<p className="text-sm text-black/70 dark:text-white/70 group-hover:text-sky-400 transition-colors duration-200 leading-relaxed">
{suggestion}
</p>
</div>
<Plus
size={16}
className="text-black/40 dark:text-white/40 group-hover:text-[#24A0ED] transition-colors duration-200 flex-shrink-0"
className="text-black/40 dark:text-white/40 group-hover:text-sky-400 transition-colors duration-200 flex-shrink-0"
/>
</div>
</button>
@@ -205,11 +210,11 @@ const MessageBox = ({
<div className="lg:sticky lg:top-20 flex flex-col items-center space-y-3 w-full lg:w-3/12 z-30 h-full pb-4">
<SearchImages
query={section.userMessage.content}
chatHistory={chatTurns.slice(0, sectionIndex * 2)}
chatHistory={chatTurns}
messageId={section.assistantMessage.messageId}
/>
<SearchVideos
chatHistory={chatTurns.slice(0, sectionIndex * 2)}
chatHistory={chatTurns}
query={section.userMessage.content}
messageId={section.assistantMessage.messageId}
/>

View File

@@ -97,7 +97,7 @@ const AddModel = ({
>
<DialogPanel className="w-full mx-4 lg:w-[600px] max-h-[85vh] flex flex-col border bg-light-primary dark:bg-dark-primary border-light-secondary dark:border-dark-secondary rounded-lg">
<div className="px-6 pt-6 pb-4">
<h3 className="text-black/90 dark:text-white/90 font-medium">
<h3 className="text-black/90 dark:text-white/90 font-medium text-sm">
Add new {type === 'chat' ? 'chat' : 'embedding'} model
</h3>
</div>
@@ -115,7 +115,7 @@ const AddModel = ({
<input
value={modelName}
onChange={(e) => setModelName(e.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder="e.g., GPT-4"
type="text"
required
@@ -128,7 +128,7 @@ const AddModel = ({
<input
value={modelKey}
onChange={(e) => setModelKey(e.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder="e.g., gpt-4"
type="text"
required
@@ -140,7 +140,7 @@ const AddModel = ({
<button
type="submit"
disabled={loading}
className="px-4 py-2 rounded-lg text-sm bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
className="px-4 py-2 rounded-lg text-[13px] bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
>
{loading ? (
<Loader2 className="animate-spin" size={16} />

View File

@@ -96,7 +96,7 @@ const AddProvider = ({
<>
<button
onClick={() => setOpen(true)}
className="px-3 md:px-4 py-1.5 md:py-2 rounded-lg text-xs sm:text-sm border border-light-200 dark:border-dark-200 text-black dark:text-white bg-light-secondary/50 dark:bg-dark-secondary/50 hover:bg-light-secondary hover:dark:bg-dark-secondary hover:border-light-300 hover:dark:border-dark-300 flex flex-row items-center space-x-1 active:scale-95 transition duration-200"
className="px-3 md:px-4 py-1.5 md:py-2 rounded-lg text-xs sm:text-xs border border-light-200 dark:border-dark-200 text-black dark:text-white bg-light-secondary/50 dark:bg-dark-secondary/50 hover:bg-light-secondary hover:dark:bg-dark-secondary hover:border-light-300 hover:dark:border-dark-300 flex flex-row items-center space-x-1 active:scale-95 transition duration-200"
>
<Plus className="w-3.5 h-3.5 md:w-4 md:h-4" />
<span>Add Connection</span>
@@ -119,7 +119,7 @@ const AddProvider = ({
<DialogPanel className="w-full mx-4 lg:w-[600px] max-h-[85vh] flex flex-col border bg-light-primary dark:bg-dark-primary border-light-secondary dark:border-dark-secondary rounded-lg">
<form onSubmit={handleSubmit} className="flex flex-col flex-1">
<div className="px-6 pt-6 pb-4">
<h3 className="text-black/90 dark:text-white/90 font-medium">
<h3 className="text-black/90 dark:text-white/90 font-medium text-sm">
Add new connection
</h3>
</div>
@@ -178,7 +178,7 @@ const AddProvider = ({
[field.key]: event.target.value,
}))
}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={
(field as StringUIConfigField).placeholder
}
@@ -194,7 +194,7 @@ const AddProvider = ({
<button
type="submit"
disabled={loading}
className="px-4 py-2 rounded-lg text-sm bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
className="px-4 py-2 rounded-lg text-[13px] bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
>
{loading ? (
<Loader2 className="animate-spin" size={16} />

View File

@@ -84,11 +84,11 @@ const ModelProvider = ({
<Plug2 size={14} className="text-sky-500" />
</div>
<div className="flex flex-col">
<p className="text-sm lg:text-base text-black dark:text-white font-medium">
<p className="text-sm lg:text-sm text-black dark:text-white font-medium">
{modelProvider.name}
</p>
{modelCount > 0 && (
<p className="text-[10px] lg:text-xs text-black/50 dark:text-white/50">
<p className="text-[10px] lg:text-[11px] text-black/50 dark:text-white/50">
{modelCount} model{modelCount !== 1 ? 's' : ''} configured
</p>
)}
@@ -109,7 +109,7 @@ const ModelProvider = ({
<div className="flex flex-col gap-y-4 px-5 py-4">
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-[11px] lg:text-xs font-medium text-black/70 dark:text-white/70 uppercase tracking-wide">
<p className="text-[11px] lg:text-[11px] font-medium text-black/70 dark:text-white/70 uppercase tracking-wide">
Chat Models
</p>
{!modelProvider.chatModels.some((m) => m.key === 'error') && (
@@ -122,7 +122,7 @@ const ModelProvider = ({
</div>
<div className="flex flex-col gap-2">
{modelProvider.chatModels.some((m) => m.key === 'error') ? (
<div className="flex flex-row items-center gap-2 text-xs lg:text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<div className="flex flex-row items-center gap-2 text-xs lg:text-xs text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
@@ -144,7 +144,7 @@ const ModelProvider = ({
{modelProvider.chatModels.map((model, index) => (
<div
key={`${modelProvider.id}-chat-${model.key}-${index}`}
className="flex flex-row items-center space-x-1.5 text-xs lg:text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5 border border-light-200 dark:border-dark-200"
className="flex flex-row items-center space-x-1.5 text-xs lg:text-xs text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5 border border-light-200 dark:border-dark-200"
>
<span>{model.name}</span>
<button
@@ -164,7 +164,7 @@ const ModelProvider = ({
<div className="flex flex-col gap-y-2">
<div className="flex flex-row w-full justify-between items-center">
<p className="text-[11px] lg:text-xs font-medium text-black/70 dark:text-white/70 uppercase tracking-wide">
<p className="text-[11px] lg:text-[11px] font-medium text-black/70 dark:text-white/70 uppercase tracking-wide">
Embedding Models
</p>
{!modelProvider.embeddingModels.some((m) => m.key === 'error') && (
@@ -177,7 +177,7 @@ const ModelProvider = ({
</div>
<div className="flex flex-col gap-2">
{modelProvider.embeddingModels.some((m) => m.key === 'error') ? (
<div className="flex flex-row items-center gap-2 text-xs lg:text-sm text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<div className="flex flex-row items-center gap-2 text-xs lg:text-xs text-red-500 dark:text-red-400 rounded-lg bg-red-50 dark:bg-red-950/20 px-3 py-2 border border-red-200 dark:border-red-900/30">
<AlertCircle size={16} className="shrink-0" />
<span className="break-words">
{
@@ -199,7 +199,7 @@ const ModelProvider = ({
{modelProvider.embeddingModels.map((model, index) => (
<div
key={`${modelProvider.id}-embedding-${model.key}-${index}`}
className="flex flex-row items-center space-x-1.5 text-xs lg:text-sm text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5 border border-light-200 dark:border-dark-200"
className="flex flex-row items-center space-x-1.5 text-xs lg:text-xs text-black/70 dark:text-white/70 rounded-lg bg-light-secondary dark:bg-dark-secondary px-3 py-1.5 border border-light-200 dark:border-dark-200"
>
<span>{model.name}</span>
<button

View File

@@ -59,7 +59,7 @@ const ModelSelect = ({
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-base text-black dark:text-white">
<h4 className="text-sm lg:text-sm text-black dark:text-white">
Select {type === 'chat' ? 'Chat Model' : 'Embedding Model'}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
@@ -86,7 +86,7 @@ const ModelSelect = ({
})),
)
}
className="!text-xs lg:!text-sm"
className="!text-xs lg:!text-[13px]"
loading={loading}
disabled={loading}
/>

View File

@@ -20,7 +20,7 @@ const Models = ({
return (
<div className="flex-1 space-y-6 overflow-y-auto py-6">
<div className="flex flex-col px-6 gap-y-4">
<h3 className="text-xs lg:text-sm text-black/70 dark:text-white/70">
<h3 className="text-xs lg:text-xs text-black/70 dark:text-white/70">
Select models
</h3>
<ModelSelect
@@ -38,7 +38,7 @@ const Models = ({
</div>
<div className="border-t border-light-200 dark:border-dark-200" />
<div className="flex flex-row justify-between items-center px-6 ">
<p className="text-xs lg:text-sm text-black/70 dark:text-white/70">
<p className="text-xs lg:text-xs text-black/70 dark:text-white/70">
Manage connections
</p>
<AddProvider modelProviders={fields} setProviders={setProviders} />

View File

@@ -109,7 +109,7 @@ const UpdateProvider = ({
<DialogPanel className="w-full mx-4 lg:w-[600px] max-h-[85vh] flex flex-col border bg-light-primary dark:bg-dark-primary border-light-secondary dark:border-dark-secondary rounded-lg">
<form onSubmit={handleSubmit} className="flex flex-col flex-1">
<div className="px-6 pt-6 pb-4">
<h3 className="text-black/90 dark:text-white/90 font-medium">
<h3 className="text-black/90 dark:text-white/90 font-medium text-sm">
Update connection
</h3>
</div>
@@ -150,7 +150,7 @@ const UpdateProvider = ({
[field.key]: event.target.value,
}))
}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-4 py-3 pr-10 text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={
(field as StringUIConfigField).placeholder
}
@@ -166,7 +166,7 @@ const UpdateProvider = ({
<button
type="submit"
disabled={loading}
className="px-4 py-2 rounded-lg text-sm bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
className="px-4 py-2 rounded-lg text-[13px] bg-sky-500 text-white font-medium disabled:opacity-85 hover:opacity-85 active:scale-95 transition duration-200"
>
{loading ? (
<Loader2 className="animate-spin" size={16} />

View File

@@ -0,0 +1,29 @@
import { UIConfigField } from '@/lib/config/types';
import SettingsField from '../SettingsField';
const Personalization = ({
fields,
values,
}: {
fields: UIConfigField[];
values: Record<string, any>;
}) => {
return (
<div className="flex-1 space-y-6 overflow-y-auto px-6 py-6">
{fields.map((field) => (
<SettingsField
key={field.key}
field={field}
value={
(field.scope === 'client'
? localStorage.getItem(field.key)
: values[field.key]) ?? field.default
}
dataAdd="personalization"
/>
))}
</div>
);
};
export default Personalization;

View File

@@ -1,7 +1,7 @@
import { UIConfigField } from '@/lib/config/types';
import SettingsField from '../SettingsField';
const General = ({
const Preferences = ({
fields,
values,
}: {
@@ -19,11 +19,11 @@ const General = ({
? localStorage.getItem(field.key)
: values[field.key]) ?? field.default
}
dataAdd="general"
dataAdd="preferences"
/>
))}
</div>
);
};
export default General;
export default Preferences;

View File

@@ -4,9 +4,10 @@ import {
BrainCog,
ChevronLeft,
Search,
Settings,
Sliders,
ToggleRight,
} from 'lucide-react';
import General from './Sections/General';
import Preferences from './Sections/Preferences';
import { motion } from 'framer-motion';
import { useEffect, useState } from 'react';
import { toast } from 'sonner';
@@ -15,15 +16,24 @@ import { cn } from '@/lib/utils';
import Models from './Sections/Models/Section';
import SearchSection from './Sections/Search';
import Select from '@/components/ui/Select';
import Personalization from './Sections/Personalization';
const sections = [
{
key: 'general',
name: 'General',
description: 'Adjust common settings.',
icon: Settings,
component: General,
dataAdd: 'general',
key: 'preferences',
name: 'Preferences',
description: 'Customize your application preferences.',
icon: Sliders,
component: Preferences,
dataAdd: 'preferences',
},
{
key: 'personalization',
name: 'Personalization',
description: 'Customize the behavior and tone of the model.',
icon: ToggleRight,
component: Personalization,
dataAdd: 'personalization',
},
{
key: 'models',
@@ -166,7 +176,7 @@ const SettingsDialogue = ({
<div className="flex flex-1 flex-col overflow-hidden">
<div className="border-b border-light-200/60 px-6 pb-6 lg:pt-6 dark:border-dark-200/60 flex-shrink-0">
<div className="flex flex-col">
<h4 className="font-medium text-black dark:text-white text-sm lg:text-base">
<h4 className="font-medium text-black dark:text-white text-sm lg:text-sm">
{selectedSection.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">

View File

@@ -1,6 +1,7 @@
import {
SelectUIConfigField,
StringUIConfigField,
SwitchUIConfigField,
TextareaUIConfigField,
UIConfigField,
} from '@/lib/config/types';
@@ -9,6 +10,7 @@ import Select from '../ui/Select';
import { toast } from 'sonner';
import { useTheme } from 'next-themes';
import { Loader2 } from 'lucide-react';
import { Switch } from '@headlessui/react';
const SettingsSelect = ({
field,
@@ -62,7 +64,7 @@ const SettingsSelect = ({
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-base text-black dark:text-white">
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
@@ -133,7 +135,7 @@ const SettingsInput = ({
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-base text-black dark:text-white">
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
@@ -145,7 +147,7 @@ const SettingsInput = ({
value={value ?? field.default ?? ''}
onChange={(event) => setValue(event.target.value)}
onBlur={(event) => handleSave(event.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={field.placeholder}
type="text"
disabled={loading}
@@ -209,7 +211,7 @@ const SettingsTextarea = ({
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="space-y-3 lg:space-y-5">
<div>
<h4 className="text-sm lg:text-base text-black dark:text-white">
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
@@ -221,7 +223,7 @@ const SettingsTextarea = ({
value={value ?? field.default ?? ''}
onChange={(event) => setValue(event.target.value)}
onBlur={(event) => handleSave(event.target.value)}
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-sm text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
className="w-full rounded-lg border border-light-200 dark:border-dark-200 bg-light-primary dark:bg-dark-primary px-3 py-2 lg:px-4 lg:py-3 pr-10 !text-xs lg:!text-[13px] text-black/80 dark:text-white/80 placeholder:text-black/40 dark:placeholder:text-white/40 focus-visible:outline-none focus-visible:border-light-300 dark:focus-visible:border-dark-300 transition-colors disabled:cursor-not-allowed disabled:opacity-60"
placeholder={field.placeholder}
rows={4}
disabled={loading}
@@ -237,6 +239,79 @@ const SettingsTextarea = ({
);
};
const SettingsSwitch = ({
field,
value,
setValue,
dataAdd,
}: {
field: SwitchUIConfigField;
value?: any;
setValue: (value: any) => void;
dataAdd: string;
}) => {
const [loading, setLoading] = useState(false);
const handleSave = async (newValue: boolean) => {
setLoading(true);
setValue(newValue);
try {
if (field.scope === 'client') {
localStorage.setItem(field.key, String(newValue));
} else {
const res = await fetch('/api/config', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
key: `${dataAdd}.${field.key}`,
value: newValue,
}),
});
if (!res.ok) {
console.error('Failed to save config:', await res.text());
throw new Error('Failed to save configuration');
}
}
} catch (error) {
console.error('Error saving config:', error);
toast.error('Failed to save configuration.');
} finally {
setTimeout(() => setLoading(false), 150);
}
};
const isChecked = value === true || value === 'true';
return (
<section className="rounded-xl border border-light-200 bg-light-primary/80 p-4 lg:p-6 transition-colors dark:border-dark-200 dark:bg-dark-primary/80">
<div className="flex flex-row items-center space-x-3 lg:space-x-5 w-full justify-between">
<div>
<h4 className="text-sm lg:text-sm text-black dark:text-white">
{field.name}
</h4>
<p className="text-[11px] lg:text-xs text-black/50 dark:text-white/50">
{field.description}
</p>
</div>
<Switch
checked={isChecked}
onChange={handleSave}
disabled={loading}
className="group relative flex h-6 w-12 shrink-0 cursor-pointer rounded-full bg-white/10 p-1 duration-200 ease-in-out focus:outline-none transition-colors disabled:opacity-60 disabled:cursor-not-allowed data-[checked]:bg-sky-500"
>
<span
aria-hidden="true"
className="pointer-events-none inline-block size-4 translate-x-0 rounded-full bg-white shadow-lg ring-0 transition duration-200 ease-in-out group-data-[checked]:translate-x-6"
/>
</Switch>
</div>
</section>
);
};
const SettingsField = ({
field,
value,
@@ -276,6 +351,15 @@ const SettingsField = ({
dataAdd={dataAdd}
/>
);
case 'switch':
return (
<SettingsSwitch
field={field}
value={val}
setValue={setVal}
dataAdd={dataAdd}
/>
);
default:
return <div>Unsupported field type: {field.type}</div>;
}
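For context, a field of the new switch type would be declared roughly like this; the key and copy are hypothetical, modeled on the getAutoMediaSearch commit, and the real SwitchUIConfigField shape in '@/lib/config/types' may differ:

// Hypothetical switch field definition (illustrative values only).
const autoMediaSearchField = {
  key: 'autoMediaSearch',
  name: 'Automatic Media Search',
  description: 'Automatically run image and video searches for each answer.',
  type: 'switch' as const,
  default: true,
  scope: 'client' as const,
};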

View File

@@ -0,0 +1,65 @@
/* I don't think these can be classified as agents, but to keep the structure consistent I'll keep them here */
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '@/lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '@/lib/outputParsers/lineOutputParser';
import { imageSearchFewShots, imageSearchPrompt } from '@/lib/prompts/media/image';
type ImageSearchChainInput = {
chatHistory: BaseMessage[];
query: string;
};
type ImageSearchResult = {
img_src: string;
url: string;
title: string;
}
const outputParser = new LineOutputParser({
key: 'query',
})
const searchImages = async (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const chatPrompt = await ChatPromptTemplate.fromMessages([
new SystemMessage(imageSearchPrompt),
...imageSearchFewShots,
new HumanMessage(`<conversation>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<follow_up>\n${input.query}\n</follow_up>`)
]).formatMessages({})
const res = await llm.invoke(chatPrompt)
const query = await outputParser.invoke(res)
const searchRes = await searchSearxng(query!, {
engines: ['bing images', 'google images'],
});
const images: ImageSearchResult[] = [];
searchRes.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
};
export default searchImages;
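A hedged usage sketch mirroring the route wiring earlier in this diff; the Ollama model is only an example, and any LangChain BaseChatModel would do:

import searchImages from '@/lib/agents/media/image';
import { ChatOllama } from '@langchain/ollama';
import { AIMessage, HumanMessage } from '@langchain/core/messages';

const llm = new ChatOllama({ model: 'llama3.1' }); // illustrative model choice
const images = await searchImages(
  {
    chatHistory: [
      new HumanMessage('Tell me about the Eiffel Tower'),
      new AIMessage('The Eiffel Tower is a wrought-iron tower in Paris...'),
    ],
    query: 'Show me what it looks like',
  },
  llm,
);
// Each result has { img_src, url, title }; output is capped at 10 entries.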

View File

@@ -0,0 +1,65 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { searchSearxng } from '@/lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '@/lib/outputParsers/lineOutputParser';
import { videoSearchFewShots, videoSearchPrompt } from '@/lib/prompts/media/videos';
type VideoSearchChainInput = {
chatHistory: BaseMessage[];
query: string;
};
type VideoSearchResult = {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const outputParser = new LineOutputParser({
key: 'query',
});
const searchVideos = async (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const chatPrompt = await ChatPromptTemplate.fromMessages([
new SystemMessage(videoSearchPrompt),
...videoSearchFewShots,
new HumanMessage(`<conversation>${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<follow_up>\n${input.query}\n</follow_up>`)
]).formatMessages({})
const res = await llm.invoke(chatPrompt)
const query = await outputParser.invoke(res)
const searchRes = await searchSearxng(query!, {
engines: ['youtube'],
});
const videos: VideoSearchResult[] = [];
searchRes.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
};
export default searchVideos;

View File

@@ -0,0 +1,72 @@
import z from 'zod';
import { ClassifierInput, ClassifierOutput } from '../types';
import { WidgetRegistry } from '../widgets';
import { IntentRegistry } from './intents';
import { getClassifierPrompt } from '@/lib/prompts/search/classifier';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
class Classifier {
async classify(input: ClassifierInput): Promise<ClassifierOutput> {
const availableIntents = IntentRegistry.getAvailableIntents({
sources: input.enabledSources,
});
const availableWidgets = WidgetRegistry.getAll();
const classificationSchema = z.object({
skipSearch: z
.boolean()
.describe(
'Set to true to SKIP search. Skip ONLY when: (1) widgets alone fully answer the query (e.g., weather, stocks, calculator), (2) simple greetings or writing tasks (NOT questions). Set to false for ANY question or information request.',
),
standaloneFollowUp: z
.string()
.describe(
'A self-contained, context-independent reformulation of the user\'s question. Must include all necessary context from chat history, replace pronouns with specific nouns, and be clear enough to answer without seeing the conversation. Keep the same complexity as the original question.',
),
intents: z
.array(z.enum(availableIntents.map((i) => i.name)))
.describe(
'The intent(s) that best describe how to fulfill the user\'s query. Can include multiple intents (e.g., [\'web_search\', \'widget_response\'] for \'weather in NYC and recent news\'). Always include at least one intent when applicable.',
),
widgets: z
.array(z.union(availableWidgets.map((w) => w.schema)))
.describe(
'Widgets that can display structured data to answer (fully or partially) the query. Include all applicable widgets regardless of skipSearch value.',
),
});
const classifierPrompt = getClassifierPrompt({
intentDesc: IntentRegistry.getDescriptions({
sources: input.enabledSources,
}),
widgetDesc: WidgetRegistry.getDescriptions(),
});
const res = await input.llm.generateObject<
z.infer<typeof classificationSchema>
>({
messages: [
{
role: 'system',
content: classifierPrompt,
},
{
role: 'user',
content: `<conversation>${formatChatHistoryAsString(input.chatHistory)}</conversation>\n\n<query>${input.query}</query>`,
},
],
schema: classificationSchema,
});
res.widgets = res.widgets.map((widgetConfig) => {
return {
type: widgetConfig.type,
params: widgetConfig,
};
});
return res as ClassifierOutput;
}
}
export default Classifier;
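The notable move here is that the classification schema is assembled at runtime from whatever intents and widgets happen to be registered. A standalone sketch of the same pattern with hypothetical widgets:

import { z } from 'zod';

// Hypothetical widget schemas standing in for WidgetRegistry entries.
const weather = z.object({ type: z.literal('weather'), city: z.string() });
const stocks = z.object({ type: z.literal('stocks'), ticker: z.string() });

const classificationSchema = z.object({
  skipSearch: z.boolean(),
  standaloneFollowUp: z.string(),
  intents: z.array(z.enum(['web_search', 'widget_response'])),
  widgets: z.array(z.union([weather, stocks])),
});

// A structured-output response would be validated against this shape:
const parsed = classificationSchema.parse({
  skipSearch: true,
  standaloneFollowUp: 'What is the weather in New York City right now?',
  intents: ['widget_response'],
  widgets: [{ type: 'weather', city: 'New York City' }],
});
console.log(parsed.widgets[0]); // { type: 'weather', city: 'New York City' }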

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const academicSearchIntent: Intent = {
name: 'academic_search',
description:
'Use this intent to find scholarly articles, research papers, and academic resources when the user is seeking credible and authoritative information on a specific topic.',
requiresSearch: true,
enabled: (config) => config.sources.includes('academic'),
};
export default academicSearchIntent;

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const discussionSearchIntent: Intent = {
name: 'discussion_search',
description:
'Use this intent to search through discussion forums, community boards, or social media platforms when the user is looking for opinions, experiences, or community-driven information on a specific topic.',
requiresSearch: true,
enabled: (config) => config.sources.includes('discussions'),
};
export default discussionSearchIntent;

View File

@@ -0,0 +1,14 @@
import academicSearchIntent from './academicSearch';
import discussionSearchIntent from './discussionSearch';
import IntentRegistry from './registry';
import webSearchIntent from './webSearch';
import widgetResponseIntent from './widgetResponse';
import writingTaskIntent from './writingTask';
IntentRegistry.register(webSearchIntent);
IntentRegistry.register(academicSearchIntent);
IntentRegistry.register(discussionSearchIntent);
IntentRegistry.register(widgetResponseIntent);
IntentRegistry.register(writingTaskIntent);
export { IntentRegistry };

View File

@@ -0,0 +1,29 @@
import { Intent, SearchAgentConfig, SearchSources } from '../../types';
class IntentRegistry {
private static intents = new Map<string, Intent>();
static register(intent: Intent) {
this.intents.set(intent.name, intent);
}
static get(name: string): Intent | undefined {
return this.intents.get(name);
}
static getAvailableIntents(config: { sources: SearchSources[] }): Intent[] {
return Array.from(
this.intents.values().filter((intent) => intent.enabled(config)),
);
}
static getDescriptions(config: { sources: SearchSources[] }): string {
const availableIntents = this.getAvailableIntents(config);
return availableIntents
.map((intent) => `${intent.name}: ${intent.description}`)
.join('\n\n');
}
}
export default IntentRegistry;
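One portability note: getAvailableIntents calls .filter directly on the Map iterator returned by values(), which relies on Iterator Helpers (available in recent runtimes such as Node 22+). Converting to an array first behaves identically everywhere; a standalone sketch:

type Intent = { name: string; enabled: (c: { sources: string[] }) => boolean };

const intents = new Map<string, Intent>([
  ['web_search', { name: 'web_search', enabled: (c) => c.sources.includes('web') }],
]);

// Array.from first, then filter: no Iterator Helpers required.
const available = Array.from(intents.values()).filter((i) =>
  i.enabled({ sources: ['web'] }),
);
console.log(available.map((i) => i.name)); // ['web_search']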

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const webSearchIntent: Intent = {
name: 'web_search',
description:
'Use this intent to find current information from the web when the user is asking a question or needs up-to-date information that cannot be provided by widgets or other intents.',
requiresSearch: true,
enabled: (config) => config.sources.includes('web'),
};
export default webSearchIntent;

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const widgetResponseIntent: Intent = {
name: 'widget_response',
description:
'Use this intent to respond to user queries using available widgets when the required information can be obtained from them.',
requiresSearch: false,
enabled: (config) => true,
};
export default widgetResponseIntent;

View File

@@ -0,0 +1,11 @@
import { Intent } from '../../types';
const writingTaskIntent: Intent = {
name: 'writing_task',
description:
'Use this intent to assist users with writing tasks such as drafting emails, creating documents, or generating content based on their instructions or greetings.',
requiresSearch: false,
enabled: (config) => true,
};
export default writingTaskIntent;

View File

@@ -0,0 +1,48 @@
import { ResearcherOutput, SearchAgentInput } from './types';
import SessionManager from '@/lib/session';
import Classifier from './classifier';
import { WidgetRegistry } from './widgets';
import Researcher from './researcher';
class SearchAgent {
async searchAsync(session: SessionManager, input: SearchAgentInput) {
const classifier = new Classifier();
const classification = await classifier.classify({
chatHistory: input.chatHistory,
enabledSources: input.config.sources,
query: input.followUp,
llm: input.config.llm,
});
session.emit('data', {
type: 'classification',
classification: classification,
});
const widgetPromise = WidgetRegistry.executeAll(classification.widgets, {
llm: input.config.llm,
embedding: input.config.embedding,
session: session,
});
let searchPromise: Promise<ResearcherOutput> | null = null;
if (!classification.skipSearch) {
const researcher = new Researcher();
searchPromise = researcher.research(session, {
chatHistory: input.chatHistory,
followUp: input.followUp,
classification: classification,
config: input.config,
});
}
const [widgetOutputs, searchResults] = await Promise.all([
widgetPromise,
searchPromise,
]);
}
}
export default SearchAgent;

View File

@@ -0,0 +1,19 @@
import z from 'zod';
import { ResearchAction } from '../../types';
const doneAction: ResearchAction<any> = {
name: 'done',
description:
"Indicates that the research process is complete and no further actions are needed. Use this action when you have gathered sufficient information to answer the user's query.",
enabled: (_) => true,
schema: z.object({
type: z.literal('done'),
}),
execute: async (params, additionalConfig) => {
return {
type: 'done',
};
},
};
export default doneAction;

View File

@@ -0,0 +1,8 @@
import doneAction from './done';
import ActionRegistry from './registry';
import webSearchAction from './webSearch';
ActionRegistry.register(webSearchAction);
ActionRegistry.register(doneAction);
export { ActionRegistry };

View File

@@ -0,0 +1,73 @@
import {
ActionConfig,
ActionOutput,
AdditionalConfig,
ClassifierOutput,
ResearchAction,
} from '../../types';
class ActionRegistry {
private static actions: Map<string, ResearchAction> = new Map();
static register(action: ResearchAction<any>) {
this.actions.set(action.name, action);
}
static get(name: string): ResearchAction | undefined {
return this.actions.get(name);
}
static getAvailableActions(config: {
classification: ClassifierOutput;
}): ResearchAction[] {
return Array.from(
this.actions.values().filter((action) => action.enabled(config)),
);
}
static getAvailableActionsDescriptions(config: {
classification: ClassifierOutput;
}): string {
const availableActions = this.getAvailableActions(config);
return availableActions
.map((action) => `------------\n##${action.name}\n${action.description}`)
.join('\n\n');
}
static async execute(
name: string,
params: any,
additionalConfig: AdditionalConfig,
) {
const action = this.actions.get(name);
if (!action) {
throw new Error(`Action with name ${name} not found`);
}
return action.execute(params, additionalConfig);
}
static async executeAll(
actions: ActionConfig[],
additionalConfig: AdditionalConfig,
): Promise<ActionOutput[]> {
const results: ActionOutput[] = [];
await Promise.all(
actions.map(async (actionConfig) => {
const output = await this.execute(
actionConfig.type,
actionConfig.params,
additionalConfig,
);
results.push(output);
}),
);
return results;
}
}
export default ActionRegistry;
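Note that executeAll pushes results from inside the Promise.all callbacks, so the output order follows completion order rather than the order of the actions array. If callers ever need alignment, letting Promise.all collect the mapped values preserves input order; a standalone demonstration:

// Promise.all resolves to results in input order even when the underlying
// tasks finish out of order.
const delaysMs = [30, 10, 20];
const ordered = await Promise.all(
  delaysMs.map(async (ms) => {
    await new Promise((resolve) => setTimeout(resolve, ms));
    return ms;
  }),
);
console.log(ordered); // [30, 10, 20]: input order, not completion order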

View File

@@ -0,0 +1,54 @@
import z from 'zod';
import { ResearchAction } from '../../types';
import { searchSearxng } from '@/lib/searxng';
const actionSchema = z.object({
type: z.literal('web_search'),
queries: z
.array(z.string())
.describe('An array of search queries to perform web searches for.'),
});
const actionDescription = `
You have to use this action aggressively to find relevant information from the web to answer user queries. You can combine this action with other actions to gather comprehensive data. Always ensure that you provide accurate and up-to-date information by leveraging web search results.
When this action is present, you must use it to obtain current information from the web.
### How to use:
1. For fast search mode, you can use this action once. Make sure to cover all aspects of the user's query in that single search.
2. If you're on quality mode, you'll get to use this action up to two times. Use the first search to gather general information, and the second search to fill in any gaps or get more specific details based on the initial findings.
3. If you're set on Deep research mode, then you will get to use this action multiple times to gather more information. Use your judgment to decide when additional searches are necessary to provide a thorough and accurate response.
Input: An array of search queries. Make sure the queries are relevant to the user's request and cover different aspects if necessary. You can include a maximum of 3 queries. Make sure the queries are SEO-friendly keywords rather than full sentences, suitable for a search engine like Google or Bing.
`;
const webSearchAction: ResearchAction<typeof actionSchema> = {
name: 'web_search',
description: actionDescription,
schema: actionSchema,
enabled: (config) => config.classification.intents.includes('web_search'),
execute: async (input, _) => {
let results: Chunk[] = [];
const search = async (q: string) => {
const res = await searchSearxng(q);
res.results.forEach((r) => {
results.push({
content: r.content || r.title,
metadata: {
title: r.title,
url: r.url,
},
});
});
};
await Promise.all(input.queries.map(search));
return {
type: 'search_results',
results,
};
},
};
export default webSearchAction;
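
A quick way to sanity-check the action in isolation, inside an async context and with a reachable SearXNG instance (the stubbed config is fine here because this action ignores it):

import webSearchAction from './webSearch';

const output = await webSearchAction.execute(
  { type: 'web_search', queries: ['spacex starship launch date'] },
  {} as any, // AdditionalConfig is unused by this action.
);
// output.type === 'search_results'; output.results holds Chunk objects.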

View File

@@ -0,0 +1,113 @@
import z from 'zod';
import {
ActionConfig,
ActionOutput,
ResearcherInput,
ResearcherOutput,
} from '../types';
import { ActionRegistry } from './actions';
import { getResearcherPrompt } from '@/lib/prompts/search/researcher';
import SessionManager from '@/lib/session';
class Researcher {
async research(
session: SessionManager,
input: ResearcherInput,
): Promise<ResearcherOutput> {
let findings: string = '';
let actionOutput: ActionOutput[] = [];
let maxIteration =
input.config.mode === 'fast'
? 1
: input.config.mode === 'balanced'
? 3
: 25;
const availableActions = ActionRegistry.getAvailableActions({
classification: input.classification,
});
const schema = z.object({
reasoning: z
.string()
.describe('The reasoning behind choosing the next action.'),
action: z
.union(availableActions.map((a) => a.schema))
.describe('The action to be performed next.'),
});
const availableActionsDescription =
ActionRegistry.getAvailableActionsDescriptions({
classification: input.classification,
});
for (let i = 0; i < maxIteration; i++) {
const researcherPrompt = getResearcherPrompt(availableActionsDescription);
const res = await input.config.llm.generateObject<z.infer<typeof schema>>(
{
messages: [
{
role: 'system',
content: researcherPrompt,
},
{
role: 'user',
content: `
<research_query>
${input.classification.standaloneFollowUp}
</research_query>
<previous_actions>
${findings}
</previous_actions>
`,
},
],
schema,
},
);
if (res.action.type === 'done') {
console.log('Research complete - "done" action selected');
break;
}
const actionConfig: ActionConfig = {
type: res.action.type as string,
params: res.action,
};
findings += 'Reasoning: ' + res.reasoning + '\n';
findings += `Executing Action: ${actionConfig.type} with params ${JSON.stringify(actionConfig.params)}\n`;
const actionResult = await ActionRegistry.execute(
actionConfig.type,
actionConfig.params,
{
llm: input.config.llm,
embedding: input.config.embedding,
session: session,
},
);
actionOutput.push(actionResult);
if (actionResult.type === 'search_results') {
findings += actionResult.results
.map(
(r) =>
`Title: ${r.metadata.title}\nURL: ${r.metadata.url}\nContent: ${r.content}\n`,
)
.join('\n');
}
}
return {
findings: actionOutput,
};
}
}
export default Researcher;
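
Roughly how the loop is invoked from the agent layer, assuming llm, embedding, session, and a classifier result are already in hand (names illustrative):

const researcher = new Researcher();
const { findings } = await researcher.research(session, {
  chatHistory: [],
  followUp: 'How do heat pumps work?',
  classification,
  config: { sources: ['web'], llm, embedding, mode: 'balanced' },
});
// findings is the ActionOutput[] accumulated across iterations.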

View File

@@ -0,0 +1,106 @@
import z from 'zod';
import BaseLLM from '../../models/base/llm';
import BaseEmbedding from '@/lib/models/base/embedding';
import SessionManager from '@/lib/session';
export type SearchSources = 'web' | 'discussions' | 'academic';
export type SearchAgentConfig = {
sources: SearchSources[];
llm: BaseLLM<any>;
embedding: BaseEmbedding<any>;
mode: 'fast' | 'balanced' | 'deep_research';
};
export type SearchAgentInput = {
chatHistory: ChatTurnMessage[];
followUp: string;
config: SearchAgentConfig;
};
export interface Intent {
name: string;
description: string;
requiresSearch: boolean;
enabled: (config: { sources: SearchSources[] }) => boolean;
}
export type Widget<TSchema extends z.ZodObject<any> = z.ZodObject<any>> = {
name: string;
description: string;
schema: TSchema;
execute: (
params: z.infer<TSchema>,
additionalConfig: AdditionalConfig,
) => Promise<WidgetOutput>;
};
export type WidgetConfig = {
type: string;
params: Record<string, any>;
};
export type WidgetOutput = {
type: string;
data: any;
};
export type ClassifierInput = {
llm: BaseLLM<any>;
enabledSources: SearchSources[];
query: string;
chatHistory: ChatTurnMessage[];
};
export type ClassifierOutput = {
skipSearch: boolean;
standaloneFollowUp: string;
intents: string[];
widgets: WidgetConfig[];
};
export type AdditionalConfig = {
llm: BaseLLM<any>;
embedding: BaseEmbedding<any>;
session: SessionManager;
};
export type ResearcherInput = {
chatHistory: ChatTurnMessage[];
followUp: string;
classification: ClassifierOutput;
config: SearchAgentConfig;
};
export type ResearcherOutput = {
findings: ActionOutput[];
};
export type SearchActionOutput = {
type: 'search_results';
results: Chunk[];
};
export type DoneActionOutput = {
type: 'done';
};
export type ActionOutput = SearchActionOutput | DoneActionOutput;
export interface ResearchAction<
TSchema extends z.ZodObject<any> = z.ZodObject<any>,
> {
name: string;
description: string;
schema: TSchema;
enabled: (config: { classification: ClassifierOutput }) => boolean;
execute: (
params: z.infer<TSchema>,
additionalConfig: AdditionalConfig,
) => Promise<ActionOutput>;
}
export type ActionConfig = {
type: string;
params: Record<string, any>;
};
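
To illustrate how the ResearchAction generic ties the schema to the execute params, a hypothetical no-op action (not part of the diff):

import z from 'zod';
import { ResearchAction } from './types';

const pingSchema = z.object({ type: z.literal('ping') });

const pingAction: ResearchAction<typeof pingSchema> = {
  name: 'ping',
  description: 'Illustration only; immediately reports done.',
  schema: pingSchema,
  enabled: () => true,
  // params is inferred as { type: 'ping' } from pingSchema.
  execute: async (_params, _config) => ({ type: 'done' }),
};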

View File

@@ -0,0 +1,6 @@
import WidgetRegistry from './registry';
import weatherWidget from './weatherWidget';
WidgetRegistry.register(weatherWidget);
export { WidgetRegistry };

View File

@@ -0,0 +1,65 @@
import {
AdditionalConfig,
SearchAgentConfig,
Widget,
WidgetConfig,
WidgetOutput,
} from '../types';
class WidgetRegistry {
private static widgets = new Map<string, Widget>();
static register(widget: Widget<any>) {
this.widgets.set(widget.name, widget);
}
static get(name: string): Widget | undefined {
return this.widgets.get(name);
}
static getAll(): Widget[] {
return Array.from(this.widgets.values());
}
static getDescriptions(): string {
return Array.from(this.widgets.values())
.map((widget) => `${widget.name}: ${widget.description}`)
.join('\n\n');
}
static async execute(
name: string,
params: any,
config: AdditionalConfig,
): Promise<WidgetOutput> {
const widget = this.get(name);
if (!widget) {
throw new Error(`Widget with name ${name} not found`);
}
return widget.execute(params, config);
}
static async executeAll(
widgets: WidgetConfig[],
additionalConfig: AdditionalConfig,
): Promise<WidgetOutput[]> {
// Promise.all preserves input order, unlike pushing from concurrent callbacks.
return Promise.all(
widgets.map((widgetConfig) =>
this.execute(widgetConfig.type, widgetConfig.params, additionalConfig),
),
);
}
}
export default WidgetRegistry;
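
Expected call site, presumably from the session pipeline once the classifier has picked widgets (wiring here is a sketch):

const widgetOutputs = await WidgetRegistry.executeAll(
  classification.widgets, // e.g. [{ type: 'weather', params: { location: 'London, UK' } }]
  { llm, embedding, session },
);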

View File

@@ -0,0 +1,123 @@
import z from 'zod';
import { Widget } from '../types';
const WeatherWidgetSchema = z.object({
type: z.literal('weather'),
location: z
.string()
.describe(
'Human-readable location name (e.g., "New York, NY, USA", "London, UK"). Use this OR lat/lon coordinates, never both. Leave empty string if providing coordinates.',
),
lat: z
.number()
.optional()
.describe(
'Latitude coordinate in decimal degrees (e.g., 40.7128). Only use when location name is empty.',
),
lon: z
.number()
.optional()
.describe(
'Longitude coordinate in decimal degrees (e.g., -74.0060). Only use when location name is empty.',
),
});
const weatherWidget = {
name: 'weather',
description:
'Provides current weather information for a specified location. It can return details such as temperature, humidity, wind speed, and weather conditions. It needs either a location name or latitude/longitude coordinates to function.',
schema: WeatherWidgetSchema,
execute: async (params, _) => {
if (
params.location === '' &&
(params.lat === undefined || params.lon === undefined)
) {
throw new Error(
'Either location name or both latitude and longitude must be provided.',
);
}
if (params.location !== '') {
const openStreetMapUrl = `https://nominatim.openstreetmap.org/search?q=${encodeURIComponent(params.location)}&format=json&limit=1`;
const locationRes = await fetch(openStreetMapUrl, {
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
});
const data = await locationRes.json();
const location = data[0];
if (!location) {
throw new Error(
`Could not find coordinates for location: ${params.location}`,
);
}
const weatherRes = await fetch(
`https://api.open-meteo.com/v1/forecast?latitude=${location.lat}&longitude=${location.lon}&current_weather=true`,
{
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
},
);
const weatherData = await weatherRes.json();
/* Minimal implementation for now, just to verify the backend works; the frontend iteration will return richer data. */
return {
type: 'weather',
data: {
location: params.location,
latitude: location.lat,
longitude: location.lon,
weather: weatherData.current_weather,
},
};
} else if (params.lat !== undefined && params.lon !== undefined) {
const [weatherRes, locationRes] = await Promise.all([
fetch(
`https://api.open-meteo.com/v1/forecast?latitude=${params.lat}&longitude=${params.lon}&current_weather=true`,
{
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
},
),
fetch(
`https://nominatim.openstreetmap.org/reverse?lat=${params.lat}&lon=${params.lon}&format=json`,
{
headers: {
'User-Agent': 'Perplexica',
'Content-Type': 'application/json',
},
},
),
]);
const weatherData = await weatherRes.json();
const locationData = await locationRes.json();
return {
type: 'weather',
data: {
location: locationData.display_name,
latitude: params.lat,
longitude: params.lon,
weather: weatherData.current_weather,
},
};
}
return {
type: 'weather',
data: null,
};
},
} satisfies Widget<typeof WeatherWidgetSchema>;
export default weatherWidget;
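
With the lat/lon fields optional (see the schema above), a direct call might look like this, assuming network access to Nominatim and Open-Meteo:

const out = await weatherWidget.execute(
  { type: 'weather', location: 'London, UK' },
  {} as any, // AdditionalConfig is unused by this widget.
);
// out.data.weather carries Open-Meteo's current_weather payload.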

View File

@@ -0,0 +1,32 @@
import ListLineOutputParser from '@/lib/outputParsers/listLineOutputParser';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { suggestionGeneratorPrompt } from '@/lib/prompts/suggestions';
type SuggestionGeneratorInput = {
chatHistory: BaseMessage[];
};
const outputParser = new ListLineOutputParser({
key: 'suggestions',
});
const generateSuggestions = async (
input: SuggestionGeneratorInput,
llm: BaseChatModel,
) => {
const chatPrompt = await ChatPromptTemplate.fromMessages([
new SystemMessage(suggestionGeneratorPrompt),
new HumanMessage(
`<conversation>${formatChatHistoryAsString(input.chatHistory)}</conversation>`,
),
]).formatMessages({});
const res = await llm.invoke(chatPrompt);
const suggestions = await outputParser.invoke(res);
return suggestions;
};
export default generateSuggestions;
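
A minimal invocation sketch, assuming any LangChain BaseChatModel instance as llm:

import { AIMessage, HumanMessage } from '@langchain/core/messages';

const suggestions = await generateSuggestions(
  {
    chatHistory: [
      new HumanMessage('Tell me about SpaceX'),
      new AIMessage('SpaceX is a private space company...'),
    ],
  },
  llm,
);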

View File

@@ -1,105 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type ImageSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface ImageSearchResult {
img_src: string;
url: string;
title: string;
}
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: ImageSearchChainInput) => {
return input.query;
},
}),
ChatPromptTemplate.fromMessages([
['system', imageSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
],
['assistant', '<query>A cat</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
],
['assistant', '<query>Car working</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
const images: ImageSearchResult[] = [];
res.results.forEach((result) => {
if (result.img_src && result.url && result.title) {
images.push({
img_src: result.img_src,
url: result.url,
title: result.title,
});
}
});
return images.slice(0, 10);
}),
]);
};
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
) => {
const imageSearchChain = createImageSearchChain(llm);
return imageSearchChain.invoke(input);
};
export default handleImageSearch;

View File

@@ -1,55 +0,0 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';
const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestion should be relevant to the conversation that can be used by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length and are informative and relevant to the conversation.
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:
<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>
Conversation:
{chat_history}
`;
type SuggestionGeneratorInput = {
chat_history: BaseMessage[];
};
const outputParser = new ListLineOutputParser({
key: 'suggestions',
});
const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: SuggestionGeneratorInput) =>
formatChatHistoryAsString(input.chat_history),
}),
PromptTemplate.fromTemplate(suggestionGeneratorPrompt),
llm,
outputParser,
]);
};
const generateSuggestions = (
input: SuggestionGeneratorInput,
llm: BaseChatModel,
) => {
(llm as unknown as ChatOpenAI).temperature = 0;
const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
return suggestionGeneratorChain.invoke(input);
};
export default generateSuggestions;

View File

@@ -1,110 +0,0 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const videoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
interface VideoSearchResult {
img_src: string;
url: string;
title: string;
iframe_src: string;
}
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
ChatPromptTemplate.fromMessages([
['system', videoSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
],
['assistant', '<query>How does a car work?</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
],
['assistant', '<query>Theory of relativity</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['youtube'],
});
const videos: VideoSearchResult[] = [];
res.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const videoSearchChain = createVideoSearchChain(llm);
return videoSearchChain.invoke(input);
};
export default handleVideoSearch;

View File

@@ -6,11 +6,8 @@ const getClientConfig = (key: string, defaultVal?: any) => {
export const getTheme = () => getClientConfig('theme', 'dark');
export const getAutoImageSearch = () =>
Boolean(getClientConfig('autoImageSearch', 'true'));
export const getAutoVideoSearch = () =>
Boolean(getClientConfig('autoVideoSearch', 'true'));
export const getAutoMediaSearch = () =>
getClientConfig('autoMediaSearch', 'true') === 'true';
export const getSystemInstructions = () =>
getClientConfig('systemInstructions', '');

View File

@@ -13,14 +13,15 @@ class ConfigManager {
currentConfig: Config = {
version: this.configVersion,
setupComplete: false,
general: {},
preferences: {},
personalization: {},
modelProviders: [],
search: {
searxngURL: '',
},
};
uiConfigSections: UIConfigSections = {
general: [
preferences: [
{
name: 'Theme',
key: 'theme',
@@ -40,16 +41,6 @@ class ConfigManager {
default: 'dark',
scope: 'client',
},
{
name: 'System Instructions',
key: 'systemInstructions',
type: 'textarea',
required: false,
description: 'Add custom behavior or tone for the model.',
placeholder:
'e.g., "Respond in a friendly and concise tone" or "Use British English and format answers as bullet points."',
scope: 'client',
},
{
name: 'Measurement Unit',
key: 'measureUnit',
@@ -69,6 +60,27 @@ class ConfigManager {
default: 'Metric',
scope: 'client',
},
{
name: 'Auto video & image search',
key: 'autoMediaSearch',
type: 'switch',
required: false,
description: 'Automatically search for relevant images and videos.',
default: true,
scope: 'client',
},
],
personalization: [
{
name: 'System Instructions',
key: 'systemInstructions',
type: 'textarea',
required: false,
description: 'Add custom behavior or tone for the model.',
placeholder:
'e.g., "Respond in a friendly and concise tone" or "Use British English and format answers as bullet points."',
scope: 'client',
},
],
modelProviders: [],
search: [

View File

@@ -38,11 +38,17 @@ type TextareaUIConfigField = BaseUIConfigField & {
default?: string;
};
type SwitchUIConfigField = BaseUIConfigField & {
type: 'switch';
default?: boolean;
};
type UIConfigField =
| StringUIConfigField
| SelectUIConfigField
| PasswordUIConfigField
| TextareaUIConfigField;
| TextareaUIConfigField
| SwitchUIConfigField;
type ConfigModelProvider = {
id: string;
@@ -57,7 +63,10 @@ type ConfigModelProvider = {
type Config = {
version: number;
setupComplete: boolean;
general: {
preferences: {
[key: string]: any;
};
personalization: {
[key: string]: any;
};
modelProviders: ConfigModelProvider[];
@@ -80,7 +89,8 @@ type ModelProviderUISection = {
};
type UIConfigSections = {
general: UIConfigField[];
preferences: UIConfigField[];
personalization: UIConfigField[];
modelProviders: ModelProviderUISection[];
search: UIConfigField[];
};
@@ -95,4 +105,5 @@ export type {
ModelProviderUISection,
ConfigModelProvider,
TextareaUIConfigField,
SwitchUIConfigField,
};

View File

@@ -18,12 +18,18 @@ db.exec(`
`);
function sanitizeSql(content: string) {
return content
const statements = content
.split(/--> statement-breakpoint/g)
.map((stmt) =>
stmt
.split(/\r?\n/)
.filter(
(l) => !l.trim().startsWith('-->') && !l.includes('statement-breakpoint'),
.filter((l) => !l.trim().startsWith('-->'))
.join('\n')
.trim(),
)
.join('\n');
.filter((stmt) => stmt.length > 0);
return statements;
}
fs.readdirSync(migrationsFolder)
@@ -32,7 +38,7 @@ fs.readdirSync(migrationsFolder)
.forEach((file) => {
const filePath = path.join(migrationsFolder, file);
let content = fs.readFileSync(filePath, 'utf-8');
content = sanitizeSql(content);
const statements = sanitizeSql(content);
const migrationName = file.split('_')[0] || file;
@@ -108,7 +114,12 @@ fs.readdirSync(migrationsFolder)
db.exec('DROP TABLE messages;');
db.exec('ALTER TABLE messages_with_sources RENAME TO messages;');
} else {
db.exec(content);
// Execute each statement separately
statements.forEach((stmt) => {
if (stmt.trim()) {
db.exec(stmt);
}
});
}
db.prepare('INSERT OR IGNORE INTO ran_migrations (name) VALUES (?)').run(

View File

@@ -1,26 +1,22 @@
import { sql } from 'drizzle-orm';
import { text, integer, sqliteTable } from 'drizzle-orm/sqlite-core';
import { Document } from '@langchain/core/documents';
export const messages = sqliteTable('messages', {
id: integer('id').primaryKey(),
role: text('type', { enum: ['assistant', 'user', 'source'] }).notNull(),
chatId: text('chatId').notNull(),
createdAt: text('createdAt')
.notNull()
.default(sql`CURRENT_TIMESTAMP`),
messageId: text('messageId').notNull(),
content: text('content'),
sources: text('sources', {
mode: 'json',
})
.$type<Document[]>()
chatId: text('chatId').notNull(),
backendId: text('backendId').notNull(),
query: text('query').notNull(),
createdAt: text('createdAt').notNull(),
responseBlocks: text('responseBlocks', { mode: 'json' })
.$type<Block[]>()
.default(sql`'[]'`),
status: text({ enum: ['answering', 'completed', 'error'] }).default(
'answering',
),
});
interface File {
interface DBFile {
name: string;
fileId: string;
}
@@ -31,6 +27,6 @@ export const chats = sqliteTable('chats', {
createdAt: text('createdAt').notNull(),
focusMode: text('focusMode').notNull(),
files: text('files', { mode: 'json' })
.$type<File[]>()
.$type<DBFile[]>()
.default(sql`'[]'`),
});

View File

@@ -21,6 +21,7 @@ import { useParams, useSearchParams } from 'next/navigation';
import { toast } from 'sonner';
import { getSuggestions } from '../actions';
import { MinimalProvider } from '../models/types';
import { getAutoMediaSearch } from '../config/clientRegistry';
export type Section = {
userMessage: UserMessage;
@@ -94,17 +95,6 @@ const checkConfig = async (
'embeddingModelProviderId',
);
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const res = await fetch(`/api/providers`, {
headers: {
'Content-Type': 'application/json',
@@ -624,16 +614,13 @@ export const ChatProvider = ({ children }: { children: React.ReactNode }) => {
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
const autoMediaSearch = getAutoMediaSearch();
if (autoImageSearch === 'true') {
if (autoMediaSearch) {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();

View File

@@ -0,0 +1,7 @@
abstract class BaseEmbedding<CONFIG> {
constructor(protected config: CONFIG) {}
abstract embedText(texts: string[]): Promise<number[][]>;
abstract embedChunks(chunks: Chunk[]): Promise<number[][]>;
}
export default BaseEmbedding;
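
A toy subclass to show the contract; the real implementations live under models/providers:

class FakeEmbedding extends BaseEmbedding<{ dims: number }> {
  async embedText(texts: string[]): Promise<number[][]> {
    // Returns zero vectors of the configured dimensionality.
    return texts.map(() => new Array(this.config.dims).fill(0));
  }
  async embedChunks(chunks: Chunk[]): Promise<number[][]> {
    return this.embedText(chunks.map((c) => c.content));
  }
}
const embedder = new FakeEmbedding({ dims: 3 });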

View File

@@ -0,0 +1,22 @@
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../types';
abstract class BaseLLM<CONFIG> {
constructor(protected config: CONFIG) {}
abstract withOptions(options: GenerateOptions): this;
abstract generateText(input: GenerateTextInput): Promise<GenerateTextOutput>;
abstract streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput>;
abstract generateObject<T>(input: GenerateObjectInput): Promise<T>;
abstract streamObject<T>(
input: GenerateObjectInput,
): AsyncGenerator<Partial<T>>;
}
export default BaseLLM;
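
How a caller would consume the streaming contract, assuming the role/content message shape used elsewhere in the diff:

async function printStream(llm: BaseLLM<any>) {
  for await (const chunk of llm.streamText({
    messages: [{ role: 'user', content: 'Hello' }],
  })) {
    process.stdout.write(chunk.contentChunk);
    if (chunk.done) break;
  }
}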

View File

@@ -1,7 +1,9 @@
import { Embeddings } from '@langchain/core/embeddings';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import { ModelList, ProviderMetadata } from '../types';
import { UIConfigField } from '@/lib/config/types';
import BaseLLM from './llm';
import BaseEmbedding from './embedding';
abstract class BaseModelProvider<CONFIG> {
constructor(
@@ -11,8 +13,8 @@ abstract class BaseModelProvider<CONFIG> {
) {}
abstract getDefaultModels(): Promise<ModelList>;
abstract getModelList(): Promise<ModelList>;
abstract loadChatModel(modelName: string): Promise<BaseChatModel>;
abstract loadEmbeddingModel(modelName: string): Promise<Embeddings>;
abstract loadChatModel(modelName: string): Promise<BaseLLM<any>>;
abstract loadEmbeddingModel(modelName: string): Promise<BaseEmbedding<any>>;
static getProviderConfigFields(): UIConfigField[] {
throw new Error('Method not implemented.');
}

View File

@@ -1,152 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface AimlConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your AI/ML API key',
required: true,
placeholder: 'AI/ML API Key',
env: 'AIML_API_KEY',
scope: 'server',
},
];
class AimlProvider extends BaseModelProvider<AimlConfig> {
constructor(id: string, name: string, config: AimlConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const res = await fetch('https://api.aimlapi.com/models', {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
},
});
const data = await res.json();
const chatModels: Model[] = data.data
.filter((m: any) => m.type === 'chat-completion')
.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
const embeddingModels: Model[] = data.data
.filter((m: any) => m.type === 'embedding')
.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: embeddingModels,
chat: chatModels,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to AI/ML API. Please ensure your API key is correct and the service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading AI/ML API Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
configuration: {
baseURL: 'https://api.aimlapi.com',
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading AI/ML API Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: this.config.apiKey,
model: key,
configuration: {
baseURL: 'https://api.aimlapi.com',
},
});
}
static parseAndValidate(raw: any): AimlConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'aiml',
name: 'AI/ML API',
};
}
}
export default AimlProvider;

View File

@@ -1,115 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatAnthropic } from '@langchain/anthropic';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface AnthropicConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Anthropic API key',
required: true,
placeholder: 'Anthropic API Key',
env: 'ANTHROPIC_API_KEY',
scope: 'server',
},
];
class AnthropicProvider extends BaseModelProvider<AnthropicConfig> {
constructor(id: string, name: string, config: AnthropicConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch('https://api.anthropic.com/v1/models?limit=999', {
method: 'GET',
headers: {
'x-api-key': this.config.apiKey,
'anthropic-version': '2023-06-01',
'Content-type': 'application/json',
},
});
if (!res.ok) {
throw new Error(`Failed to fetch Anthropic models: ${res.statusText}`);
}
const data = (await res.json()).data;
const models: Model[] = data.map((m: any) => {
return {
key: m.id,
name: m.display_name,
};
});
return {
embedding: [],
chat: models,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Anthropic Chat Model. Invalid Model Selected',
);
}
return new ChatAnthropic({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('Anthropic provider does not support embedding models.');
}
static parseAndValidate(raw: any): AnthropicConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'anthropic',
name: 'Anthropic',
};
}
}
export default AnthropicProvider;

View File

@@ -1,107 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface DeepSeekConfig {
apiKey: string;
}
const defaultChatModels: Model[] = [
{
name: 'Deepseek Chat / DeepSeek V3.2 Exp',
key: 'deepseek-chat',
},
{
name: 'Deepseek Reasoner / DeepSeek V3.2 Exp',
key: 'deepseek-reasoner',
},
];
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your DeepSeek API key',
required: true,
placeholder: 'DeepSeek API Key',
env: 'DEEPSEEK_API_KEY',
scope: 'server',
},
];
class DeepSeekProvider extends BaseModelProvider<DeepSeekConfig> {
constructor(id: string, name: string, config: DeepSeekConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
return {
embedding: [],
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading DeepSeek Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
configuration: {
baseURL: 'https://api.deepseek.com',
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('DeepSeek provider does not support embedding models.');
}
static parseAndValidate(raw: any): DeepSeekConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'deepseek',
name: 'Deepseek AI',
};
}
}
export default DeepSeekProvider;

View File

@@ -1,140 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface GeminiConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Google Gemini API key',
required: true,
placeholder: 'Google Gemini API Key',
env: 'GEMINI_API_KEY',
scope: 'server',
},
];
class GeminiProvider extends BaseModelProvider<GeminiConfig> {
constructor(id: string, name: string, config: GeminiConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
const res = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models?key=${this.config.apiKey}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
},
);
const data = await res.json();
let defaultEmbeddingModels: Model[] = [];
let defaultChatModels: Model[] = [];
data.models.forEach((m: any) => {
if (m.supportedGenerationMethods.includes('embedText')) {
defaultEmbeddingModels.push({
key: m.name,
name: m.displayName,
});
} else if (m.supportedGenerationMethods.includes('generateContent')) {
defaultChatModels.push({
key: m.name,
name: m.displayName,
});
}
});
return {
embedding: defaultEmbeddingModels,
chat: defaultChatModels,
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Chat Model. Invalid Model Selected',
);
}
return new ChatGoogleGenerativeAI({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Gemini Embedding Model. Invalid Model Selected.',
);
}
return new GoogleGenerativeAIEmbeddings({
apiKey: this.config.apiKey,
model: key,
});
}
static parseAndValidate(raw: any): GeminiConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'gemini',
name: 'Google Gemini',
};
}
}
export default GeminiProvider;

View File

@@ -1,118 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatGroq } from '@langchain/groq';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface GroqConfig {
apiKey: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Groq API key',
required: true,
placeholder: 'Groq API Key',
env: 'GROQ_API_KEY',
scope: 'server',
},
];
class GroqProvider extends BaseModelProvider<GroqConfig> {
constructor(id: string, name: string, config: GroqConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const res = await fetch('https://api.groq.com/openai/v1/models', {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
},
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: [],
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Groq API. Please ensure your API key is correct and the Groq service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error('Error Loading Groq Chat Model. Invalid Model Selected');
}
return new ChatGroq({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
throw new Error('Groq provider does not support embedding models.');
}
static parseAndValidate(raw: any): GroqConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.apiKey)
throw new Error('Invalid config provided. API key must be provided');
return {
apiKey: String(raw.apiKey),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'groq',
name: 'Groq',
};
}
}
export default GroqProvider;

View File

@@ -1,27 +1,11 @@
import { ModelProviderUISection } from '@/lib/config/types';
import { ProviderConstructor } from './baseProvider';
import { ProviderConstructor } from '../base/provider';
import OpenAIProvider from './openai';
import OllamaProvider from './ollama';
import TransformersProvider from './transformers';
import AnthropicProvider from './anthropic';
import GeminiProvider from './gemini';
import GroqProvider from './groq';
import DeepSeekProvider from './deepseek';
import LMStudioProvider from './lmstudio';
import LemonadeProvider from './lemonade';
import AimlProvider from '@/lib/models/providers/aiml';
export const providers: Record<string, ProviderConstructor<any>> = {
openai: OpenAIProvider,
ollama: OllamaProvider,
transformers: TransformersProvider,
anthropic: AnthropicProvider,
gemini: GeminiProvider,
groq: GroqProvider,
deepseek: DeepSeekProvider,
aiml: AimlProvider,
lmstudio: LMStudioProvider,
lemonade: LemonadeProvider,
};
export const getModelProvidersUIConfigSection =

View File

@@ -1,158 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface LemonadeConfig {
baseURL: string;
apiKey?: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for Lemonade API',
required: true,
placeholder: 'https://api.lemonade.ai/v1',
env: 'LEMONADE_BASE_URL',
scope: 'server',
},
{
type: 'password',
name: 'API Key',
key: 'apiKey',
description: 'Your Lemonade API key (optional)',
required: false,
placeholder: 'Lemonade API Key',
env: 'LEMONADE_API_KEY',
scope: 'server',
},
];
class LemonadeProvider extends BaseModelProvider<LemonadeConfig> {
constructor(id: string, name: string, config: LemonadeConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
try {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
};
if (this.config.apiKey) {
headers['Authorization'] = `Bearer ${this.config.apiKey}`;
}
const res = await fetch(`${this.config.baseURL}/models`, {
method: 'GET',
headers,
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to Lemonade API. Please ensure the base URL is correct and the service is available.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: this.config.apiKey || 'not-needed',
temperature: 0.7,
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading Lemonade Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: this.config.apiKey || 'not-needed',
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}
static parseAndValidate(raw: any): LemonadeConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
apiKey: raw.apiKey ? String(raw.apiKey) : undefined,
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lemonade',
name: 'Lemonade',
};
}
}
export default LemonadeProvider;

View File

@@ -1,148 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
interface LMStudioConfig {
baseURL: string;
}
const providerConfigFields: UIConfigField[] = [
{
type: 'string',
name: 'Base URL',
key: 'baseURL',
description: 'The base URL for LM Studio server',
required: true,
placeholder: 'http://localhost:1234',
env: 'LM_STUDIO_BASE_URL',
scope: 'server',
},
];
class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
constructor(id: string, name: string, config: LMStudioConfig) {
super(id, name, config);
}
private normalizeBaseURL(url: string): string {
const trimmed = url.trim().replace(/\/+$/, '');
return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
}
async getDefaultModels(): Promise<ModelList> {
try {
const baseURL = this.normalizeBaseURL(this.config.baseURL);
const res = await fetch(`${baseURL}/models`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
const data = await res.json();
const models: Model[] = data.data.map((m: any) => {
return {
name: m.id,
key: m.id,
};
});
return {
embedding: models,
chat: models,
};
} catch (err) {
if (err instanceof TypeError) {
throw new Error(
'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
);
}
throw err;
}
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [...defaultModels.chat, ...configProvider.chatModels],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Chat Model. Invalid Model Selected',
);
}
return new ChatOpenAI({
apiKey: 'lm-studio',
temperature: 0.7,
model: key,
streaming: true,
configuration: {
baseURL: this.normalizeBaseURL(this.config.baseURL),
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
);
}
return new OpenAIEmbeddings({
apiKey: 'lm-studio',
model: key,
configuration: {
baseURL: this.normalizeBaseURL(this.config.baseURL),
},
});
}
static parseAndValidate(raw: any): LMStudioConfig {
if (!raw || typeof raw !== 'object')
throw new Error('Invalid config provided. Expected object');
if (!raw.baseURL)
throw new Error('Invalid config provided. Base URL must be provided');
return {
baseURL: String(raw.baseURL),
};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'lmstudio',
name: 'LM Studio',
};
}
}
export default LMStudioProvider;

View File

@@ -1,10 +1,11 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOllama, OllamaEmbeddings } from '@langchain/ollama';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import BaseModelProvider from '../../base/provider';
import { Model, ModelList, ProviderMetadata } from '../../types';
import BaseLLM from '../../base/llm';
import BaseEmbedding from '../../base/embedding';
import OllamaLLM from './ollamaLLM';
import OllamaEmbedding from './ollamaEmbedding';
interface OllamaConfig {
baseURL: string;
@@ -76,7 +77,7 @@ class OllamaProvider extends BaseModelProvider<OllamaConfig> {
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
@@ -87,14 +88,13 @@ class OllamaProvider extends BaseModelProvider<OllamaConfig> {
);
}
return new ChatOllama({
temperature: 0.7,
return new OllamaLLM({
baseURL: this.config.baseURL,
model: key,
baseUrl: this.config.baseURL,
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
@@ -104,9 +104,9 @@ class OllamaProvider extends BaseModelProvider<OllamaConfig> {
);
}
return new OllamaEmbeddings({
return new OllamaEmbedding({
model: key,
baseUrl: this.config.baseURL,
baseURL: this.config.baseURL,
});
}

View File

@@ -0,0 +1,39 @@
import { Ollama } from 'ollama';
import BaseEmbedding from '../../base/embedding';
type OllamaConfig = {
model: string;
baseURL?: string;
};
class OllamaEmbedding extends BaseEmbedding<OllamaConfig> {
ollamaClient: Ollama;
constructor(protected config: OllamaConfig) {
super(config);
this.ollamaClient = new Ollama({
host: this.config.baseURL || 'http://localhost:11434',
});
}
async embedText(texts: string[]): Promise<number[][]> {
const response = await this.ollamaClient.embed({
input: texts,
model: this.config.model,
});
return response.embeddings;
}
async embedChunks(chunks: Chunk[]): Promise<number[][]> {
const response = await this.ollamaClient.embed({
input: chunks.map((c) => c.content),
model: this.config.model,
});
return response.embeddings;
}
}
export default OllamaEmbedding;
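
Usage sketch, assuming a local Ollama server with an embedding model already pulled (model name illustrative):

const embedder = new OllamaEmbedding({
  model: 'nomic-embed-text',
  baseURL: 'http://localhost:11434',
});
const [vector] = await embedder.embedText(['hello world']);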

View File

@@ -0,0 +1,151 @@
import z from 'zod';
import BaseLLM from '../../base/llm';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types';
import { Ollama } from 'ollama';
import { parse } from 'partial-json';
type OllamaConfig = {
baseURL: string;
model: string;
options?: GenerateOptions;
};
class OllamaLLM extends BaseLLM<OllamaConfig> {
ollamaClient: Ollama;
constructor(protected config: OllamaConfig) {
super(config);
this.ollamaClient = new Ollama({
host: this.config.baseURL || 'http://localhost:11434',
});
}
withOptions(options: GenerateOptions) {
this.config.options = {
...this.config.options,
...options,
};
return this;
}
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
this.withOptions(input.options || {});
const res = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
options: {
top_p: this.config.options?.topP,
temperature: this.config.options?.temperature,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
return {
content: res.message.content,
additionalInfo: {
reasoning: res.message.thinking,
},
};
}
async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
this.withOptions(input.options || {});
const stream = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
stream: true,
options: {
top_p: this.config.options?.topP,
temperature: this.config.options?.temperature,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
for await (const chunk of stream) {
yield {
contentChunk: chunk.message.content,
done: chunk.done,
additionalInfo: {
reasoning: chunk.message.thinking,
},
};
}
}
async generateObject<T>(input: GenerateObjectInput): Promise<T> {
this.withOptions(input.options || {});
const response = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
format: z.toJSONSchema(input.schema),
think: false,
options: {
top_p: this.config.options?.topP,
temperature: 0,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
try {
return input.schema.parse(JSON.parse(response.message.content)) as T;
} catch (err) {
throw new Error(`Error parsing response from Ollama: ${err}`);
}
}
async *streamObject<T>(
input: GenerateObjectInput,
): AsyncGenerator<Partial<T>> {
let receivedObj = '';
this.withOptions(input.options || {});
const stream = await this.ollamaClient.chat({
model: this.config.model,
messages: input.messages,
format: z.toJSONSchema(input.schema),
stream: true,
think: false,
options: {
top_p: this.config.options?.topP,
temperature: this.config.options?.temperature,
num_predict: this.config.options?.maxTokens,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stop: this.config.options?.stopSequences,
},
});
for await (const chunk of stream) {
receivedObj += chunk.message.content;
try {
yield parse(receivedObj) as Partial<T>;
} catch (err) {
console.log('Error parsing partial object from Ollama:', err);
yield {} as Partial<T>;
}
}
}
}
}
export default OllamaLLM;
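
A structured-output sketch against a local Ollama server (model name illustrative); generateObject validates the reply with the provided zod schema:

import z from 'zod';

const answerSchema = z.object({ answer: z.string() });

const llm = new OllamaLLM({
  baseURL: 'http://localhost:11434',
  model: 'llama3.1',
});
const obj = await llm.generateObject<z.infer<typeof answerSchema>>({
  messages: [{ role: 'user', content: 'Answer with JSON: what is 2 + 2?' }],
  schema: answerSchema,
});
// obj.answer is a string, validated by answerSchema.parse.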

View File

@@ -1,10 +1,13 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import { Model, ModelList, ProviderMetadata } from '../../types';
import OpenAIEmbedding from './openaiEmbedding';
import BaseEmbedding from '../../base/embedding';
import BaseModelProvider from '../../base/provider';
import BaseLLM from '../../base/llm';
import OpenAILLM from './openaiLLM';
interface OpenAIConfig {
apiKey: string;
@@ -145,7 +148,7 @@ class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
async loadChatModel(key: string): Promise<BaseLLM<any>> {
const modelList = await this.getModelList();
const exists = modelList.chat.find((m) => m.key === key);
@@ -156,17 +159,14 @@ class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
);
}
return new ChatOpenAI({
return new OpenAILLM({
apiKey: this.config.apiKey,
temperature: 0.7,
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
@@ -176,12 +176,10 @@ class OpenAIProvider extends BaseModelProvider<OpenAIConfig> {
);
}
return new OpenAIEmbeddings({
return new OpenAIEmbedding({
apiKey: this.config.apiKey,
model: key,
configuration: {
baseURL: this.config.baseURL,
},
});
}

View File

@@ -0,0 +1,41 @@
import OpenAI from 'openai';
import BaseEmbedding from '../../base/embedding';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
};
class OpenAIEmbedding extends BaseEmbedding<OpenAIConfig> {
openAIClient: OpenAI;
constructor(protected config: OpenAIConfig) {
super(config);
this.openAIClient = new OpenAI({
apiKey: config.apiKey,
baseURL: config.baseURL,
});
}
async embedText(texts: string[]): Promise<number[][]> {
const response = await this.openAIClient.embeddings.create({
model: this.config.model,
input: texts,
});
return response.data.map((embedding) => embedding.embedding);
}
async embedChunks(chunks: Chunk[]): Promise<number[][]> {
const response = await this.openAIClient.embeddings.create({
model: this.config.model,
input: chunks.map((c) => c.content),
});
return response.data.map((embedding) => embedding.embedding);
}
}
export default OpenAIEmbedding;
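A short usage sketch for the new embedding wrapper (the model key is illustrative):

```typescript
const embedder = new OpenAIEmbedding({
  apiKey: process.env.OPENAI_API_KEY!,
  model: 'text-embedding-3-small', // illustrative model key
});

// Both helpers return one vector per input, in input order.
const [hello, world] = await embedder.embedText(['hello', 'world']);
console.log(hello.length); // embedding dimensionality
```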

View File

@@ -0,0 +1,163 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm';
import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types';
import { parse } from 'partial-json';
type OpenAIConfig = {
apiKey: string;
model: string;
baseURL?: string;
options?: GenerateOptions;
};
class OpenAILLM extends BaseLLM<OpenAIConfig> {
openAIClient: OpenAI;
constructor(protected config: OpenAIConfig) {
super(config);
this.openAIClient = new OpenAI({
apiKey: this.config.apiKey,
baseURL: this.config.baseURL || 'https://api.openai.com/v1',
});
}
withOptions(options: GenerateOptions) {
this.config.options = {
...this.config.options,
...options,
};
return this;
}
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
this.withOptions(input.options || {});
const response = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: input.messages,
temperature: this.config.options?.temperature ?? 1.0, // ?? keeps an explicit 0
top_p: this.config.options?.topP,
max_completion_tokens: this.config.options?.maxTokens,
stop: this.config.options?.stopSequences,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
});
if (response.choices && response.choices.length > 0) {
return {
content: response.choices[0].message.content!,
additionalInfo: {
finishReason: response.choices[0].finish_reason,
},
};
}
throw new Error('No response from OpenAI');
}
async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
this.withOptions(input.options || {});
const stream = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: input.messages,
temperature: this.config.options?.temperature ?? 1.0,
top_p: this.config.options?.topP,
max_completion_tokens: this.config.options?.maxTokens,
stop: this.config.options?.stopSequences,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
stream: true,
});
for await (const chunk of stream) {
if (chunk.choices && chunk.choices.length > 0) {
yield {
contentChunk: chunk.choices[0].delta.content || '',
done: chunk.choices[0].finish_reason !== null,
additionalInfo: {
finishReason: chunk.choices[0].finish_reason,
},
};
}
}
}
async generateObject<T>(input: GenerateObjectInput): Promise<T> {
this.withOptions(input.options || {});
const response = await this.openAIClient.chat.completions.parse({
messages: input.messages,
model: this.config.model,
temperature: this.config.options?.temperature ?? 1.0,
top_p: this.config.options?.topP,
max_completion_tokens: this.config.options?.maxTokens,
stop: this.config.options?.stopSequences,
frequency_penalty: this.config.options?.frequencyPenalty,
presence_penalty: this.config.options?.presencePenalty,
response_format: zodResponseFormat(input.schema, 'object'),
});
if (response.choices && response.choices.length > 0) {
try {
return input.schema.parse(response.choices[0].message.parsed) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
}
throw new Error('No response from OpenAI');
}
async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
let receivedObj: string = '';
this.withOptions(input.options || {});
const stream = this.openAIClient.responses.stream({
model: this.config.model,
input: input.messages,
temperature: this.config.options?.temperature ?? 1.0,
top_p: this.config.options?.topP,
// Responses API: max_output_tokens replaces max_completion_tokens; stop,
// frequency_penalty and presence_penalty are Chat Completions-only options.
max_output_tokens: this.config.options?.maxTokens,
text: {
format: zodTextFormat(input.schema, 'object'),
},
});
for await (const chunk of stream) {
if (chunk.type === 'response.output_text.delta' && chunk.delta) {
receivedObj += chunk.delta;
try {
yield parse(receivedObj) as T;
} catch (err) {
console.log('Error parsing partial object from OpenAI:', err);
yield {} as T;
}
} else if (chunk.type === 'response.output_text.done' && chunk.text) {
try {
yield parse(chunk.text) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
}
}
}
}
export default OpenAILLM;
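A sketch of structured output through the wrapper, assuming a Zod schema: zodResponseFormat constrains the completion to the schema, and message.parsed is re-validated before being returned. Schema, model key, and prompt are illustrative:

```typescript
import { z } from 'zod';

const sentimentSchema = z.object({
  label: z.enum(['positive', 'negative', 'neutral']),
  confidence: z.number(),
});

const llm = new OpenAILLM({
  apiKey: process.env.OPENAI_API_KEY!,
  model: 'gpt-4o-mini', // illustrative
});

const result = await llm.generateObject<z.infer<typeof sentimentSchema>>({
  schema: sentimentSchema,
  messages: [{ role: 'user', content: 'Classify: "I love this!"' }],
});
console.log(result.label, result.confidence);
```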

View File

@@ -1,87 +0,0 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Model, ModelList, ProviderMetadata } from '../types';
import BaseModelProvider from './baseProvider';
import { Embeddings } from '@langchain/core/embeddings';
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import { HuggingFaceTransformersEmbeddings } from '@langchain/community/embeddings/huggingface_transformers';
interface TransformersConfig {}
const defaultEmbeddingModels: Model[] = [
{
name: 'all-MiniLM-L6-v2',
key: 'Xenova/all-MiniLM-L6-v2',
},
{
name: 'mxbai-embed-large-v1',
key: 'mixedbread-ai/mxbai-embed-large-v1',
},
{
name: 'nomic-embed-text-v1',
key: 'Xenova/nomic-embed-text-v1',
},
];
const providerConfigFields: UIConfigField[] = [];
class TransformersProvider extends BaseModelProvider<TransformersConfig> {
constructor(id: string, name: string, config: TransformersConfig) {
super(id, name, config);
}
async getDefaultModels(): Promise<ModelList> {
return {
embedding: [...defaultEmbeddingModels],
chat: [],
};
}
async getModelList(): Promise<ModelList> {
const defaultModels = await this.getDefaultModels();
const configProvider = getConfiguredModelProviderById(this.id)!;
return {
embedding: [
...defaultModels.embedding,
...configProvider.embeddingModels,
],
chat: [],
};
}
async loadChatModel(key: string): Promise<BaseChatModel> {
throw new Error('Transformers Provider does not support chat models.');
}
async loadEmbeddingModel(key: string): Promise<Embeddings> {
const modelList = await this.getModelList();
const exists = modelList.embedding.find((m) => m.key === key);
if (!exists) {
throw new Error(
'Error Loading OpenAI Embedding Model. Invalid Model Selected.',
);
}
return new HuggingFaceTransformersEmbeddings({
model: key,
});
}
static parseAndValidate(raw: any): TransformersConfig {
return {};
}
static getProviderConfigFields(): UIConfigField[] {
return providerConfigFields;
}
static getProviderMetadata(): ProviderMetadata {
return {
key: 'transformers',
name: 'Transformers',
};
}
}
export default TransformersProvider;

View File

@@ -1,3 +1,5 @@
import z from 'zod';
type Model = {
name: string;
key: string;
@@ -25,10 +27,59 @@ type ModelWithProvider = {
providerId: string;
};
type GenerateOptions = {
temperature?: number;
maxTokens?: number;
topP?: number;
stopSequences?: string[];
frequencyPenalty?: number;
presencePenalty?: number;
};
type GenerateTextInput = {
messages: Message[];
options?: GenerateOptions;
};
type GenerateTextOutput = {
content: string;
additionalInfo?: Record<string, any>;
};
type StreamTextOutput = {
contentChunk: string;
additionalInfo?: Record<string, any>;
done?: boolean;
};
type GenerateObjectInput = {
schema: z.ZodTypeAny;
messages: Message[];
options?: GenerateOptions;
};
type GenerateObjectOutput<T> = {
object: T;
additionalInfo?: Record<string, any>;
};
type StreamObjectOutput<T> = {
objectChunk: Partial<T>;
additionalInfo?: Record<string, any>;
done?: boolean;
};
export type {
Model,
ModelList,
ProviderMetadata,
MinimalProvider,
ModelWithProvider,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
GenerateObjectInput,
GenerateObjectOutput,
StreamObjectOutput,
};
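These types keep the streaming contract uniform across providers. A minimal consumer sketch, assuming any BaseLLM implementation and the shared Message shape:

```typescript
// llm is any BaseLLM implementation; messages use the shared Message shape.
async function collectAnswer(llm: BaseLLM<any>, messages: Message[]) {
  let answer = '';
  for await (const chunk of llm.streamText({ messages })) {
    answer += chunk.contentChunk;
    if (chunk.done) break; // done is optional and provider-dependent
  }
  return answer;
}
```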

View File

@@ -1,13 +0,0 @@
import {
webSearchResponsePrompt,
webSearchRetrieverFewShots,
webSearchRetrieverPrompt,
} from './webSearch';
import { writingAssistantPrompt } from './writingAssistant';
export default {
webSearchResponsePrompt,
webSearchRetrieverPrompt,
webSearchRetrieverFewShots,
writingAssistantPrompt,
};

View File

@@ -0,0 +1,26 @@
import { BaseMessageLike } from '@langchain/core/messages';
export const imageSearchPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
export const imageSearchFewShots: BaseMessageLike[] = [
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
],
['assistant', '<query>A cat</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
],
['assistant', '<query>Car working</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
];

View File

@@ -0,0 +1,25 @@
import { BaseMessageLike } from '@langchain/core/messages';
export const videoSearchPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search YouTube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
export const videoSearchFewShots: BaseMessageLike[] = [
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
],
['assistant', '<query>How does a car work?</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
],
['assistant', '<query>Theory of relativity</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
];

View File

@@ -0,0 +1,176 @@
export const getClassifierPrompt = (input: {
intentDesc: string;
widgetDesc: string;
}) => {
return `
<role>
You are an expert query classifier for an intelligent search agent. Your task is to analyze user queries and determine the optimal way to answer them—selecting the right intent(s) and widgets.
</role>
<task>
Given a conversation history and follow-up question, you must:
1. Determine if search should be skipped (skipSearch: boolean)
2. Generate a standalone, self-contained version of the question (standaloneFollowUp: string)
3. Identify the intent(s) that describe how to fulfill the query (intent: array)
4. Select appropriate widgets (widgets: array)
</task>
<critical_decision_rule>
**THE MOST IMPORTANT RULE**: skipSearch should be TRUE only in TWO cases:
1. Widget-only queries (weather, stocks, calculator)
2. Greetings or simple writing tasks (NOT questions)
**DEFAULT TO skipSearch: false** for everything else, including:
- Any question ("what is", "how does", "explain", "tell me about")
- Any request for information or facts
- Anything you're unsure about
Ask yourself: "Is the user ASKING about something or requesting INFORMATION?"
- YES → skipSearch: false (use web_search)
- NO (just greeting or simple writing) → skipSearch: true
</critical_decision_rule>
<skip_search_decision_tree>
Follow this decision tree IN ORDER:
1. **Widget-Only Queries** → skipSearch: TRUE, intent: ['widget_response']
- Weather queries: "weather in NYC", "temperature in Paris", "is it raining in Seattle"
- Stock queries: "AAPL stock price", "how is Tesla doing", "MSFT stock"
- Calculator queries: "what is 25% of 80", "calculate 15*23", "sqrt(144)"
- These are COMPLETE answers—no search needed
2. **Writing/Greeting Tasks** → skipSearch: TRUE, intent: ['writing_task']
- ONLY for greetings and simple writing:
- Greetings: "hello", "hi", "how are you", "thanks", "goodbye"
- Simple writing needing NO facts: "write a thank you email", "draft a birthday message", "compose a poem"
- NEVER for: questions, "what is X", "how does X work", explanations, definitions, facts, code help
- If user is ASKING about something (not requesting writing), use web_search
3. **Image Display Queries** → skipSearch: FALSE, intent: ['image_preview']
- "Show me images of cats"
- "Pictures of the Eiffel Tower"
- "Visual examples of modern architecture"
- Requests for images to visualize something
4. **Widget + Additional Info** → skipSearch: FALSE, intent: ['web_search', 'widget_response']
- "weather in NYC and best things to do there"
- "AAPL stock and recent Apple news"
- "calculate my mortgage and explain how interest works"
5. **Pure Search Queries** → skipSearch: FALSE
- Default to web_search for general questions
- Use discussions_search when user explicitly mentions Reddit, forums, opinions, experiences
- Use academic_search when user explicitly mentions research, papers, studies, scientific
- Can combine multiple search intents when appropriate
6. **Fallback when web_search unavailable** → skipSearch: TRUE, intent: ['writing_task'] or []
- If no search intents are available and no widgets apply
- Set skipSearch to true and use writing_task or empty intent
</skip_search_decision_tree>
<examples>
Example 1: Widget-only query
Query: "What is the weather in New York?"
Reasoning: User wants current weather → weather widget provides this completely
Output: skipSearch: true, intent: ['widget_response'], widgets: [weather widget for New York]
Example 2: Widget-only query
Query: "AAPL stock price"
Reasoning: User wants stock price → stock_ticker widget provides this completely
Output: skipSearch: true, intent: ['widget_response'], widgets: [stock_ticker for AAPL]
Example 3: Widget + search query
Query: "What's the weather in NYC and what are some good outdoor activities?"
Reasoning: Weather widget handles weather, but outdoor activities need web search
Output: skipSearch: false, intent: ['web_search', 'widget_response'], widgets: [weather widget for NYC]
Example 4: Pure search query
Query: "What are the latest developments in AI?"
Reasoning: No widget applies, needs current web information
Output: skipSearch: false, intent: ['web_search'], widgets: []
Example 5: Writing task (greeting/simple writing only)
Query: "Write me a thank you email for a job interview"
Reasoning: Simple writing task needing no external facts → writing_task
Output: skipSearch: true, intent: ['writing_task'], widgets: []
Example 5b: Question about something - ALWAYS needs search
Query: "What is Kimi K2?"
Reasoning: User is ASKING about something → needs web search for accurate info
Output: skipSearch: false, intent: ['web_search'], widgets: []
Example 5c: Another question - needs search
Query: "Explain how photosynthesis works"
Reasoning: User is ASKING for explanation → needs web search
Output: skipSearch: false, intent: ['web_search'], widgets: []
Example 6: Image display
Query: "Show me images of cats"
Reasoning: User wants to see images → requires image search
Output: skipSearch: false, intent: ['image_preview'], widgets: []
Example 7: Multiple search sources
Query: "What does the research say about meditation benefits?"
Reasoning: Benefits from both academic papers and web articles
Output: skipSearch: false, intent: ['academic_search', 'web_search'], widgets: []
Example 8: Discussions search
Query: "What do people on Reddit think about the new iPhone?"
Reasoning: User explicitly wants forum/community opinions → discussions_search
Output: skipSearch: false, intent: ['discussions_search'], widgets: []
Example 9: Academic search only
Query: "Find scientific papers on climate change effects"
Reasoning: User explicitly wants academic/research papers
Output: skipSearch: false, intent: ['academic_search'], widgets: []
</examples>
<standalone_follow_up_guidelines>
Transform the follow-up into a self-contained question:
- Include ALL necessary context from chat history
- Replace pronouns (it, they, this, that) with specific nouns
- Replace references ("the previous one", "what you mentioned") with actual content
- Preserve the original complexity—don't over-elaborate simple questions
- The question should be answerable without seeing the conversation
</standalone_follow_up_guidelines>
<intent_selection_rules>
Available intents:
${input.intentDesc}
Rules:
- Include at least one intent when applicable
- For questions/information requests:
- Default to web_search unless user explicitly requests another source
- Use discussions_search when user mentions: Reddit, forums, opinions, experiences, "what do people think"
- Use academic_search when user mentions: research, papers, studies, scientific, scholarly
- Can combine intents (e.g., ['academic_search', 'web_search'])
- If web_search is NOT in available intents and query needs search:
- Check if discussions_search or academic_search applies
- If no search intent available and no widgets: use writing_task or empty array []
- private_search: ONLY when user provides specific URLs/documents
- widget_response: when widgets fully answer the query
- writing_task: ONLY for greetings and simple writing (never for questions)
</intent_selection_rules>
<widget_selection_rules>
Available widgets:
${input.widgetDesc}
Rules:
- Include ALL applicable widgets regardless of skipSearch value
- Each widget type can only be included once
- Widgets provide structured, real-time data that enhances any response
</widget_selection_rules>
<output_format>
Your classification must be precise and consistent:
{
"skipSearch": <true|false>,
"standaloneFollowUp": "<self-contained question>",
"intent": [<array of selected intents>],
"widgets": [<array of selected widgets>]
}
</output_format>
`;
};
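The <output_format> block pairs naturally with a Zod schema handed to generateObject / streamObject. A plausible shape is sketched below; the real intent and widget enums are built at runtime from intentDesc and widgetDesc, so this is an assumption:

```typescript
import { z } from 'zod';

// Assumed schema mirroring <output_format>; field names follow the prompt.
const classifierSchema = z.object({
  skipSearch: z.boolean(),
  standaloneFollowUp: z.string(),
  intent: z.array(z.string()), // e.g. 'web_search', 'widget_response'
  widgets: z.array(
    z.object({
      type: z.string(),
      params: z.record(z.string(), z.any()),
    }),
  ),
});
```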

View File

@@ -0,0 +1,241 @@
export const getResearcherPrompt = (
actionDesc: string,
mode: 'fast' | 'balanced' | 'deep_research',
) => {
const today = new Date().toLocaleDateString('en-US', {
year: 'numeric',
month: 'long',
day: 'numeric',
});
return `
You are an action orchestrator. Your job is to fulfill user requests by selecting and executing appropriate actions - whether that's searching for information, creating calendar events, sending emails, or any other available action.
Today's date: ${today}
You are operating in "${mode}" mode. ${
mode === 'fast'
? 'Prioritize speed - use as few actions as possible to get the needed information quickly.'
: mode === 'balanced'
? 'Balance speed and depth - use a moderate number of actions to get good information efficiently. Never stop at the first action unless there is no action available or the query is simple.'
: 'Conduct deep research - use multiple actions to gather comprehensive information, even if it takes longer.'
}
<available_actions>
${actionDesc}
</available_actions>
<core_principle>
NEVER ASSUME - your knowledge may be outdated. When a user asks about something you're not certain about, go find out. Don't assume it exists or doesn't exist - just look it up directly.
</core_principle>
<reasoning_approach>
Think like a human would. Your reasoning should be natural and show:
- What the user is asking for
- What you need to find out or do
- Your plan to accomplish it
Keep it to 2-3 natural sentences.
</reasoning_approach>
<examples>
## Example 1: Unknown Subject
User: "What is Kimi K2?"
Good reasoning:
"I'm not sure what Kimi K2 is - could be an AI model, a product, or something else. Let me look it up to find out what it actually is and get the relevant details."
Actions: web_search ["Kimi K2", "Kimi K2 AI"]
## Example 2: Subject You're Uncertain About
User: "What are the features of GPT-5.1?"
Good reasoning:
"I don't have current information on GPT-5.1 - my knowledge might be outdated. Let me look up GPT-5.1 to see what's available and what features it has."
Actions: web_search ["GPT-5.1", "GPT-5.1 features", "GPT-5.1 release"]
Bad reasoning (wastes time on verification):
"GPT-5.1 might not exist based on my knowledge. I need to verify if it exists first before looking for features."
## Example 3: After Actions Return Results
User: "What are the features of GPT-5.1?"
[Previous actions returned information about GPT-5.1]
Good reasoning:
"Got the information I needed about GPT-5.1. The results cover its features and capabilities - I can now provide a complete answer."
Action: done
## Example 4: Ambiguous Query
User: "Tell me about Mercury"
Good reasoning:
"Mercury could refer to several things - the planet, the element, or something else. I'll look up both main interpretations to give a useful answer."
Actions: web_search ["Mercury planet facts", "Mercury element"]
## Example 5: Current Events
User: "What's happening with AI regulation?"
Good reasoning:
"I need current news on AI regulation developments. Let me find the latest updates on this topic."
Actions: web_search ["AI regulation news 2024", "AI regulation bill latest"]
## Example 6: Technical Query
User: "How do I set up authentication in Next.js 14?"
Good reasoning:
"This is a technical implementation question. I'll find the current best practices and documentation for Next.js 14 authentication."
Actions: web_search ["Next.js 14 authentication guide", "NextAuth.js App Router"]
## Example 7: Comparison Query
User: "Prisma vs Drizzle - which should I use?"
Good reasoning:
"Need to find factual comparisons between these ORMs - performance, features, trade-offs. Let me gather objective information."
Actions: web_search ["Prisma vs Drizzle comparison 2024", "Drizzle ORM performance"]
## Example 8: Fact-Check
User: "Is it true you only use 10% of your brain?"
Good reasoning:
"This is a common claim that needs scientific verification. Let me find what the actual research says about this."
Actions: web_search ["10 percent brain myth science", "brain usage neuroscience"]
## Example 9: Recent Product
User: "What are the specs of MacBook Pro M4?"
Good reasoning:
"I need current information on the MacBook Pro M4. Let me look up the latest specs and details."
Actions: web_search ["MacBook Pro M4 specs", "MacBook Pro M4 specifications Apple"]
## Example 10: Multi-Part Query
User: "Population of Tokyo vs New York?"
Good reasoning:
"Need current population stats for both cities. I'll look up the comparison data."
Actions: web_search ["Tokyo population 2024", "Tokyo vs New York population"]
## Example 11: Calendar Task
User: "Add a meeting with John tomorrow at 3pm"
Good reasoning:
"This is a calendar task. I have all the details - meeting with John, tomorrow, 3pm. I'll create the event."
Action: create_calendar_event with the provided details
## Example 12: Email Task
User: "Send an email to sarah@company.com about the project update"
Good reasoning:
"Need to send an email. I have the recipient but need to compose appropriate content about the project update."
Action: send_email to sarah@company.com with project update content
## Example 13: Multi-Step Task
User: "What's the weather in Tokyo and add a reminder to pack an umbrella if it's rainy"
Good reasoning:
"Two things here - first I need to check Tokyo's weather, then based on that I might need to create a reminder. Let me start with the weather lookup."
Actions: web_search ["Tokyo weather today forecast"]
## Example 14: Research Then Act
User: "Find the best Italian restaurant near me and make a reservation for 7pm"
Good reasoning:
"I need to first find top Italian restaurants in the area, then make a reservation. Let me start by finding the options."
Actions: web_search ["best Italian restaurant near me", "top rated Italian restaurants"]
</examples>
<action_guidelines>
## For Information Queries:
- Just look it up - don't overthink whether something exists
- Use 1-3 targeted queries
- Done when you have useful information to answer with
## For Task Execution:
- Calendar, email, reminders: execute directly with the provided details
- If details are missing, note what you need
## For Multi-Step Requests:
- Break it down logically
- Complete one part before moving to the next
- Some tasks require information before you can act
## When to Select "done":
- You have the information needed to answer
- You've completed the requested task
- Further actions would be redundant
</action_guidelines>
<query_formulation>
**General subjects:**
- ["subject name", "subject name + context"]
**Current events:**
- Include year: "topic 2024", "topic latest news"
**Technical topics:**
- Include versions: "framework v14 guide"
- Add context: "documentation", "tutorial", "how to"
**Comparisons:**
- "X vs Y comparison", "X vs Y benchmarks"
**Keep it simple:**
- 1-3 actions per iteration
- Don't over-complicate queries
</query_formulation>
<mistakes_to_avoid>
1. **Over-assuming**: Don't assume things exist or don't exist - just look them up
2. **Verification obsession**: Don't waste actions "verifying existence" - just search for the thing directly
3. **Endless loops**: If 2-3 actions don't find something, it probably doesn't exist - report that and move on
4. **Ignoring task context**: If user wants a calendar event, don't just search - create the event
5. **Overthinking**: Keep reasoning simple and action-focused
</mistakes_to_avoid>
<output_format>
Reasoning should be 2-3 natural sentences showing your thought process and plan. Then select and configure the appropriate action(s).
</output_format>
`;
};
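For reference, rendering the prompt is just a function call; the action description is assembled elsewhere from the registered research actions, so the string below is illustrative:

```typescript
const systemPrompt = getResearcherPrompt(
  [
    '- web_search: search the web with one or more queries',
    '- done: stop researching and answer',
  ].join('\n'),
  'balanced',
);
```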

View File

@@ -0,0 +1,15 @@
export const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI-powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. Each suggestion should be relevant to the conversation and usable by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and helpful to the user. Keep in mind that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length, informative, and relevant to the conversation.
Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:
<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>
Today's date is ${new Date().toISOString()}
`;
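The model's reply then has to be stripped of the <suggestions> wrapper. The codebase has line-list output parsers for this; a standalone regex sketch of the same idea:

```typescript
// Illustrative parser; the real code uses LineListOutputParser.
const parseSuggestions = (text: string): string[] => {
  const match = text.match(/<suggestions>([\s\S]*?)<\/suggestions>/);
  if (!match) return [];
  return match[1]
    .split('\n')
    .map((line) => line.trim())
    .filter(Boolean);
};
```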

View File

@@ -1,137 +0,0 @@
import { BaseMessageLike } from '@langchain/core/messages';
export const webSearchRetrieverPrompt = `
You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, then you need to return \`not_needed\` as the response (this is because the LLM won't need to search the web for information on this topic).
If the user asks a question about some URL or wants you to summarize a PDF or a webpage (via URL), you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF, you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block; if there are no links in the follow-up question, do not insert a \`links\` XML block in your response.
**Note**: All user messages are individual entities and should be treated as such; do not mix conversations.
`;
export const webSearchRetrieverFewShots: BaseMessageLike[] = [
[
'user',
`<conversation>
</conversation>
<query>
What is the capital of France
</query>`,
],
[
'assistant',
`<question>
Capital of france
</question>`,
],
[
'user',
`<conversation>
</conversation>
<query>
Hi, how are you?
</query>`,
],
[
'assistant',
`<question>
not_needed
</question>`,
],
[
'user',
`<conversation>
</conversation>
<query>
What is Docker?
</query>`,
],
[
'assistant',
`<question>
What is Docker
</question>`,
],
[
'user',
`<conversation>
</conversation>
<query>
Can you tell me what is X from https://example.com
</query>`,
],
[
'assistant',
`<question>
What is X?
</question>
<links>
https://example.com
</links>`,
],
[
'user',
`<conversation>
</conversation>
<query>
Summarize the content from https://example.com
</query>`,
],
[
'assistant',
`<question>
summarize
</question>
<links>
https://example.com
</links>`,
],
];
export const webSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable.
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate.
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate.
### Citation Requirements
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
### Special Instructions
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity.
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
### User instructions
These instructions are shared with you by the user, not by the system. You will have to follow them, but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.
- Provide explanations or historical context as needed to enhance understanding.
- End with a conclusion or overall perspective if relevant.
<context>
{context}
</context>
Current date & time in ISO format (UTC timezone) is: {date}.
`;

View File

@@ -1,17 +0,0 @@
export const writingAssistantPrompt = `
You are Perplexica, an AI model who is an expert at searching the web and answering users' queries. You are currently set on focus mode 'Writing Assistant', which means you will be helping the user write a response to a given query.
Since you are a writing assistant, you do not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest that they switch to a different focus mode.
You will be shared a context that can contain information from files the user has uploaded to get answers from. You will have to generate answers based on that context.
You have to cite the answer using [number] notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from.
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query, like [number1][number2].
However, you do not need to cite it using the same number every time. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
### User instructions
These instructions are shared with you by the user, not by the system. You will have to follow them, but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
<context>
{context}
</context>
`;

View File

@@ -1,59 +0,0 @@
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import prompts from '../prompts';
export const searchHandlers: Record<string, MetaSearchAgent> = {
webSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
queryGeneratorFewShots: [],
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
}),
};

View File

@@ -1,514 +0,0 @@
import { ChatOpenAI } from '@langchain/openai';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
import {
ChatPromptTemplate,
MessagesPlaceholder,
PromptTemplate,
} from '@langchain/core/prompts';
import {
RunnableLambda,
RunnableMap,
RunnableSequence,
} from '@langchain/core/runnables';
import { BaseMessage, BaseMessageLike } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
import { getDocumentsFromLinks } from '../utils/documents';
import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../searxng';
import path from 'node:path';
import fs from 'node:fs';
import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
export interface MetaSearchAgentType {
searchAndAnswer: (
message: string,
history: BaseMessage[],
llm: BaseChatModel,
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) => Promise<eventEmitter>;
}
interface Config {
searchWeb: boolean;
rerank: boolean;
rerankThreshold: number;
queryGeneratorPrompt: string;
queryGeneratorFewShots: BaseMessageLike[];
responsePrompt: string;
activeEngines: string[];
}
type BasicChainInput = {
chat_history: BaseMessage[];
query: string;
};
class MetaSearchAgent implements MetaSearchAgentType {
private config: Config;
private strParser = new StringOutputParser();
constructor(config: Config) {
this.config = config;
}
private async createSearchRetrieverChain(llm: BaseChatModel) {
(llm as unknown as ChatOpenAI).temperature = 0;
return RunnableSequence.from([
ChatPromptTemplate.fromMessages([
['system', this.config.queryGeneratorPrompt],
...this.config.queryGeneratorFewShots,
[
'user',
`
<conversation>
{chat_history}
</conversation>
<query>
{query}
</query>
`,
],
]),
llm,
this.strParser,
RunnableLambda.from(async (input: string) => {
const linksOutputParser = new LineListOutputParser({
key: 'links',
});
const questionOutputParser = new LineOutputParser({
key: 'question',
});
const links = await linksOutputParser.parse(input);
let question = (await questionOutputParser.parse(input)) ?? input;
if (question === 'not_needed') {
return { query: '', docs: [] };
}
if (links.length > 0) {
if (question.length === 0) {
question = 'summarize';
}
let docs: Document[] = [];
const linkDocs = await getDocumentsFromLinks({ links });
const docGroups: Document[] = [];
linkDocs.map((doc) => {
const URLDocExists = docGroups.find(
(d) =>
d.metadata.url === doc.metadata.url &&
d.metadata.totalDocs < 10,
);
if (!URLDocExists) {
docGroups.push({
...doc,
metadata: {
...doc.metadata,
totalDocs: 1,
},
});
}
const docIndex = docGroups.findIndex(
(d) =>
d.metadata.url === doc.metadata.url &&
d.metadata.totalDocs < 10,
);
if (docIndex !== -1) {
docGroups[docIndex].pageContent =
docGroups[docIndex].pageContent + `\n\n` + doc.pageContent;
docGroups[docIndex].metadata.totalDocs += 1;
}
});
await Promise.all(
docGroups.map(async (doc) => {
const res = await llm.invoke(`
You are a web search summarizer, tasked with summarizing a piece of text retrieved from a web search. Your job is to summarize the
text into a detailed, 2-4 paragraph explanation that captures the main ideas and provides a comprehensive answer to the query.
If the query is \"summarize\", you should provide a detailed summary of the text. If the query is a specific question, you should answer it in the summary.
- **Journalistic tone**: The summary should sound professional and journalistic, not too casual or vague.
- **Thorough and detailed**: Ensure that every key point from the text is captured and that the summary directly answers the query.
- **Not too lengthy, but detailed**: The summary should be informative but not excessively long. Focus on providing detailed information in a concise format.
The text will be shared inside the \`text\` XML tag, and the query inside the \`query\` XML tag.
<example>
1. \`<text>
Docker is a set of platform-as-a-service products that use OS-level virtualization to deliver software in packages called containers.
It was first released in 2013 and is developed by Docker, Inc. Docker is designed to make it easier to create, deploy, and run applications
by using containers.
</text>
<query>
What is Docker and how does it work?
</query>
Response:
Docker is a revolutionary platform-as-a-service product developed by Docker, Inc., that uses container technology to make application
deployment more efficient. It allows developers to package their software with all necessary dependencies, making it easier to run in
any environment. Released in 2013, Docker has transformed the way applications are built, deployed, and managed.
\`
2. \`<text>
The theory of relativity, or simply relativity, encompasses two interrelated theories of Albert Einstein: special relativity and general
relativity. However, the word "relativity" is sometimes used in reference to Galilean invariance. The term "theory of relativity" was based
on the expression "relative theory" used by Max Planck in 1906. The theory of relativity usually encompasses two interrelated theories by
Albert Einstein: special relativity and general relativity. Special relativity applies to all physical phenomena in the absence of gravity.
General relativity explains the law of gravitation and its relation to other forces of nature. It applies to the cosmological and astrophysical
realm, including astronomy.
</text>
<query>
summarize
</query>
Response:
The theory of relativity, developed by Albert Einstein, encompasses two main theories: special relativity and general relativity. Special
relativity applies to all physical phenomena in the absence of gravity, while general relativity explains the law of gravitation and its
relation to other forces of nature. The theory of relativity is based on the concept of "relative theory," as introduced by Max Planck in
1906. It is a fundamental theory in physics that has revolutionized our understanding of the universe.
\`
</example>
Everything below is the actual data you will be working with. Good luck!
<query>
${question}
</query>
<text>
${doc.pageContent}
</text>
Make sure to answer the query in the summary.
`);
const document = new Document({
pageContent: res.content as string,
metadata: {
title: doc.metadata.title,
url: doc.metadata.url,
},
});
docs.push(document);
}),
);
return { query: question, docs: docs };
} else {
question = question.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
});
const documents = res.results.map(
(result) =>
new Document({
pageContent:
result.content ||
(this.config.activeEngines.includes('youtube')
? result.title
: '') /* Todo: Implement transcript grabbing using Youtubei (source: https://www.npmjs.com/package/youtubei) */,
metadata: {
title: result.title,
url: result.url,
...(result.img_src && { img_src: result.img_src }),
},
}),
);
return { query: question, docs: documents };
}
}),
]);
}
private async createAnsweringChain(
llm: BaseChatModel,
fileIds: string[],
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
systemInstructions: string,
) {
return RunnableSequence.from([
RunnableMap.from({
systemInstructions: () => systemInstructions,
query: (input: BasicChainInput) => input.query,
chat_history: (input: BasicChainInput) => input.chat_history,
date: () => new Date().toISOString(),
context: RunnableLambda.from(async (input: BasicChainInput) => {
const processedHistory = formatChatHistoryAsString(
input.chat_history,
);
let docs: Document[] | null = null;
let query = input.query;
if (this.config.searchWeb) {
const searchRetrieverChain =
await this.createSearchRetrieverChain(llm);
const searchRetrieverResult = await searchRetrieverChain.invoke({
chat_history: processedHistory,
query,
});
query = searchRetrieverResult.query;
docs = searchRetrieverResult.docs;
}
const sortedDocs = await this.rerankDocs(
query,
docs ?? [],
fileIds,
embeddings,
optimizationMode,
);
return sortedDocs;
})
.withConfig({
runName: 'FinalSourceRetriever',
})
.pipe(this.processDocs),
}),
ChatPromptTemplate.fromMessages([
['system', this.config.responsePrompt],
new MessagesPlaceholder('chat_history'),
['user', '{query}'],
]),
llm,
this.strParser,
]).withConfig({
runName: 'FinalResponseGenerator',
});
}
private async rerankDocs(
query: string,
docs: Document[],
fileIds: string[],
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
) {
if (docs.length === 0 && fileIds.length === 0) {
return docs;
}
const filesData = fileIds
.map((file) => {
const filePath = path.join(process.cwd(), 'uploads', file);
const contentPath = filePath + '-extracted.json';
const embeddingsPath = filePath + '-embeddings.json';
const content = JSON.parse(fs.readFileSync(contentPath, 'utf8'));
const embeddings = JSON.parse(fs.readFileSync(embeddingsPath, 'utf8'));
const fileSimilaritySearchObject = content.contents.map(
(c: string, i: number) => {
return {
fileName: content.title,
content: c,
embeddings: embeddings.embeddings[i],
};
},
);
return fileSimilaritySearchObject;
})
.flat();
if (query.toLocaleLowerCase() === 'summarize') {
return docs.slice(0, 15);
}
const docsWithContent = docs.filter(
(doc) => doc.pageContent && doc.pageContent.length > 0,
);
if (optimizationMode === 'speed' || this.config.rerank === false) {
if (filesData.length > 0) {
const [queryEmbedding] = await Promise.all([
embeddings.embedQuery(query),
]);
const fileDocs = filesData.map((fileData) => {
return new Document({
pageContent: fileData.content,
metadata: {
title: fileData.fileName,
url: `File`,
},
});
});
const similarity = filesData.map((fileData, i) => {
const sim = computeSimilarity(queryEmbedding, fileData.embeddings);
return {
index: i,
similarity: sim,
};
});
let sortedDocs = similarity
.filter(
(sim) => sim.similarity > (this.config.rerankThreshold ?? 0.3),
)
.sort((a, b) => b.similarity - a.similarity)
.slice(0, 15)
.map((sim) => fileDocs[sim.index]);
sortedDocs =
docsWithContent.length > 0 ? sortedDocs.slice(0, 8) : sortedDocs;
return [
...sortedDocs,
...docsWithContent.slice(0, 15 - sortedDocs.length),
];
} else {
return docsWithContent.slice(0, 15);
}
} else if (optimizationMode === 'balanced') {
const [docEmbeddings, queryEmbedding] = await Promise.all([
embeddings.embedDocuments(
docsWithContent.map((doc) => doc.pageContent),
),
embeddings.embedQuery(query),
]);
docsWithContent.push(
...filesData.map((fileData) => {
return new Document({
pageContent: fileData.content,
metadata: {
title: fileData.fileName,
url: `File`,
},
});
}),
);
docEmbeddings.push(...filesData.map((fileData) => fileData.embeddings));
const similarity = docEmbeddings.map((docEmbedding, i) => {
const sim = computeSimilarity(queryEmbedding, docEmbedding);
return {
index: i,
similarity: sim,
};
});
const sortedDocs = similarity
.filter((sim) => sim.similarity > (this.config.rerankThreshold ?? 0.3))
.sort((a, b) => b.similarity - a.similarity)
.slice(0, 15)
.map((sim) => docsWithContent[sim.index]);
return sortedDocs;
}
return [];
}
private processDocs(docs: Document[]) {
return docs
.map(
(_, index) =>
`${index + 1}. ${docs[index].metadata.title} ${docs[index].pageContent}`,
)
.join('\n');
}
private async handleStream(
stream: AsyncGenerator<StreamEvent, any, any>,
emitter: eventEmitter,
) {
for await (const event of stream) {
if (
event.event === 'on_chain_end' &&
event.name === 'FinalSourceRetriever'
) {
emitter.emit(
'data',
JSON.stringify({ type: 'sources', data: event.data.output }),
);
}
if (
event.event === 'on_chain_stream' &&
event.name === 'FinalResponseGenerator'
) {
emitter.emit(
'data',
JSON.stringify({ type: 'response', data: event.data.chunk }),
);
}
if (
event.event === 'on_chain_end' &&
event.name === 'FinalResponseGenerator'
) {
emitter.emit('end');
}
}
}
async searchAndAnswer(
message: string,
history: BaseMessage[],
llm: BaseChatModel,
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) {
const emitter = new eventEmitter();
const answeringChain = await this.createAnsweringChain(
llm,
fileIds,
embeddings,
optimizationMode,
systemInstructions,
);
const stream = answeringChain.streamEvents(
{
chat_history: history,
query: message,
},
{
version: 'v1',
},
);
this.handleStream(stream, emitter);
return emitter;
}
}
export default MetaSearchAgent;

src/lib/session.ts Normal file
View File

@@ -0,0 +1,81 @@
import { EventEmitter } from 'events'; // EventEmitter lives in 'events', not 'stream'
import { applyPatch } from 'rfc6902';
class SessionManager {
private static sessions = new Map<string, SessionManager>();
readonly id: string;
private blocks = new Map<string, Block>();
private events: { event: string; data: any }[] = [];
private emitter = new EventEmitter();
private TTL_MS = 30 * 60 * 1000;
constructor(id?: string) {
this.id = id ?? crypto.randomUUID();
setTimeout(() => {
SessionManager.sessions.delete(this.id);
}, this.TTL_MS);
}
static getSession(id: string): SessionManager | undefined {
return this.sessions.get(id);
}
static getAllSessions(): SessionManager[] {
return Array.from(this.sessions.values());
}
static createSession(): SessionManager {
const session = new SessionManager();
this.sessions.set(session.id, session);
return session;
}
removeAllListeners() {
this.emitter.removeAllListeners();
}
emit(event: string, data: any) {
this.emitter.emit(event, data);
this.events.push({ event, data });
}
emitBlock(block: Block) {
this.blocks.set(block.id, block);
this.emit('data', {
type: 'block',
block: block,
});
}
getBlock(blockId: string): Block | undefined {
return this.blocks.get(blockId);
}
updateBlock(blockId: string, patch: any[]) {
const block = this.blocks.get(blockId);
if (block) {
applyPatch(block, patch);
this.blocks.set(blockId, block);
this.emit('data', {
type: 'updateBlock',
blockId: blockId,
patch: patch,
});
}
}
addListener(event: string, listener: (data: any) => void) {
this.emitter.addListener(event, listener);
}
replay() {
for (const { event, data } of this.events) {
/* Using emitter directly to avoid infinite loop */
this.emitter.emit(event, data);
}
}
}
export default SessionManager;
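A sketch of the intended streaming flow: emit a block once, then send RFC 6902 patches as it grows. createPatch comes from the same rfc6902 package, and the block shape is TextBlock from src/lib/types.ts; the listener body is illustrative:

```typescript
import { createPatch } from 'rfc6902';

const session = SessionManager.createSession();
session.addListener('data', (data) => {
  // forward to the client, e.g. over SSE
  console.log(data);
});

const block: TextBlock = { id: crypto.randomUUID(), type: 'text', data: '' };
session.emitBlock(block);

// As tokens arrive, diff the stored block against the updated one and
// emit only the patch; listeners that joined late can call replay().
const updated = { ...block, data: 'Hello' };
session.updateBlock(block.id, createPatch(session.getBlock(block.id)!, updated));
```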

src/lib/types.ts Normal file
View File

@@ -0,0 +1,71 @@
type ChatTurnMessage = {
role: 'user' | 'assistant' | 'system';
content: string;
};
type Chunk = {
content: string;
metadata: Record<string, any>;
};
type TextBlock = {
id: string;
type: 'text';
data: string;
};
type SourceBlock = {
id: string;
type: 'source';
data: Chunk[];
};
type SuggestionBlock = {
id: string;
type: 'suggestion';
data: string[];
};
type WidgetBlock = {
id: string;
type: 'widget';
data: {
widgetType: string;
params: Record<string, any>;
};
};
type ReasoningResearchBlock = {
id: string;
reasoning: string;
};
type SearchingResearchBlock = {
id: string;
searching: string[];
};
type ReadingResearchBlock = {
id: string;
reading: Chunk[];
};
type ResearchBlockSubStep =
| ReasoningResearchBlock
| SearchingResearchBlock
| ReadingResearchBlock;
type ResearchBlock = {
id: string;
type: 'research';
data: {
subSteps: ResearchBlockSubStep[];
};
};
type Block =
| TextBlock
| SourceBlock
| SuggestionBlock
| WidgetBlock
| ResearchBlock;
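An illustrative value of the richest variant, ResearchBlock, with mixed sub-steps (ids and contents are placeholders):

```typescript
const researchBlock: ResearchBlock = {
  id: 'block-1',
  type: 'research',
  data: {
    subSteps: [
      { id: 'step-1', reasoning: 'Not sure what Kimi K2 is; look it up.' },
      { id: 'step-2', searching: ['Kimi K2', 'Kimi K2 AI'] },
      { id: 'step-3', reading: [{ content: '...', metadata: { url: '...' } }] },
    ],
  },
};
```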

View File

@@ -1,10 +1,8 @@
import { BaseMessage, isAIMessage } from '@langchain/core/messages';
const formatChatHistoryAsString = (history: BaseMessage[]) => {
const formatChatHistoryAsString = (history: Message[]) => {
return history
.map(
(message) =>
`${isAIMessage(message) ? 'AI' : 'User'}: ${message.content}`,
`${message.role === 'assistant' ? 'AI' : 'User'}: ${message.content}`,
)
.join('\n');
};

yarn.lock
View File

@@ -746,19 +746,19 @@
"@jridgewell/resolve-uri" "^3.1.0"
"@jridgewell/sourcemap-codec" "^1.4.14"
"@langchain/anthropic@^1.0.0":
version "1.0.0"
resolved "https://registry.yarnpkg.com/@langchain/anthropic/-/anthropic-1.0.0.tgz#48535c5682851bf8fddcf37aa7ca78d4d93da932"
integrity sha512-Lud/FrkFmXMYW5R9y0FC+RGdgjBBVQ2JAnG3A8E1I4+sqv5JgJttw3HKRpFkyBUSyacs6LMfSn5dbJ6TT9nMiQ==
"@langchain/anthropic@^1.0.1":
version "1.0.1"
resolved "https://registry.yarnpkg.com/@langchain/anthropic/-/anthropic-1.0.1.tgz#a9f836b11ecbce282fc2afb8d707c52fd37711c4"
integrity sha512-yVKePAT+nNHtybyyPlWqiq6lqcoDlIuMgL9B4WMEU5gbmzL170iodiqcgcZNFQLOC1V2wCOzywq6Zr0kB24AFg==
dependencies:
"@anthropic-ai/sdk" "^0.65.0"
"@langchain/classic@1.0.0":
version "1.0.0"
resolved "https://registry.yarnpkg.com/@langchain/classic/-/classic-1.0.0.tgz#acbc15eebba03499cf93e73d2c93703a3da0a46e"
integrity sha512-darZFvO5g5e3TqZ4rvZ938F94D4a34v2ZdWfyipmyu7WB4RXMshmYtWCp98o4ec3bfRD9S4+oHMmaPcnk5cs5A==
"@langchain/classic@1.0.3":
version "1.0.3"
resolved "https://registry.yarnpkg.com/@langchain/classic/-/classic-1.0.3.tgz#92482cb4cb8692407b4ecde0df312f035934472f"
integrity sha512-XyoaiJSi4y7SzrZMCb3DdDfC+M3gqIQpVH2cOCh9xQf4244jNrncpLXF/MwOJYWxzTsjfcCAHIbFJ0kSH5nqmg==
dependencies:
"@langchain/openai" "1.0.0-alpha.3"
"@langchain/openai" "1.1.1"
"@langchain/textsplitters" "1.0.0"
handlebars "^4.7.8"
js-yaml "^4.1.0"
@@ -771,24 +771,24 @@
optionalDependencies:
langsmith "^0.3.64"
"@langchain/community@^1.0.0":
version "1.0.0"
resolved "https://registry.yarnpkg.com/@langchain/community/-/community-1.0.0.tgz#8e587605b7c981882e20281aa9e644a166620145"
integrity sha512-CM4vUZHaFHq8HpWBMIWPO5bo/rmRPJ1/iaJk7s8CghkkQ0WLaZzDtoG/wJKJZMDJOUVCtZKTw+TytlGu00/9dg==
"@langchain/community@^1.0.3":
version "1.0.3"
resolved "https://registry.yarnpkg.com/@langchain/community/-/community-1.0.3.tgz#278c82eee22ff37b120e182b07b7c23ffc6786ab"
integrity sha512-86L7qooSY8Fh5Sf2Tu/X8PvDJqvEXohyZUGusuv0XtnWGivwtecBm0vEbVPkLF07I2ZMtyAGzHJOblbveq6Nmg==
dependencies:
"@langchain/classic" "1.0.0"
"@langchain/openai" "1.0.0"
"@langchain/classic" "1.0.3"
"@langchain/openai" "1.1.1"
binary-extensions "^2.2.0"
expr-eval "^2.0.2"
flat "^5.0.2"
js-yaml "^4.1.0"
math-expression-evaluator "^2.0.0"
uuid "^10.0.0"
zod "^3.25.76 || ^4"
"@langchain/core@^1.0.1":
version "1.0.1"
resolved "https://registry.yarnpkg.com/@langchain/core/-/core-1.0.1.tgz#c2bdbdff87649fe17b2c86bf535d749ac73a586c"
integrity sha512-hVM3EkojYOk4ISJQKjLuWYSH6kyyOFlZIrLFETDA1L0Z2/Iu0q32aJawZ0FDn6rlXE8QZjBt/9OaOL36rXc05w==
"@langchain/core@^1.0.5":
version "1.0.5"
resolved "https://registry.yarnpkg.com/@langchain/core/-/core-1.0.5.tgz#1e20ecce80fa4d0b979ea05b24b879b8357d8092"
integrity sha512-9Hy/b9+j+mm0Bhnm8xD9B0KpBYTidroLrDHdbrHoMC2DqXoY2umvi1M3M/9D744qsMSaIMP0ZwFcy5YbqI/dGw==
dependencies:
"@cfworker/json-schema" "^4.0.2"
ansi-styles "^5.0.0"
@@ -802,18 +802,18 @@
     uuid "^10.0.0"
     zod "^3.25.76 || ^4"
 
-"@langchain/google-genai@^1.0.0":
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/@langchain/google-genai/-/google-genai-1.0.0.tgz#2785fa163788cb6214dffc1dc29fcd5bbb751493"
-  integrity sha512-ICUBZl/46nG6+Yhe5v7kp/2TQBGOzqEkpfKPLDeNyJ4x9OOL46xsW3ZZrHJjhGMQuR6/JMmQMTU9kLoYgsd1Tg==
+"@langchain/google-genai@^1.0.1":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/@langchain/google-genai/-/google-genai-1.0.1.tgz#3601645f652f24e3beb55acc61878070b49c24ed"
+  integrity sha512-a9Bzaswp1P+eA2V8hAWSBypqjxmH+/zhOY1TBdalQuPQBTRH35jBMVgX3CTTAheAzBUGQtlDD4/dR9tyemDbhw==
   dependencies:
     "@google/generative-ai" "^0.24.0"
     uuid "^11.1.0"
 
-"@langchain/groq@^1.0.0":
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/@langchain/groq/-/groq-1.0.0.tgz#413b02158761ff406238467325cd4f9fe0990f3a"
-  integrity sha512-6fG9MEQHNXnxgObFHSPh+BPYyTGcoDnKd+GhI9l96cpHh+QNI+IvypicRCZVSsLdqzRCFHISvBQaH+SP5vgjIw==
+"@langchain/groq@^1.0.1":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/@langchain/groq/-/groq-1.0.1.tgz#7ec8822cd2f29eef4ae0f9c20f67268d1924ab96"
+  integrity sha512-vDQzv6A3mjG0/W/7vL4Iq+dnmhSbMHln+b7Rna810trjZzfNPZhAP6omqZyzCKIqjsQYUH4ODLnSUCNiarfYsQ==
   dependencies:
     groq-sdk "^0.19.0"
@@ -842,30 +842,30 @@
     "@langchain/langgraph-sdk" "~1.0.0"
     uuid "^10.0.0"
 
-"@langchain/ollama@^1.0.0":
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/@langchain/ollama/-/ollama-1.0.0.tgz#803c353e9dfb1a9e7b20f1460a6a201fec29bb77"
-  integrity sha512-zqn6i7haMjvZW4FQWo0GrF4wYL5mLurdL0qoe+moYWYSCGaay4K7e/4dqM5C/MR16/HPFDzFbBRMkni2PDRBgA==
+"@langchain/langgraph@^1.0.1":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-1.0.1.tgz#d0be714653e8a27665f86ea795c5c34189455406"
+  integrity sha512-7y8OTDLrHrpJ55Y5x7c7zU2BbqNllXwxM106Xrd+NaQB5CpEb4hbUfIwe4XmhhscKPwvhXAq3tjeUxw9MCiurQ==
+  dependencies:
+    "@langchain/langgraph-checkpoint" "^1.0.0"
+    "@langchain/langgraph-sdk" "~1.0.0"
+    uuid "^10.0.0"
+
+"@langchain/ollama@^1.0.1":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/@langchain/ollama/-/ollama-1.0.1.tgz#c63ac6db65110beef4020a5e2b167ad0bc678d33"
+  integrity sha512-Pe32hhTpMvnRlNFJxkdu6r1QzsONGz5uvoLiMU1TpgAUu7EyKr2osymlgjBLqDe2vMKUmqHb+yWRH0IppDBUOg==
   dependencies:
     ollama "^0.5.12"
     uuid "^10.0.0"
 
-"@langchain/openai@1.0.0", "@langchain/openai@^1.0.0":
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-1.0.0.tgz#03b15312286b30ce0149f6052620c6c95b4387bc"
-  integrity sha512-olKEUIjb3HBOiD/NR056iGJz4wiN6HhQ/u65YmGWYadWWoKOcGwheBw/FE0x6SH4zDlI3QmP+vMhuQoaww19BQ==
+"@langchain/openai@1.1.1", "@langchain/openai@^1.1.1":
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-1.1.1.tgz#67ddcf54ee7ac402f6b75b4b9e25447e78c56a93"
+  integrity sha512-0kUaXejo/sn6QAohWHDaAUapC4CJRkJIajGaWfJC+llSqpDBnmBE1oHg1M2fi1OCeP+ns9SxB6BTsq4Qbiqmig==
   dependencies:
     js-tiktoken "^1.0.12"
-    openai "^6.3.0"
-    zod "^3.25.76 || ^4"
-
-"@langchain/openai@1.0.0-alpha.3":
-  version "1.0.0-alpha.3"
-  resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-1.0.0-alpha.3.tgz#35c4e770e3421b75a226087af54fbeff147e201a"
-  integrity sha512-re2NXLYeLatPzoB6YRoFgB1fW6i5ygcLGa7PlNOhi3f93uU1vSlWMgjkO9dcN9ALmr/bhoruqJEn7U0Eva+6/w==
-  dependencies:
-    js-tiktoken "^1.0.12"
-    openai "^6.3.0"
+    openai "^6.9.0"
     zod "^3.25.76 || ^4"
 
 "@langchain/textsplitters@1.0.0", "@langchain/textsplitters@^1.0.0":
@@ -2607,11 +2607,6 @@ expand-template@^2.0.3:
   resolved "https://registry.yarnpkg.com/expand-template/-/expand-template-2.0.3.tgz#6e14b3fcee0f3a6340ecb57d2e8918692052a47c"
   integrity sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==
 
-expr-eval@^2.0.2:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/expr-eval/-/expr-eval-2.0.2.tgz#fa6f044a7b0c93fde830954eb9c5b0f7fbc7e201"
-  integrity sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg==
-
 fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3:
   version "3.1.3"
   resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525"
@@ -3514,17 +3509,16 @@ kuler@^2.0.0:
   resolved "https://registry.yarnpkg.com/kuler/-/kuler-2.0.0.tgz#e2c570a3800388fb44407e851531c1d670b061b3"
   integrity sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==
 
-langchain@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/langchain/-/langchain-1.0.1.tgz#fb181176f4aa443ef02e9e5b563bcb4e170dfeb6"
-  integrity sha512-IT4JBVbKBh2AjaUFT9OsmOfeK3UbKy3SgdzZOuvet25sAaMpAR8IaM9XVddRs+OXQqVg6sOS01KUUVCJksVhHg==
+langchain@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/langchain/-/langchain-1.0.4.tgz#c4fa22d927f41d56c356ecfccea5c08ae7b682ef"
+  integrity sha512-g7z2kKvnXOecybbVGHfI2ZmdmP309mxC1FYlq6WC/7RsKgX5MwY9gBjwK16mpKOaozOD9QCo1Ia7o2UcUBRb9Q==
   dependencies:
     "@langchain/langgraph" "^1.0.0"
     "@langchain/langgraph-checkpoint" "^1.0.0"
+    langsmith "~0.3.74"
     uuid "^10.0.0"
     zod "^3.25.76 || ^4"
-  optionalDependencies:
-    langsmith "^0.3.64"
 
 langsmith@^0.3.64:
   version "0.3.74"
@@ -3539,6 +3533,19 @@ langsmith@^0.3.64:
     semver "^7.6.3"
     uuid "^10.0.0"
 
+langsmith@~0.3.74:
+  version "0.3.79"
+  resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.3.79.tgz#6c845644da26e7fdd8e9b80706091669fc43bda4"
+  integrity sha512-j5uiAsyy90zxlxaMuGjb7EdcL51Yx61SpKfDOI1nMPBbemGju+lf47he4e59Hp5K63CY8XWgFP42WeZ+zuIU4Q==
+  dependencies:
+    "@types/uuid" "^10.0.0"
+    chalk "^4.1.2"
+    console-table-printer "^2.12.1"
+    p-queue "^6.6.2"
+    p-retry "4"
+    semver "^7.6.3"
+    uuid "^10.0.0"
+
 language-subtag-registry@^0.3.20:
   version "0.3.22"
   resolved "https://registry.yarnpkg.com/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz#2e1500861b2e457eba7e7ae86877cbd08fa1fd1d"
@@ -3686,6 +3693,11 @@ matcher@^3.0.0:
   dependencies:
     escape-string-regexp "^4.0.0"
 
+math-expression-evaluator@^2.0.0:
+  version "2.0.7"
+  resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-2.0.7.tgz#dc99a80ce2bf7f9b7df878126feb5c506c1fdf5f"
+  integrity sha512-uwliJZ6BPHRq4eiqNWxZBDzKUiS5RIynFFcgchqhBOloVLVBpZpNG8jRYkedLcBvhph8TnRyWEuxPqiQcwIdog==
+
 math-intrinsics@^1.1.0:
   version "1.1.0"
   resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
@@ -3980,6 +3992,13 @@ ollama@^0.5.12:
   dependencies:
     whatwg-fetch "^3.6.20"
 
+ollama@^0.6.3:
+  version "0.6.3"
+  resolved "https://registry.yarnpkg.com/ollama/-/ollama-0.6.3.tgz#b188573dd0ccb3b4759c1f8fa85067cb17f6673c"
+  integrity sha512-KEWEhIqE5wtfzEIZbDCLH51VFZ6Z3ZSa6sIOg/E/tBV8S51flyqBOXi+bRxlOYKDf8i327zG9eSTb8IJxvm3Zg==
+  dependencies:
+    whatwg-fetch "^3.6.20"
+
 once@^1.3.0, once@^1.3.1, once@^1.4.0:
   version "1.4.0"
   resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
@@ -4025,10 +4044,10 @@ onnxruntime-web@1.22.0-dev.20250409-89f8206ba4:
     platform "^1.3.6"
     protobufjs "^7.2.4"
 
-openai@^6.3.0:
-  version "6.5.0"
-  resolved "https://registry.yarnpkg.com/openai/-/openai-6.5.0.tgz#7dd9c4c0ca6e394c1d1e738b2000e084024685b2"
-  integrity sha512-bNqJ15Ijbs41KuJ2iYz/mGAruFHzQQt7zXo4EvjNLoB64aJdgn1jlMeDTsXjEg+idVYafg57QB/5Rd16oqvZ6A==
+openai@^6.9.0:
+  version "6.9.0"
+  resolved "https://registry.yarnpkg.com/openai/-/openai-6.9.0.tgz#acd15b2233c42b165981f3de8f4cfce27f844fce"
+  integrity sha512-n2sJRYmM+xfJ0l3OfH8eNnIyv3nQY7L08gZQu3dw6wSdfPtKAk92L83M2NIP5SS8Cl/bsBBG3yKzEOjkx0O+7A==
 
 openapi-types@^12.1.3:
   version "12.1.3"
@@ -4114,6 +4133,11 @@ parseley@^0.12.0:
     leac "^0.6.0"
     peberminta "^0.9.0"
 
+partial-json@^0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/partial-json/-/partial-json-0.1.7.tgz#b735a89edb3e25f231a3c4caeaae71dc9f578605"
+  integrity sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==
+
 path-exists@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
@@ -4506,6 +4530,11 @@ reusify@^1.0.4:
   resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76"
   integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==
 
+rfc6902@^5.1.2:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/rfc6902/-/rfc6902-5.1.2.tgz#774262ba7b032ab9abf9eb8e0312927e8f425062"
+  integrity sha512-zxcb+PWlE8PwX0tiKE6zP97THQ8/lHmeiwucRrJ3YFupWEmp25RmFSlB1dNTqjkovwqG4iq+u1gzJMBS3um8mA==
+
 rgbcolor@^1.0.1:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/rgbcolor/-/rgbcolor-1.0.1.tgz#d6505ecdb304a6595da26fa4b43307306775945d"
@@ -5491,12 +5520,7 @@ yocto-queue@^0.1.0:
   resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b"
  integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==
 
-zod@^3.22.4:
-  version "3.22.4"
-  resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.4.tgz#f31c3a9386f61b1f228af56faa9255e845cf3fff"
-  integrity sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==
-
-"zod@^3.25.76 || ^4":
+"zod@^3.25.76 || ^4", zod@^4.1.12:
   version "4.1.12"
   resolved "https://registry.yarnpkg.com/zod/-/zod-4.1.12.tgz#64f1ea53d00eab91853195653b5af9eee68970f0"
   integrity sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ==