Mirror of https://github.com/ItzCrazyKns/Perplexica.git, synced 2025-11-22 21:18:15 +00:00.

Compare commits: master...3d5d04eda0 (10 commits)
- 3d5d04eda0
- 07a17925b1
- 3bcf646af1
- e499c0b96e
- 33b736e1e8
- 5e1746f646
- 41fe009847
- 1a8889c71c
- 2e736613c5
- 046daf442a
Binary file not shown (before: 6.5 KiB).
README.md (21 changed lines)
@@ -49,29 +49,10 @@ Perplexica's development is powered by the generous support of our sponsors. The
     <img alt="Warp Terminal" src=".assets/sponsers/warp.png" width="100%">
   </a>
 
-### **✨ [Try Warp - The AI-Powered Terminal →](https://www.warp.dev/perplexica)**
-
-Warp is revolutionizing development workflows with AI-powered features, modern UX, and blazing-fast performance. Used by developers at top companies worldwide.
+**[Warp](https://www.warp.dev/perplexica)** - The AI-powered terminal revolutionizing development workflows
 
 </div>
 
----
-
-We'd also like to thank the following partners for their generous support:
-
-<table>
-  <tr>
-    <td width="100" align="center">
-      <a href="https://dashboard.exa.ai" target="_blank">
-        <img src=".assets/sponsers/exa.png" alt="Exa" width="80" height="80" style="border-radius: .75rem;" />
-      </a>
-    </td>
-    <td>
-      <a href="https://dashboard.exa.ai">Exa</a> • The Perfect Web Search API for LLMs - web search, crawling, deep research, and answer APIs
-    </td>
-  </tr>
-</table>
-
 ## Installation
 
 There are mainly 2 ways of installing Perplexica - With Docker, Without Docker. Using Docker is highly recommended.
Image search API route:

@@ -1,4 +1,4 @@
-import handleImageSearch from '@/lib/chains/imageSearchAgent';
+import searchImages from '@/lib/agents/media/image';
 import ModelRegistry from '@/lib/models/registry';
 import { ModelWithProvider } from '@/lib/models/types';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
@@ -13,6 +13,13 @@ export const POST = async (req: Request) => {
   try {
     const body: ImageSearchBody = await req.json();
 
+    const registry = new ModelRegistry();
+
+    const llm = await registry.loadChatModel(
+      body.chatModel.providerId,
+      body.chatModel.key,
+    );
+
     const chatHistory = body.chatHistory
       .map((msg: any) => {
         if (msg.role === 'user') {
@@ -23,16 +30,9 @@ export const POST = async (req: Request) => {
       })
       .filter((msg) => msg !== undefined) as BaseMessage[];
 
-    const registry = new ModelRegistry();
-
-    const llm = await registry.loadChatModel(
-      body.chatModel.providerId,
-      body.chatModel.key,
-    );
-
-    const images = await handleImageSearch(
+    const images = await searchImages(
       {
-        chat_history: chatHistory,
+        chatHistory: chatHistory,
         query: body.query,
       },
       llm,
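For context, a minimal sketch of how a client might call this refactored route. The endpoint path and the chatModel values are illustrative assumptions, not taken from the diff; only the body shape is grounded in the handler above:

// Hypothetical client-side call; '/api/images' and the provider/key values
// are assumptions for illustration. The body mirrors ImageSearchBody above.
const res = await fetch('/api/images', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    query: 'What does the Eiffel Tower look like?',
    chatHistory: [
      { role: 'user', content: 'Tell me about Paris' },
      { role: 'assistant', content: 'Paris is the capital of France.' },
    ],
    chatModel: { providerId: 'openai', key: 'gpt-4o-mini' },
  }),
});
// Response shape is assumed from the handler's `images` result:
// up to 10 entries of { img_src, url, title }.
const data = await res.json();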
Chat API route:

@@ -30,12 +30,6 @@ export const POST = async (req: Request) => {
     body.optimizationMode = body.optimizationMode || 'balanced';
     body.stream = body.stream || false;
 
-    const history: BaseMessage[] = body.history.map((msg) => {
-      return msg[0] === 'human'
-        ? new HumanMessage({ content: msg[1] })
-        : new AIMessage({ content: msg[1] });
-    });
-
     const registry = new ModelRegistry();
 
     const [llm, embeddings] = await Promise.all([
@@ -46,6 +40,12 @@ export const POST = async (req: Request) => {
       ),
     ]);
 
+    const history: BaseMessage[] = body.history.map((msg) => {
+      return msg[0] === 'human'
+        ? new HumanMessage({ content: msg[1] })
+        : new AIMessage({ content: msg[1] });
+    });
+
     const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
 
     if (!searchHandler) {
@@ -128,7 +128,7 @@ export const POST = async (req: Request) => {
 
       try {
        controller.close();
-      } catch (error) {}
+      } catch (error) { }
     });
 
     emitter.on('data', (data: string) => {
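The `body.history` wire format that the relocated block converts is a list of [role, content] tuples. A minimal, self-contained sketch of that mapping (the sample strings are invented for illustration):

import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';

// Sample history in the tuple format the route receives.
const history: [string, string][] = [
  ['human', 'What is Perplexica?'],
  ['assistant', 'Perplexica is an open-source AI-powered search engine.'],
];

// Same mapping as in the hunk above: 'human' entries become HumanMessage,
// everything else becomes AIMessage.
const messages: BaseMessage[] = history.map((msg) =>
  msg[0] === 'human'
    ? new HumanMessage({ content: msg[1] })
    : new AIMessage({ content: msg[1] }),
);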
Suggestions API route:

@@ -1,7 +1,6 @@
-import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
+import generateSuggestions from '@/lib/agents/suggestions';
 import ModelRegistry from '@/lib/models/registry';
 import { ModelWithProvider } from '@/lib/models/types';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
 
 interface SuggestionsGenerationBody {
@@ -13,6 +12,13 @@ export const POST = async (req: Request) => {
   try {
     const body: SuggestionsGenerationBody = await req.json();
 
+    const registry = new ModelRegistry();
+
+    const llm = await registry.loadChatModel(
+      body.chatModel.providerId,
+      body.chatModel.key,
+    );
+
     const chatHistory = body.chatHistory
       .map((msg: any) => {
         if (msg.role === 'user') {
@@ -23,16 +29,9 @@ export const POST = async (req: Request) => {
       })
       .filter((msg) => msg !== undefined) as BaseMessage[];
 
-    const registry = new ModelRegistry();
-
-    const llm = await registry.loadChatModel(
-      body.chatModel.providerId,
-      body.chatModel.key,
-    );
-
     const suggestions = await generateSuggestions(
       {
-        chat_history: chatHistory,
+        chatHistory,
       },
       llm,
     );
Video search API route:

@@ -1,4 +1,4 @@
-import handleVideoSearch from '@/lib/chains/videoSearchAgent';
+import handleVideoSearch from '@/lib/agents/media/video';
 import ModelRegistry from '@/lib/models/registry';
 import { ModelWithProvider } from '@/lib/models/types';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
@@ -13,6 +13,13 @@ export const POST = async (req: Request) => {
   try {
     const body: VideoSearchBody = await req.json();
 
+    const registry = new ModelRegistry();
+
+    const llm = await registry.loadChatModel(
+      body.chatModel.providerId,
+      body.chatModel.key,
+    );
+
     const chatHistory = body.chatHistory
       .map((msg: any) => {
         if (msg.role === 'user') {
@@ -23,16 +30,9 @@ export const POST = async (req: Request) => {
       })
       .filter((msg) => msg !== undefined) as BaseMessage[];
 
-    const registry = new ModelRegistry();
-
-    const llm = await registry.loadChatModel(
-      body.chatModel.providerId,
-      body.chatModel.key,
-    );
-
     const videos = await handleVideoSearch(
       {
-        chat_history: chatHistory,
+        chatHistory: chatHistory,
         query: body.query,
       },
       llm,
MessageBox component:

@@ -205,11 +205,11 @@ const MessageBox = ({
             <div className="lg:sticky lg:top-20 flex flex-col items-center space-y-3 w-full lg:w-3/12 z-30 h-full pb-4">
               <SearchImages
                 query={section.userMessage.content}
-                chatHistory={chatTurns.slice(0, sectionIndex * 2)}
+                chatHistory={chatTurns}
                 messageId={section.assistantMessage.messageId}
               />
               <SearchVideos
-                chatHistory={chatTurns.slice(0, sectionIndex * 2)}
+                chatHistory={chatTurns}
                 query={section.userMessage.content}
                 messageId={section.assistantMessage.messageId}
               />
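In effect, each media panel previously saw only the turns preceding its own section; after this change every panel receives the full conversation. Sketched in isolation:

// Before: two entries (the user and assistant turns) per preceding section.
const panelHistoryBefore = chatTurns.slice(0, sectionIndex * 2);
// After: the complete turn list is passed to SearchImages and SearchVideos.
const panelHistoryAfter = chatTurns;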
src/lib/agents/media/image.ts (new file, 65 lines)

@@ -0,0 +1,65 @@
/* I don't think these can be classified as agents, but I'll keep them here to keep the structure consistent. */

import {
  RunnableSequence,
  RunnableMap,
  RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '@/lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '@/lib/outputParsers/lineOutputParser';
import { imageSearchFewShots, imageSearchPrompt } from '@/lib/prompts/media/image';

type ImageSearchChainInput = {
  chatHistory: BaseMessage[];
  query: string;
};

type ImageSearchResult = {
  img_src: string;
  url: string;
  title: string;
}

const outputParser = new LineOutputParser({
  key: 'query',
})

const searchImages = async (
  input: ImageSearchChainInput,
  llm: BaseChatModel,
) => {
  const chatPrompt = await ChatPromptTemplate.fromMessages([
    new SystemMessage(imageSearchPrompt),
    ...imageSearchFewShots,
    new HumanMessage(`<conversation>\n${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<follow_up>\n${input.query}\n</follow_up>`)
  ]).formatMessages({})

  const res = await llm.invoke(chatPrompt)

  const query = await outputParser.invoke(res)

  const searchRes = await searchSearxng(query!, {
    engines: ['bing images', 'google images'],
  });

  const images: ImageSearchResult[] = [];

  searchRes.results.forEach((result) => {
    if (result.img_src && result.url && result.title) {
      images.push({
        img_src: result.img_src,
        url: result.url,
        title: result.title,
      });
    }
  });

  return images.slice(0, 10);
};

export default searchImages;
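A hypothetical usage sketch of the new helper. The ChatOpenAI model choice is an assumption (any BaseChatModel should work), and the messages are invented:

import { ChatOpenAI } from '@langchain/openai';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import searchImages from '@/lib/agents/media/image';

// Assumed model; swap in whatever the registry would have loaded.
const llm = new ChatOpenAI({ model: 'gpt-4o-mini', temperature: 0 });

const images = await searchImages(
  {
    chatHistory: [
      new HumanMessage('Tell me about the James Webb Space Telescope'),
      new AIMessage('The JWST is an infrared space observatory launched in 2021.'),
    ],
    query: 'Show me what it looks like',
  },
  llm,
);
// images: up to 10 entries of { img_src, url, title }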
src/lib/agents/media/video.ts (new file, 65 lines)

@@ -0,0 +1,65 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { searchSearxng } from '@/lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '@/lib/outputParsers/lineOutputParser';
import { videoSearchFewShots, videoSearchPrompt } from '@/lib/prompts/media/videos';

type VideoSearchChainInput = {
  chatHistory: BaseMessage[];
  query: string;
};

type VideoSearchResult = {
  img_src: string;
  url: string;
  title: string;
  iframe_src: string;
}

const outputParser = new LineOutputParser({
  key: 'query',
});

const searchVideos = async (
  input: VideoSearchChainInput,
  llm: BaseChatModel,
) => {
  const chatPrompt = await ChatPromptTemplate.fromMessages([
    new SystemMessage(videoSearchPrompt),
    ...videoSearchFewShots,
    new HumanMessage(`<conversation>${formatChatHistoryAsString(input.chatHistory)}\n</conversation>\n<follow_up>\n${input.query}\n</follow_up>`)
  ]).formatMessages({})

  const res = await llm.invoke(chatPrompt)

  const query = await outputParser.invoke(res)

  const searchRes = await searchSearxng(query!, {
    engines: ['youtube'],
  });

  const videos: VideoSearchResult[] = [];

  searchRes.results.forEach((result) => {
    if (
      result.thumbnail &&
      result.url &&
      result.title &&
      result.iframe_src
    ) {
      videos.push({
        img_src: result.thumbnail,
        url: result.url,
        title: result.title,
        iframe_src: result.iframe_src,
      });
    }
  });

  return videos.slice(0, 10);
};

export default searchVideos;
src/lib/agents/suggestions/index.ts (new file, 32 lines)

@@ -0,0 +1,32 @@
import ListLineOutputParser from '@/lib/outputParsers/listLineOutputParser';
import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';
import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { suggestionGeneratorPrompt } from '@/lib/prompts/suggestions';

type SuggestionGeneratorInput = {
  chatHistory: BaseMessage[];
};

const outputParser = new ListLineOutputParser({
  key: 'suggestions',
});

const generateSuggestions = async (
  input: SuggestionGeneratorInput,
  llm: BaseChatModel,
) => {
  const chatPrompt = await ChatPromptTemplate.fromMessages([
    new SystemMessage(suggestionGeneratorPrompt),
    new HumanMessage(`<conversation>${formatChatHistoryAsString(input.chatHistory)}</conversation>`)
  ]).formatMessages({})

  const res = await llm.invoke(chatPrompt)

  const suggestions = await outputParser.invoke(res)

  return suggestions
};

export default generateSuggestions;
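ListLineOutputParser's source is not part of this diff; assuming it extracts the newline-separated lines between the tags named by its key, the round trip would look roughly like this:

// Assumed parser behavior, based on the prompt contract shown later in this diff.
const modelOutput = `<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>`;

// outputParser.invoke(...) on a reply like the above would presumably yield:
// ['Tell me more about SpaceX and their recent projects',
//  'What is the latest news on SpaceX?',
//  'Who is the CEO of SpaceX?']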
src/lib/chains/imageSearchAgent.ts (deleted, 105 lines)

@@ -1,105 +0,0 @@
import {
  RunnableSequence,
  RunnableMap,
  RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';

const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;

type ImageSearchChainInput = {
  chat_history: BaseMessage[];
  query: string;
};

interface ImageSearchResult {
  img_src: string;
  url: string;
  title: string;
}

const strParser = new StringOutputParser();

const createImageSearchChain = (llm: BaseChatModel) => {
  return RunnableSequence.from([
    RunnableMap.from({
      chat_history: (input: ImageSearchChainInput) => {
        return formatChatHistoryAsString(input.chat_history);
      },
      query: (input: ImageSearchChainInput) => {
        return input.query;
      },
    }),
    ChatPromptTemplate.fromMessages([
      ['system', imageSearchChainPrompt],
      [
        'user',
        '<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
      ],
      ['assistant', '<query>A cat</query>'],

      [
        'user',
        '<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
      ],
      ['assistant', '<query>Car working</query>'],
      [
        'user',
        '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
      ],
      ['assistant', '<query>AC working</query>'],
      [
        'user',
        '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
      ],
    ]),
    llm,
    strParser,
    RunnableLambda.from(async (input: string) => {
      const queryParser = new LineOutputParser({
        key: 'query',
      });

      return await queryParser.parse(input);
    }),
    RunnableLambda.from(async (input: string) => {
      const res = await searchSearxng(input, {
        engines: ['bing images', 'google images'],
      });

      const images: ImageSearchResult[] = [];

      res.results.forEach((result) => {
        if (result.img_src && result.url && result.title) {
          images.push({
            img_src: result.img_src,
            url: result.url,
            title: result.title,
          });
        }
      });

      return images.slice(0, 10);
    }),
  ]);
};

const handleImageSearch = (
  input: ImageSearchChainInput,
  llm: BaseChatModel,
) => {
  const imageSearchChain = createImageSearchChain(llm);
  return imageSearchChain.invoke(input);
};

export default handleImageSearch;
src/lib/chains/suggestionGeneratorAgent.ts (deleted, 55 lines)

@@ -1,55 +0,0 @@
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import ListLineOutputParser from '../outputParsers/listLineOutputParser';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOpenAI } from '@langchain/openai';

const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestion should be relevant to the conversation that can be used by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length and are informative and relevant to the conversation.

Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:

<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>

Conversation:
{chat_history}
`;

type SuggestionGeneratorInput = {
  chat_history: BaseMessage[];
};

const outputParser = new ListLineOutputParser({
  key: 'suggestions',
});

const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
  return RunnableSequence.from([
    RunnableMap.from({
      chat_history: (input: SuggestionGeneratorInput) =>
        formatChatHistoryAsString(input.chat_history),
    }),
    PromptTemplate.fromTemplate(suggestionGeneratorPrompt),
    llm,
    outputParser,
  ]);
};

const generateSuggestions = (
  input: SuggestionGeneratorInput,
  llm: BaseChatModel,
) => {
  (llm as unknown as ChatOpenAI).temperature = 0;
  const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
  return suggestionGeneratorChain.invoke(input);
};

export default generateSuggestions;
src/lib/chains/videoSearchAgent.ts (deleted, 110 lines)

@@ -1,110 +0,0 @@
import {
  RunnableSequence,
  RunnableMap,
  RunnableLambda,
} from '@langchain/core/runnables';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';

const videoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;

type VideoSearchChainInput = {
  chat_history: BaseMessage[];
  query: string;
};

interface VideoSearchResult {
  img_src: string;
  url: string;
  title: string;
  iframe_src: string;
}

const strParser = new StringOutputParser();

const createVideoSearchChain = (llm: BaseChatModel) => {
  return RunnableSequence.from([
    RunnableMap.from({
      chat_history: (input: VideoSearchChainInput) => {
        return formatChatHistoryAsString(input.chat_history);
      },
      query: (input: VideoSearchChainInput) => {
        return input.query;
      },
    }),
    ChatPromptTemplate.fromMessages([
      ['system', videoSearchChainPrompt],
      [
        'user',
        '<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
      ],
      ['assistant', '<query>How does a car work?</query>'],
      [
        'user',
        '<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
      ],
      ['assistant', '<query>Theory of relativity</query>'],
      [
        'user',
        '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
      ],
      ['assistant', '<query>AC working</query>'],
      [
        'user',
        '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
      ],
    ]),
    llm,
    strParser,
    RunnableLambda.from(async (input: string) => {
      const queryParser = new LineOutputParser({
        key: 'query',
      });
      return await queryParser.parse(input);
    }),
    RunnableLambda.from(async (input: string) => {
      const res = await searchSearxng(input, {
        engines: ['youtube'],
      });

      const videos: VideoSearchResult[] = [];

      res.results.forEach((result) => {
        if (
          result.thumbnail &&
          result.url &&
          result.title &&
          result.iframe_src
        ) {
          videos.push({
            img_src: result.thumbnail,
            url: result.url,
            title: result.title,
            iframe_src: result.iframe_src,
          });
        }
      });

      return videos.slice(0, 10);
    }),
  ]);
};

const handleVideoSearch = (
  input: VideoSearchChainInput,
  llm: BaseChatModel,
) => {
  const videoSearchChain = createVideoSearchChain(llm);
  return videoSearchChain.invoke(input);
};

export default handleVideoSearch;
src/lib/prompts/media/image.ts (new file, 26 lines)

@@ -0,0 +1,26 @@
import { BaseMessageLike } from "@langchain/core/messages";

export const imageSearchPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;

export const imageSearchFewShots: BaseMessageLike[] = [
  [
    'user',
    '<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
  ],
  ['assistant', '<query>A cat</query>'],

  [
    'user',
    '<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
  ],
  ['assistant', '<query>Car working</query>'],
  [
    'user',
    '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
  ],
  ['assistant', '<query>AC working</query>']
]
src/lib/prompts/media/videos.ts (new file, 25 lines)

@@ -0,0 +1,25 @@
import { BaseMessageLike } from "@langchain/core/messages";

export const videoSearchPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;

export const videoSearchFewShots: BaseMessageLike[] = [
  [
    'user',
    '<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
  ],
  ['assistant', '<query>How does a car work?</query>'],
  [
    'user',
    '<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
  ],
  ['assistant', '<query>Theory of relativity</query>'],
  [
    'user',
    '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
  ],
  ['assistant', '<query>AC working</query>'],
]
src/lib/prompts/suggestions/index.ts (new file, 15 lines)

@@ -0,0 +1,15 @@
export const suggestionGeneratorPrompt = `
You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestion should be relevant to the conversation that can be used by the user to ask the chat model for more information.
You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
Make sure the suggestions are medium in length and are informative and relevant to the conversation.

Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:

<suggestions>
Tell me more about SpaceX and their recent projects
What is the latest news on SpaceX?
Who is the CEO of SpaceX?
</suggestions>

Today's date is ${new Date().toISOString()}
`;
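One subtlety worth noting: because this is a plain template literal, ${new Date().toISOString()} is evaluated once when the module is first imported, so every request sees the timestamp from process start. A sketch of the distinction, in case per-request freshness were wanted (the function form is an illustration, not part of the commit):

// Evaluated once, at import time; the date is frozen for the process lifetime:
export const promptAtImport = `Today's date is ${new Date().toISOString()}`;

// Evaluated per call; each invocation sees the current time:
export const promptNow = () => `Today's date is ${new Date().toISOString()}`;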