Compare commits


2 Commits

Author        SHA1        Message                                                      Date

Willie Zutz   c645ffdcde  Merge 03b27d9cbb into 68e151b2bd                             2025-05-10 21:09:04 +00:00

Willie Zutz   03b27d9cbb  fix(UI): Fix results showing in light mode                   2025-05-10 15:09:00 -06:00
                          feat(AI): Enhance system prompt for more reliable and relevant results
                          fix(Reddit): Reddit focus should work again. Works around SearXNG's broken Reddit search by using `site:reddit.com`
8 changed files with 307 additions and 124 deletions

View File

@@ -377,7 +377,7 @@ const MessageTabs = ({
   'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
   'prose-code:bg-transparent prose-code:p-0 prose-code:text-inherit prose-code:font-normal prose-code:before:content-none prose-code:after:content-none',
   'prose-pre:bg-transparent prose-pre:border-0 prose-pre:m-0 prose-pre:p-0',
-  'max-w-none break-words px-4 text-white',
+  'max-w-none break-words px-4 text-black dark:text-white',
 )}
 options={markdownOverrides}
 >
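The fix swaps the hard-coded `text-white`, which was unreadable on a light background, for a light-mode default plus a `dark:` override. A minimal sketch of the pattern, assuming a `cn`-style class-merging helper like the one the component already calls (the import path is an assumption):

import { cn } from '@/lib/utils'; // assumed location of the class-merging helper

// Default to black text in light mode; the dark: variant restores white in dark mode.
const proseClasses = cn(
  'prose max-w-none break-words px-4',
  'text-black dark:text-white',
);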

View File

@@ -25,15 +25,27 @@ const SearchImages = ({
const [loading, setLoading] = useState(true); const [loading, setLoading] = useState(true);
const [open, setOpen] = useState(false); const [open, setOpen] = useState(false);
const [slides, setSlides] = useState<any[]>([]); const [slides, setSlides] = useState<any[]>([]);
const hasLoadedRef = useRef(false); const [displayLimit, setDisplayLimit] = useState(10); // Initially show only 10 images
const loadedMessageIdsRef = useRef<Set<string>>(new Set());
// Function to show more images when the Show More button is clicked
const handleShowMore = () => {
// If we're already showing all images, don't do anything
if (images && displayLimit >= images.length) return;
// Otherwise, increase the display limit by 10, or show all images
setDisplayLimit(prev => images ? Math.min(prev + 10, images.length) : prev);
};
useEffect(() => { useEffect(() => {
// Skip fetching if images are already loaded for this message // Skip fetching if images are already loaded for this message
if (hasLoadedRef.current) { if (loadedMessageIdsRef.current.has(messageId)) {
return; return;
} }
const fetchImages = async () => { const fetchImages = async () => {
// Mark as loaded to prevent refetching
loadedMessageIdsRef.current.add(messageId);
setLoading(true); setLoading(true);
const chatModelProvider = localStorage.getItem('chatModelProvider'); const chatModelProvider = localStorage.getItem('chatModelProvider');
@@ -80,8 +92,7 @@ const SearchImages = ({
if (onImagesLoaded && images.length > 0) { if (onImagesLoaded && images.length > 0) {
onImagesLoaded(images.length); onImagesLoaded(images.length);
} }
// Mark as loaded to prevent refetching
hasLoadedRef.current = true;
} catch (error) { } catch (error) {
console.error('Error fetching images:', error); console.error('Error fetching images:', error);
} finally { } finally {
@@ -91,11 +102,7 @@ const SearchImages = ({
fetchImages(); fetchImages();
// Reset the loading state when component unmounts }, [query, messageId, chatHistory, onImagesLoaded]);
return () => {
hasLoadedRef.current = false;
};
}, [query, messageId]);
return ( return (
<> <>
@@ -111,8 +118,8 @@ const SearchImages = ({
)} )}
{images !== null && images.length > 0 && ( {images !== null && images.length > 0 && (
<> <>
<div className="grid grid-cols-2 gap-2"> <div className="grid grid-cols-2 gap-2" key={`image-results-${messageId}`}>
{images.map((image, i) => ( {images.slice(0, displayLimit).map((image, i) => (
<img <img
onClick={() => { onClick={() => {
setOpen(true); setOpen(true);
@@ -129,6 +136,17 @@ const SearchImages = ({
/> />
))} ))}
</div> </div>
{images.length > displayLimit && (
<div className="flex justify-center mt-4">
<button
onClick={handleShowMore}
className="px-4 py-2 bg-light-secondary dark:bg-dark-secondary hover:bg-light-200 dark:hover:bg-dark-200 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white rounded-md transition duration-200 flex items-center space-x-2"
>
<span>Show More Images</span>
<span className="text-sm opacity-75">({displayLimit} of {images.length})</span>
</button>
</div>
)}
<Lightbox open={open} close={() => setOpen(false)} slides={slides} /> <Lightbox open={open} close={() => setOpen(false)} slides={slides} />
</> </>
)} )}
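The Show More flow is plain incremental pagination over an already-fetched array. A self-contained sketch of the same logic, extracted into a hypothetical hook for illustration (the component inlines it; `useDisplayLimit` is not a name from the codebase):

import { useState } from 'react';

// Reveal items in steps of `step`, never exceeding the array length.
const useDisplayLimit = <T,>(items: T[] | null, step = 10) => {
  const [displayLimit, setDisplayLimit] = useState(step);

  const handleShowMore = () => {
    if (items && displayLimit >= items.length) return; // everything already visible
    setDisplayLimit((prev) => (items ? Math.min(prev + step, items.length) : prev));
  };

  return {
    visible: items ? items.slice(0, displayLimit) : [],
    displayLimit,
    handleShowMore,
  };
};

Because the full result set is already in memory, slicing client-side costs nothing and avoids any refetch when the limit grows.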

View File

@@ -40,12 +40,22 @@ const Searchvideos = ({
   const [open, setOpen] = useState(false);
   const [slides, setSlides] = useState<VideoSlide[]>([]);
   const [currentIndex, setCurrentIndex] = useState(0);
+  const [displayLimit, setDisplayLimit] = useState(10); // Initially show only 10 videos
   const videoRefs = useRef<(HTMLIFrameElement | null)[]>([]);
-  const hasLoadedRef = useRef(false);
+  const loadedMessageIdsRef = useRef<Set<string>>(new Set());
+
+  // Function to show more videos when the Show More button is clicked
+  const handleShowMore = () => {
+    // If we're already showing all videos, don't do anything
+    if (videos && displayLimit >= videos.length) return;
+    // Otherwise, increase the display limit by 10, or show all videos
+    setDisplayLimit(prev => videos ? Math.min(prev + 10, videos.length) : prev);
+  };

   useEffect(() => {
     // Skip fetching if videos are already loaded for this message
-    if (hasLoadedRef.current) {
+    if (loadedMessageIdsRef.current.has(messageId)) {
       return;
     }
@@ -99,7 +109,7 @@ const Searchvideos = ({
         onVideosLoaded(videos.length);
       }
       // Mark as loaded to prevent refetching
-      hasLoadedRef.current = true;
+      loadedMessageIdsRef.current.add(messageId);
     } catch (error) {
       console.error('Error fetching videos:', error);
     } finally {
@@ -109,11 +119,7 @@ const Searchvideos = ({
     fetchVideos();
-
-    // Reset the loading state when component unmounts
-    return () => {
-      hasLoadedRef.current = false;
-    };
-  }, [query, messageId]);
+  }, [query, messageId, chatHistory, onVideosLoaded]);

   return (
     <>
@@ -129,8 +135,8 @@ const Searchvideos = ({
       )}
       {videos !== null && videos.length > 0 && (
         <>
-          <div className="grid grid-cols-2 gap-2">
-            {videos.map((video, i) => (
+          <div className="grid grid-cols-2 gap-2" key={`video-results-${messageId}`}>
+            {videos.slice(0, displayLimit).map((video, i) => (
               <div
                 onClick={() => {
                   setOpen(true);
@@ -155,6 +161,17 @@ const Searchvideos = ({
               </div>
             ))}
           </div>
+          {videos.length > displayLimit && (
+            <div className="flex justify-center mt-4">
+              <button
+                onClick={handleShowMore}
+                className="px-4 py-2 bg-light-secondary dark:bg-dark-secondary hover:bg-light-200 dark:hover:bg-dark-200 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white rounded-md transition duration-200 flex items-center space-x-2"
+              >
+                <span>Show More Videos</span>
+                <span className="text-sm opacity-75">({displayLimit} of {videos.length})</span>
+              </button>
+            </div>
+          )}
           <Lightbox
             open={open}
             close={() => setOpen(false)}
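Both components also replace the single `hasLoadedRef` boolean with a `Set` of message IDs, so switching between messages no longer discards the loaded flag and triggers a refetch. A sketch of that caching shape in isolation (`useFetchOncePerId` is a hypothetical name, not a real export):

import { useEffect, useRef } from 'react';

// Run fetchFn at most once per id for the lifetime of the component.
const useFetchOncePerId = (id: string, fetchFn: () => Promise<void>) => {
  const loadedIdsRef = useRef<Set<string>>(new Set());

  useEffect(() => {
    if (loadedIdsRef.current.has(id)) return; // already fetched for this id
    loadedIdsRef.current.add(id); // mark before awaiting so re-renders can't double-fetch
    void fetchFn();
  }, [id, fetchFn]);
};

Note the trade-off: marking before the fetch, as the images component now does, suppresses duplicate in-flight requests, but a failed fetch will not be retried until the component remounts.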

View File

@@ -6,29 +6,73 @@ import {
 import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import LineOutputParser from '../outputParsers/lineOutputParser';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const imageSearchChainPrompt = `
-You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
-You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
-
-Example:
-1. Follow up question: What is a cat?
-Rephrased: A cat
-
-2. Follow up question: What is a car? How does it works?
-Rephrased: Car working
-
-3. Follow up question: How does an AC work?
-Rephrased: AC working
-
-Conversation:
-{chat_history}
-
-Follow up question: {query}
-Rephrased question:
+# Instructions
+- You will be given a question from a user and a conversation history
+- Rephrase the question based on the conversation so it is a standalone question that can be used to search for images that are relevant to the question
+- Ensure the rephrased question agrees with the conversation and is relevant to the conversation
+- If you are thinking or reasoning, use <think> tags to indicate your thought process
+- If you are thinking or reasoning, do not use <answer> and </answer> tags in your thinking. Those tags should only be used in the final output
+- Use the provided date to ensure the rephrased question is relevant to the current date and time if applicable
+
+# Data locations
+- The history is contained in the <conversation> tag after the <examples> below
+- The user question is contained in the <question> tag after the <examples> below
+- Output your answer in an <answer> tag
+- Current date & time in ISO format (UTC timezone) is: {date}
+- Do not include any other text in your answer
+
+<examples>
+## Example 1 input
+<conversation>
+Who won the last F1 race?\nAyrton Senna won the Monaco Grand Prix. It was a tight race with lots of overtakes. Alain Prost was in the lead for most of the race until the last lap when Senna overtook them.
+</conversation>
+<question>
+What were the highlights of the race?
+</question>
+
+## Example 1 output
+<answer>
+F1 Monaco Grand Prix highlights
+</answer>
+
+## Example 2 input
+<conversation>
+What is the theory of relativity?
+</conversation>
+<question>
+What is the theory of relativity?
+</question>
+
+## Example 2 output
+<answer>
+Theory of relativity
+</answer>
+
+## Example 3 input
+<conversation>
+I'm looking for a nice vacation spot. Where do you suggest?\nI suggest you go to Hawaii. It's a beautiful place with lots of beaches and activities to do.\nI love the beach! What are some activities I can do there?\nYou can go surfing, snorkeling, or just relax on the beach.
+</conversation>
+<question>
+What are some activities I can do in Hawaii?
+</question>
+
+## Example 3 output
+<answer>
+Hawaii activities
+</answer>
+</examples>
+
+<conversation>
+{chat_history}
+</conversation>
+
+<question>
+{query}
+</question>
 `;
@@ -42,7 +86,9 @@ interface ImageSearchResult {
   title: string;
 }

-const strParser = new StringOutputParser();
+const outputParser = new LineOutputParser({
+  key: 'answer',
+});

 const createImageSearchChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
@@ -53,14 +99,13 @@ const createImageSearchChain = (llm: BaseChatModel) => {
       query: (input: ImageSearchChainInput) => {
         return input.query;
       },
+      date: () => new Date().toISOString(),
     }),
     PromptTemplate.fromTemplate(imageSearchChainPrompt),
     llm,
-    strParser,
-    RunnableLambda.from(async (input: string) => {
-      input = input.replace(/<think>.*?<\/think>/g, '');
-      const res = await searchSearxng(input, {
+    outputParser,
+    RunnableLambda.from(async (searchQuery: string) => {
+      const res = await searchSearxng(searchQuery, {
         engines: ['bing images', 'google images'],
       });
@@ -76,7 +121,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
         }
       });

-      return images.slice(0, 10);
+      return images;
     }),
   ]);
 };
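The chain drops `StringOutputParser` plus a manual `<think>`-stripping regex in favor of a `LineOutputParser` keyed on `answer`. That parser's source is outside this diff; a plausible minimal version, assuming it extracts the first `<answer>…</answer>` block and ignores everything around it (the base class and method shape follow LangChain's custom-parser convention, not the fork's actual code):

import { BaseOutputParser } from '@langchain/core/output_parsers';

// Hypothetical sketch of ../outputParsers/lineOutputParser.
class LineOutputParser extends BaseOutputParser<string> {
  lc_namespace = ['perplexica', 'output_parsers']; // required by LangChain serialization

  constructor(private args: { key: string }) {
    super();
  }

  async parse(text: string): Promise<string> {
    // Keep only the content of the first <key>...</key> block; thinking text outside it is dropped.
    const match = text.match(
      new RegExp(`<${this.args.key}>([\\s\\S]*?)</${this.args.key}>`),
    );
    return match ? match[1].trim() : '';
  }

  getFormatInstructions(): string {
    return `Wrap your final output in <${this.args.key}> tags.`;
  }
}

If the parser behaves like this, the explicit `<think>` strip becomes unnecessary: anything outside the answer tags never reaches SearXNG.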

View File

@@ -6,29 +6,73 @@ import {
 import { PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import LineOutputParser from '../outputParsers/lineOutputParser';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const VideoSearchChainPrompt = `
-You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
-You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
-
-Example:
-1. Follow up question: How does a car work?
-Rephrased: How does a car work?
-
-2. Follow up question: What is the theory of relativity?
-Rephrased: What is theory of relativity
-
-3. Follow up question: How does an AC work?
-Rephrased: How does an AC work
-
-Conversation:
-{chat_history}
-
-Follow up question: {query}
-Rephrased question:
+# Instructions
+- You will be given a question from a user and a conversation history
+- Rephrase the question based on the conversation so it is a standalone question that can be used to search Youtube for videos
+- Ensure the rephrased question agrees with the conversation and is relevant to the conversation
+- If you are thinking or reasoning, use <think> tags to indicate your thought process
+- If you are thinking or reasoning, do not use <answer> and </answer> tags in your thinking. Those tags should only be used in the final output
+- Use the provided date to ensure the rephrased question is relevant to the current date and time if applicable
+
+# Data locations
+- The history is contained in the <conversation> tag after the <examples> below
+- The user question is contained in the <question> tag after the <examples> below
+- Output your answer in an <answer> tag
+- Current date & time in ISO format (UTC timezone) is: {date}
+- Do not include any other text in your answer
+
+<examples>
+## Example 1 input
+<conversation>
+Who won the last F1 race?\nAyrton Senna won the Monaco Grand Prix. It was a tight race with lots of overtakes. Alain Prost was in the lead for most of the race until the last lap when Senna overtook them.
+</conversation>
+<question>
+What were the highlights of the race?
+</question>
+
+## Example 1 output
+<answer>
+F1 Monaco Grand Prix highlights
+</answer>
+
+## Example 2 input
+<conversation>
+What is the theory of relativity?
+</conversation>
+<question>
+What is the theory of relativity?
+</question>
+
+## Example 2 output
+<answer>
+What is the theory of relativity?
+</answer>
+
+## Example 3 input
+<conversation>
+I'm looking for a nice vacation spot. Where do you suggest?\nI suggest you go to Hawaii. It's a beautiful place with lots of beaches and activities to do.\nI love the beach! What are some activities I can do there?\nYou can go surfing, snorkeling, or just relax on the beach.
+</conversation>
+<question>
+What are some activities I can do in Hawaii?
+</question>
+
+## Example 3 output
+<answer>
+Activities to do in Hawaii
+</answer>
+</examples>
+
+<conversation>
+{chat_history}
+</conversation>
+
+<question>
+{query}
+</question>
 `;
@@ -43,7 +87,9 @@ interface VideoSearchResult {
   iframe_src: string;
 }

-const strParser = new StringOutputParser();
+const answerParser = new LineOutputParser({
+  key: 'answer',
+});

 const createVideoSearchChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
@@ -54,14 +100,13 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
       query: (input: VideoSearchChainInput) => {
         return input.query;
       },
+      date: () => new Date().toISOString(),
     }),
     PromptTemplate.fromTemplate(VideoSearchChainPrompt),
     llm,
-    strParser,
-    RunnableLambda.from(async (input: string) => {
-      input = input.replace(/<think>.*?<\/think>/g, '');
-      const res = await searchSearxng(input, {
+    answerParser,
+    RunnableLambda.from(async (searchQuery: string) => {
+      const res = await searchSearxng(searchQuery, {
         engines: ['youtube'],
       });
@@ -83,7 +128,7 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
         }
       });

-      return videos.slice(0, 10);
+      return videos;
     }),
   ]);
 };
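Both rephrasing chains now inject the timestamp at invocation time through the `RunnableMap`, so the `{date}` placeholder in the prompt is always current. Reduced to its essentials, the pattern looks like this (a standalone sketch, not the fork's file):

import { PromptTemplate } from '@langchain/core/prompts';
import { RunnableMap, RunnableSequence } from '@langchain/core/runnables';

const prompt = PromptTemplate.fromTemplate(
  'Current date & time in ISO format (UTC timezone) is: {date}\nQuestion: {query}',
);

// RunnableMap computes each field lazily, so date is evaluated per invocation.
const datedPrompt = RunnableSequence.from([
  RunnableMap.from({
    query: (input: { query: string }) => input.query,
    date: () => new Date().toISOString(),
  }),
  prompt,
]);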

View File

@@ -1,80 +1,131 @@
 export const webSearchRetrieverPrompt = `
-You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it. You should condense the question to its essence and remove any unnecessary details. You should also make sure that the question is clear and easy to understand. You should not add any new information or change the meaning of the question. You should also make sure that the question is grammatically correct and free of spelling errors.
-If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
-If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants to you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
-You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
-If you are a thinking or reasoning AI, you should avoid using \`<question>\` and \`</question>\` tags in your thinking. Those tags should only be used in the final output. You should also avoid using \`<links>\` and \`</links>\` tags in your thinking. Those tags should only be used in the final output.
-
-There are several examples attached for your reference inside the below \`examples\` XML block
-
-<examples>
-1. Follow up question: What is the capital of France
-Rephrased question:\`
-<question>
-Capital of france
-</question>
-\`
-
-2. Hi, how are you?
-Rephrased question\`
-<question>
-not_needed
-</question>
-\`
-
-3. Follow up question: What is Docker?
-Rephrased question: \`
-<question>
-What is Docker
-</question>
-\`
-
-4. Follow up question: Can you tell me what is X from https://example.com
-Rephrased question: \`
-<question>
-Can you tell me what is X?
-</question>
-<links>
-https://example.com
-</links>
-\`
-
-5. Follow up question: Summarize the content from https://example.com
-Rephrased question: \`
-<question>
-summarize
-</question>
-<links>
-https://example.com
-</links>
-\`
-
-6. Follow-up question: Get the current F1 constructor standings and return the results in a table
-Rephrased question: \`
-<question>
-Current F1 constructor standings
-</question>
-\`
-
-7. Follow-up question: What are the top 10 restaurants in New York? Show the results in a table and include a short description of each restaurant.
-Rephrased question: \`
-<question>
-Top 10 restaurants in New York
-</question>
-\`
-</examples>
-
-Anything below is the part of the actual conversation and you need to use conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
-
-<conversation>
-{chat_history}
-</conversation>
-
-Follow up question: {query}
-Rephrased question:
+# Instructions
+- You are an AI question rephraser
+- You will be given a conversation and a user question
+- Rephrase the question so it is appropriate for web search
+- Only add additional information or change the meaning of the question if it is necessary for clarity or relevance to the conversation
+- Condense the question to its essence and remove any unnecessary details
+- Ensure the question is grammatically correct and free of spelling errors
+- If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response in the <answer> XML block
+- If the user includes URLs or a PDF in their question, return the URLs or PDF links inside the <links> XML block and the question inside the <answer> XML block
+- If the user wants to you to summarize the webpage or the PDF, return summarize inside the <answer> XML block in place of a question and the URLs to summarize in the <links> XML block
+- If you are a thinking or reasoning AI, do not use <answer> and </answer> or <links> and </links> tags in your thinking. Those tags should only be used in the final output
+- If applicable, use the provided date to ensure the rephrased question is relevant to the current date and time
+
+# Data
+- The history is contained in the <conversation> tag after the <examples> below
+- The user question is contained in the <question> tag after the <examples> below
+- You must always return the rephrased question inside an <answer> XML block, if there are no links in the follow-up question then don't insert a <links> XML block in your response
+- Current date & time in ISO format (UTC timezone) is: {date}
+- Do not include any other text in your answer
+
+There are several examples attached for your reference inside the below examples XML block
+
+<examples>
+## Example 1 input
+<conversation>
+Who won the last F1 race?\nAyrton Senna won the Monaco Grand Prix. It was a tight race with lots of overtakes. Alain Prost was in the lead for most of the race until the last lap when Senna overtook them.
+</conversation>
+<question>
+What were the highlights of the race?
+</question>
+
+## Example 1 output
+<answer>
+F1 Monaco Grand Prix highlights
+</answer>
+
+## Example 2 input
+<conversation>
+</conversation>
+<question>
+What is the capital of France
+</question>
+
+## Example 2 output
+<answer>
+Capital of France
+</answer>
+
+## Example 3 input
+<conversation>
+</conversation>
+<question>
+Hi, how are you?
+</question>
+
+## Example 3 output
+<answer>
+not_needed
+</answer>
+
+## Example 4 input
+<conversation>
+</conversation>
+<question>
+Can you tell me what is X from https://example.com
+</question>
+
+## Example 4 output
+<answer>
+Can you tell me what is X
+</answer>
+<links>
+https://example.com
+</links>
+
+## Example 5 input
+<conversation>
+</conversation>
+<question>
+Summarize the content from https://example.com
+</question>
+
+## Example 5 output
+<answer>
+summarize
+</answer>
+<links>
+https://example.com
+</links>
+
+## Example 6 input
+<conversation>
+</conversation>
+<question>
+Get the current F1 constructor standings and return the results in a table
+</question>
+
+## Example 6 output
+<answer>
+{date} F1 constructor standings
+</answer>
+
+## Example 7 input
+<conversation>
+</conversation>
+<question>
+What are the top 10 restaurants in New York? Show the results in a table and include a short description of each restaurant
+</question>
+
+## Example 7 output
+<answer>
+Top 10 restaurants in New York on {date}
+</answer>
+</examples>
+
+Everything below is the part of the actual conversation
+
+<conversation>
+{chat_history}
+</conversation>
+
+<question>
+{query}
+</question>
 `;

 export const webSearchResponsePrompt = `
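Under the new contract every retriever response is fully tagged. For a summarization request, a well-formed model output now looks like this (constructed from the prompt's own Example 5):

<answer>
summarize
</answer>
<links>
https://example.com
</links>

The MetaSearchAgent change below reads the first block with a LineOutputParser keyed on `answer` and the second with a LineListOutputParser keyed on `links`.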

View File

@@ -13,8 +13,8 @@ export const searchHandlers: Record<string, MetaSearchAgent> = {
   }),
   academicSearch: new MetaSearchAgent({
     activeEngines: ['arxiv', 'google scholar', 'pubmed'],
-    queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
-    responsePrompt: prompts.academicSearchResponsePrompt,
+    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
+    responsePrompt: prompts.webSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0,
     searchWeb: true,
@@ -40,8 +40,8 @@ export const searchHandlers: Record<string, MetaSearchAgent> = {
   }),
   wolframAlphaSearch: new MetaSearchAgent({
     activeEngines: ['wolframalpha'],
-    queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
-    responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
+    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
+    responsePrompt: prompts.webSearchResponsePrompt,
     rerank: false,
     rerankThreshold: 0,
     searchWeb: true,
@@ -49,20 +49,21 @@ export const searchHandlers: Record<string, MetaSearchAgent> = {
   }),
   youtubeSearch: new MetaSearchAgent({
     activeEngines: ['youtube'],
-    queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
-    responsePrompt: prompts.youtubeSearchResponsePrompt,
+    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
+    responsePrompt: prompts.webSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0.3,
     searchWeb: true,
     summarizer: false,
   }),
   redditSearch: new MetaSearchAgent({
-    activeEngines: ['reddit'],
-    queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
-    responsePrompt: prompts.redditSearchResponsePrompt,
+    activeEngines: [],
+    queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
+    responsePrompt: prompts.webSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0.3,
     searchWeb: true,
     summarizer: false,
+    additionalSearchCriteria: "site:reddit.com",
   }),
 };

View File

@@ -45,6 +45,7 @@ interface Config {
   queryGeneratorPrompt: string;
   responsePrompt: string;
   activeEngines: string[];
+  additionalSearchCriteria?: string;
 }

 type BasicChainInput = {
@@ -70,19 +71,19 @@ class MetaSearchAgent implements MetaSearchAgentType {
       llm,
       this.strParser,
       RunnableLambda.from(async (input: string) => {
+        //console.log(`LLM response for initial web search:"${input}"`);
         const linksOutputParser = new LineListOutputParser({
           key: 'links',
         });

         const questionOutputParser = new LineOutputParser({
-          key: 'question',
+          key: 'answer',
         });

         const links = await linksOutputParser.parse(input);
-        let question = this.config.summarizer
-          ? await questionOutputParser.parse(input)
-          : input;
-        console.log('question', question);
+        let question = await questionOutputParser.parse(input);
+        //console.log('question', question);

         if (question === 'not_needed') {
           return { query: '', docs: [] };
@@ -206,7 +207,10 @@ class MetaSearchAgent implements MetaSearchAgentType {
           return { query: question, docs: docs };
         } else {
-          question = question.replace(/<think>.*?<\/think>/g, '');
+          if (this.config.additionalSearchCriteria) {
+            question = `${question} ${this.config.additionalSearchCriteria}`;
+          }

           const searxngResult = await searchSearxng(question, {
             language: 'en',
@@ -245,6 +249,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
     optimizationMode: 'speed' | 'balanced' | 'quality',
     systemInstructions: string,
   ) {
+
     return RunnableSequence.from([
       RunnableMap.from({
         systemInstructions: () => systemInstructions,
@@ -262,10 +267,11 @@ class MetaSearchAgent implements MetaSearchAgentType {
     if (this.config.searchWeb) {
       const searchRetrieverChain =
         await this.createSearchRetrieverChain(llm);
+      var date = new Date().toISOString();

       const searchRetrieverResult = await searchRetrieverChain.invoke({
         chat_history: processedHistory,
         query,
+        date,
       });

       query = searchRetrieverResult.query;
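The `links` side of the parse uses the companion `LineListOutputParser`, whose source is also outside this diff. A plausible sketch, assuming it returns one entry per non-empty line inside the tag (hypothetical implementation, mirroring the LineOutputParser sketch earlier):

import { BaseOutputParser } from '@langchain/core/output_parsers';

// Hypothetical sketch of LineListOutputParser.
class LineListOutputParser extends BaseOutputParser<string[]> {
  lc_namespace = ['perplexica', 'output_parsers'];

  constructor(private args: { key: string }) {
    super();
  }

  async parse(text: string): Promise<string[]> {
    const match = text.match(
      new RegExp(`<${this.args.key}>([\\s\\S]*?)</${this.args.key}>`),
    );
    if (!match) return [];
    // One link per non-empty line inside the block.
    return match[1]
      .split('\n')
      .map((line) => line.trim())
      .filter((line) => line.length > 0);
  }

  getFormatInstructions(): string {
    return `List each item on its own line inside <${this.args.key}> tags.`;
  }
}

With both parsers in place, the retriever no longer needs the `<think>`-stripping regex it previously applied: reasoning text outside the tagged blocks is ignored by construction.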