Compare commits

1 Commit

Author: Willie Zutz
SHA1: e2ad55cd7a
Message: Merge 2a37f672ab into 68e151b2bd
Date: 2025-05-09 07:13:22 +00:00
8 changed files with 126 additions and 309 deletions

View File

@@ -377,7 +377,7 @@ const MessageTabs = ({
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'prose-code:bg-transparent prose-code:p-0 prose-code:text-inherit prose-code:font-normal prose-code:before:content-none prose-code:after:content-none',
'prose-pre:bg-transparent prose-pre:border-0 prose-pre:m-0 prose-pre:p-0',
'max-w-none break-words px-4 text-black dark:text-white',
'max-w-none break-words px-4 text-white',
)}
options={markdownOverrides}
>

View File

@@ -25,27 +25,15 @@ const SearchImages = ({
const [loading, setLoading] = useState(true);
const [open, setOpen] = useState(false);
const [slides, setSlides] = useState<any[]>([]);
const [displayLimit, setDisplayLimit] = useState(10); // Initially show only 10 images
const loadedMessageIdsRef = useRef<Set<string>>(new Set());
// Function to show more images when the Show More button is clicked
const handleShowMore = () => {
// If we're already showing all images, don't do anything
if (images && displayLimit >= images.length) return;
// Otherwise, increase the display limit by 10, or show all images
setDisplayLimit(prev => images ? Math.min(prev + 10, images.length) : prev);
};
const hasLoadedRef = useRef(false);
useEffect(() => {
// Skip fetching if images are already loaded for this message
if (loadedMessageIdsRef.current.has(messageId)) {
if (hasLoadedRef.current) {
return;
}
const fetchImages = async () => {
// Mark as loaded to prevent refetching
loadedMessageIdsRef.current.add(messageId);
setLoading(true);
const chatModelProvider = localStorage.getItem('chatModelProvider');
@@ -92,7 +80,8 @@ const SearchImages = ({
if (onImagesLoaded && images.length > 0) {
onImagesLoaded(images.length);
}
// Mark as loaded to prevent refetching
hasLoadedRef.current = true;
} catch (error) {
console.error('Error fetching images:', error);
} finally {
@@ -102,7 +91,11 @@ const SearchImages = ({
fetchImages();
}, [query, messageId, chatHistory, onImagesLoaded]);
// Reset the loading state when component unmounts
return () => {
hasLoadedRef.current = false;
};
}, [query, messageId]);
return (
<>
@@ -118,8 +111,8 @@ const SearchImages = ({
)}
{images !== null && images.length > 0 && (
<>
<div className="grid grid-cols-2 gap-2" key={`image-results-${messageId}`}>
{images.slice(0, displayLimit).map((image, i) => (
<div className="grid grid-cols-2 gap-2">
{images.map((image, i) => (
<img
onClick={() => {
setOpen(true);
@@ -136,17 +129,6 @@ const SearchImages = ({
/>
))}
</div>
{images.length > displayLimit && (
<div className="flex justify-center mt-4">
<button
onClick={handleShowMore}
className="px-4 py-2 bg-light-secondary dark:bg-dark-secondary hover:bg-light-200 dark:hover:bg-dark-200 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white rounded-md transition duration-200 flex items-center space-x-2"
>
<span>Show More Images</span>
<span className="text-sm opacity-75">({displayLimit} of {images.length})</span>
</button>
</div>
)}
<Lightbox open={open} close={() => setOpen(false)} slides={slides} />
</>
)}

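Note: the hunk above swaps a per-message Set (loadedMessageIdsRef) for a single boolean ref that is reset in the effect's cleanup. A minimal sketch of that guard pattern, with a hypothetical useLoadOnce name and a load callback standing in for the component's actual fetch logic:

import { useEffect, useRef } from 'react';

// Run an async load at most once per mount (and once per key change).
const useLoadOnce = (load: () => Promise<void>, key: string) => {
  const hasLoadedRef = useRef(false);
  useEffect(() => {
    if (hasLoadedRef.current) return; // skip duplicate fetches on re-render
    load()
      .then(() => {
        hasLoadedRef.current = true; // mark as loaded only after success
      })
      .catch(console.error);
    return () => {
      hasLoadedRef.current = false; // reset so a remount or a new key fetches again
    };
  }, [key]);
};

Because React runs the cleanup between dependency changes as well as on unmount, a new messageId still triggers a fresh fetch even though the boolean is not keyed by message.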
View File

@@ -40,22 +40,12 @@ const Searchvideos = ({
const [open, setOpen] = useState(false);
const [slides, setSlides] = useState<VideoSlide[]>([]);
const [currentIndex, setCurrentIndex] = useState(0);
const [displayLimit, setDisplayLimit] = useState(10); // Initially show only 10 videos
const videoRefs = useRef<(HTMLIFrameElement | null)[]>([]);
const loadedMessageIdsRef = useRef<Set<string>>(new Set());
// Function to show more videos when the Show More button is clicked
const handleShowMore = () => {
// If we're already showing all videos, don't do anything
if (videos && displayLimit >= videos.length) return;
// Otherwise, increase the display limit by 10, or show all videos
setDisplayLimit(prev => videos ? Math.min(prev + 10, videos.length) : prev);
};
const hasLoadedRef = useRef(false);
useEffect(() => {
// Skip fetching if videos are already loaded for this message
if (loadedMessageIdsRef.current.has(messageId)) {
if (hasLoadedRef.current) {
return;
}
@@ -109,7 +99,7 @@ const Searchvideos = ({
onVideosLoaded(videos.length);
}
// Mark as loaded to prevent refetching
loadedMessageIdsRef.current.add(messageId);
hasLoadedRef.current = true;
} catch (error) {
console.error('Error fetching videos:', error);
} finally {
@@ -119,7 +109,11 @@ const Searchvideos = ({
fetchVideos();
}, [query, messageId, chatHistory, onVideosLoaded]);
// Reset the loading state when component unmounts
return () => {
hasLoadedRef.current = false;
};
}, [query, messageId]);
return (
<>
@@ -135,8 +129,8 @@ const Searchvideos = ({
)}
{videos !== null && videos.length > 0 && (
<>
<div className="grid grid-cols-2 gap-2" key={`video-results-${messageId}`}>
{videos.slice(0, displayLimit).map((video, i) => (
<div className="grid grid-cols-2 gap-2">
{videos.map((video, i) => (
<div
onClick={() => {
setOpen(true);
@@ -161,17 +155,6 @@ const Searchvideos = ({
</div>
))}
</div>
{videos.length > displayLimit && (
<div className="flex justify-center mt-4">
<button
onClick={handleShowMore}
className="px-4 py-2 bg-light-secondary dark:bg-dark-secondary hover:bg-light-200 dark:hover:bg-dark-200 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white rounded-md transition duration-200 flex items-center space-x-2"
>
<span>Show More Videos</span>
<span className="text-sm opacity-75">({displayLimit} of {videos.length})</span>
</button>
</div>
)}
<Lightbox
open={open}
close={() => setOpen(false)}

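Note: Searchvideos drops the same displayLimit / handleShowMore pagination that SearchImages loses above. For reference, a self-contained sketch of that removed "show more" pattern (MediaGrid is a hypothetical stand-in; the real markup and classes are in the diff):

import { useState } from 'react';

const PAGE_SIZE = 10;

// Render only the first displayLimit items, growing by PAGE_SIZE per click.
const MediaGrid = ({ items }: { items: string[] }) => {
  const [displayLimit, setDisplayLimit] = useState(PAGE_SIZE);
  const handleShowMore = () =>
    setDisplayLimit((prev) => Math.min(prev + PAGE_SIZE, items.length));
  return (
    <>
      {items.slice(0, displayLimit).map((src, i) => (
        <img key={i} src={src} alt="" />
      ))}
      {items.length > displayLimit && (
        <button onClick={handleShowMore}>
          Show More ({displayLimit} of {items.length})
        </button>
      )}
    </>
  );
};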
View File

@@ -6,73 +6,29 @@ import {
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import LineOutputParser from '../outputParsers/lineOutputParser';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const imageSearchChainPrompt = `
# Instructions
- You will be given a question from a user and a conversation history
- Rephrase the question based on the conversation so it is a standalone question that can be used to search for images that are relevant to the question
- Ensure the rephrased question agrees with the conversation and is relevant to the conversation
- If you are thinking or reasoning, use <think> tags to indicate your thought process
- If you are thinking or reasoning, do not use <answer> and </answer> tags in your thinking. Those tags should only be used in the final output
- Use the provided date to ensure the rephrased question is relevant to the current date and time if applicable
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
# Data locations
- The history is contained in the <conversation> tag after the <examples> below
- The user question is contained in the <question> tag after the <examples> below
- Output your answer in an <answer> tag
- Current date & time in ISO format (UTC timezone) is: {date}
- Do not include any other text in your answer
<examples>
## Example 1 input
<conversation>
Who won the last F1 race?\nAyrton Senna won the Monaco Grand Prix. It was a tight race with lots of overtakes. Alain Prost was in the lead for most of the race until the last lap when Senna overtook them.
</conversation>
<question>
What were the highlights of the race?
</question>
Example:
1. Follow up question: What is a cat?
Rephrased: A cat
## Example 1 output
<answer>
F1 Monaco Grand Prix highlights
</answer>
2. Follow up question: What is a car? How does it work?
Rephrased: Car working
## Example 2 input
<conversation>
What is the theory of relativity?
</conversation>
<question>
What is the theory of relativity?
</question>
3. Follow up question: How does an AC work?
Rephrased: AC working
## Example 2 output
<answer>
Theory of relativity
</answer>
## Example 3 input
<conversation>
I'm looking for a nice vacation spot. Where do you suggest?\nI suggest you go to Hawaii. It's a beautiful place with lots of beaches and activities to do.\nI love the beach! What are some activities I can do there?\nYou can go surfing, snorkeling, or just relax on the beach.
</conversation>
<question>
What are some activities I can do in Hawaii?
</question>
## Example 3 output
<answer>
Hawaii activities
</answer>
</examples>
<conversation>
Conversation:
{chat_history}
</conversation>
<question>
{query}
</question>
Follow up question: {query}
Rephrased question:
`;
type ImageSearchChainInput = {
@@ -86,9 +42,7 @@ interface ImageSearchResult {
title: string;
}
const outputParser = new LineOutputParser({
key: 'answer',
});
const strParser = new StringOutputParser();
const createImageSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
@@ -99,13 +53,14 @@ const createImageSearchChain = (llm: BaseChatModel) => {
query: (input: ImageSearchChainInput) => {
return input.query;
},
date: () => new Date().toISOString(),
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
llm,
outputParser,
RunnableLambda.from(async (searchQuery: string) => {
const res = await searchSearxng(searchQuery, {
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});
@@ -121,7 +76,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
}
});
return images;
return images.slice(0, 10);
}),
]);
};

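Note: this file trades a LineOutputParser keyed on <answer> for a plain StringOutputParser plus an inline regex that strips <think> blocks. A regex-based sketch of both extraction strategies (the repo's LineOutputParser may be implemented differently):

// Extract the body of one XML-style tag, e.g. <answer>...</answer>.
const parseTag = (text: string, key: string): string => {
  const match = text.match(new RegExp(`<${key}>([\\s\\S]*?)</${key}>`));
  return match ? match[1].trim() : '';
};

// Remove <think>...</think> reasoning blocks before using the output.
const stripThink = (text: string): string =>
  text.replace(/<think>[\s\S]*?<\/think>/g, '').trim();

One caveat: the diff's /<think>.*?<\/think>/g has no s flag, so . does not cross newlines and a multi-line think block survives the replace; [\s\S]*? (as above) or the s flag handles that.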
View File

@@ -6,74 +6,30 @@ import {
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import LineOutputParser from '../outputParsers/lineOutputParser';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
# Instructions
- You will be given a question from a user and a conversation history
- Rephrase the question based on the conversation so it is a standalone question that can be used to search YouTube for videos
- Ensure the rephrased question agrees with the conversation and is relevant to the conversation
- If you are thinking or reasoning, use <think> tags to indicate your thought process
- If you are thinking or reasoning, do not use <answer> and </answer> tags in your thinking. Those tags should only be used in the final output
- Use the provided date to ensure the rephrased question is relevant to the current date and time if applicable
# Data locations
- The history is contained in the <conversation> tag after the <examples> below
- The user question is contained in the <question> tag after the <examples> below
- Output your answer in an <answer> tag
- Current date & time in ISO format (UTC timezone) is: {date}
- Do not include any other text in your answer
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search YouTube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
<examples>
## Example 1 input
<conversation>
Who won the last F1 race?\nAyrton Senna won the Monaco Grand Prix. It was a tight race with lots of overtakes. Alain Prost was in the lead for most of the race until the last lap when Senna overtook them.
</conversation>
<question>
What were the highlights of the race?
</question>
## Example 1 output
<answer>
F1 Monaco Grand Prix highlights
</answer>
## Example 2 input
<conversation>
What is the theory of relativity?
</conversation>
<question>
What is the theory of relativity?
</question>
## Example 2 output
<answer>
What is the theory of relativity?
</answer>
## Example 3 input
<conversation>
I'm looking for a nice vacation spot. Where do you suggest?\nI suggest you go to Hawaii. It's a beautiful place with lots of beaches and activities to do.\nI love the beach! What are some activities I can do there?\nYou can go surfing, snorkeling, or just relax on the beach.
</conversation>
<question>
What are some activities I can do in Hawaii?
</question>
## Example 3 output
<answer>
Activities to do in Hawaii
</answer>
</examples>
<conversation>
{chat_history}
</conversation>
<question>
{query}
</question>
`;
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
@@ -87,9 +43,7 @@ interface VideoSearchResult {
iframe_src: string;
}
const answerParser = new LineOutputParser({
key: 'answer',
});
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
@@ -100,13 +54,14 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
query: (input: VideoSearchChainInput) => {
return input.query;
},
date: () => new Date().toISOString(),
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
llm,
answerParser,
RunnableLambda.from(async (searchQuery: string) => {
const res = await searchSearxng(searchQuery, {
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const res = await searchSearxng(input, {
engines: ['youtube'],
});
@@ -128,7 +83,7 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
}
});
return videos;
return videos.slice(0, 10);
}),
]);
};

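Note: the videoSearchChain changes mirror imageSearchChain exactly, including the {date} input added to the prompt in one direction. Both versions feed the prompt through an initial runnable map; a minimal sketch of injecting a derived value alongside a pass-through (the prompt string is a stand-in):

import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';

const chain = RunnableSequence.from([
  RunnableMap.from({
    query: (input: { query: string }) => input.query, // pass-through
    date: () => new Date().toISOString(), // computed at invoke time
  }),
  PromptTemplate.fromTemplate('Current date: {date}. Rephrase for search: {query}'),
]);

// chain.invoke({ query: 'latest F1 highlights' }) yields a prompt value that
// already carries the current UTC timestamp.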
View File

@@ -1,131 +1,80 @@
export const webSearchRetrieverPrompt = `
# Instructions
- You are an AI question rephraser
- You will be given a conversation and a user question
- Rephrase the question so it is appropriate for web search
- Only add additional information or change the meaning of the question if it is necessary for clarity or relevance to the conversation
- Condense the question to its essence and remove any unnecessary details
- Ensure the question is grammatically correct and free of spelling errors
- If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, then you need to return \`not_needed\` as the response in the <answer> XML block
- If the user includes URLs or a PDF in their question, return the URLs or PDF links inside the <links> XML block and the question inside the <answer> XML block
- If the user wants you to summarize the webpage or the PDF, return summarize inside the <answer> XML block in place of a question and the URLs to summarize in the <links> XML block
- If you are a thinking or reasoning AI, do not use <answer> and </answer> or <links> and </links> tags in your thinking. Those tags should only be used in the final output
- If applicable, use the provided date to ensure the rephrased question is relevant to the current date and time
You are an AI question rephraser. You will be given a conversation and a follow-up question; you will have to rephrase the follow-up question so it is a standalone question and can be used by another LLM to search the web for information to answer it. You should condense the question to its essence and remove any unnecessary details. You should also make sure that the question is clear and easy to understand. You should not add any new information or change the meaning of the question. You should also make sure that the question is grammatically correct and free of spelling errors.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
If you are a thinking or reasoning AI, you should avoid using \`<question>\` and \`</question>\` tags in your thinking. Those tags should only be used in the final output. You should also avoid using \`<links>\` and \`</links>\` tags in your thinking. Those tags should only be used in the final output.
# Data
- The history is contained in the <conversation> tag after the <examples> below
- The user question is contained in the <question> tag after the <examples> below
- You must always return the rephrased question inside an <answer> XML block, if there are no links in the follow-up question then don't insert a <links> XML block in your response
- Current date & time in ISO format (UTC timezone) is: {date}
- Do not include any other text in your answer
There are several examples attached for your reference inside the below examples XML block
There are several examples attached for your reference inside the below \`examples\` XML block
<examples>
## Example 1 input
<conversation>
Who won the last F1 race?\nAyrton Senna won the Monaco Grand Prix. It was a tight race with lots of overtakes. Alain Prost was in the lead for most of the race until the last lap when Senna overtook them.
</conversation>
1. Follow up question: What is the capital of France
Rephrased question: \`
<question>
What were the highlights of the race?
Capital of France
</question>
\`
## Example 1 output
<answer>
F1 Monaco Grand Prix highlights
</answer>
## Example 2 input
<conversation>
</conversation>
2. Hi, how are you?
Rephrased question: \`
<question>
What is the capital of France
</question>
## Example 2 output
<answer>
Capital of France
</answer>
## Example 3 input
<conversation>
</conversation>
<question>
Hi, how are you?
</question>
## Example 3 output
<answer>
not_needed
</answer>
## Example 4 input
<conversation>
</conversation>
<question>
Can you tell me what is X from https://example.com
</question>
\`
## Example 4 output
<answer>
Can you tell me what is X
</answer>
3. Follow up question: What is Docker?
Rephrased question: \`
<question>
What is Docker
</question>
\`
4. Follow up question: Can you tell me what is X from https://example.com
Rephrased question: \`
<question>
Can you tell me what is X?
</question>
<links>
https://example.com
</links>
\`
## Example 5 input
<conversation>
</conversation>
5. Follow up question: Summarize the content from https://example.com
Rephrased question: \`
<question>
Summarize the content from https://example.com
</question>
## Example 5 output
<answer>
summarize
</answer>
</question>
<links>
https://example.com
</links>
\`
## Example 6 input
<conversation>
</conversation>
6. Follow-up question: Get the current F1 constructor standings and return the results in a table
Rephrased question: \`
<question>
Get the current F1 constructor standings and return the results in a table
Current F1 constructor standings
</question>
\`
## Example 6 output
<answer>
{date} F1 constructor standings
</answer>
## Example 7 input
<conversation>
</conversation>
7. Follow-up question: What are the top 10 restaurants in New York? Show the results in a table and include a short description of each restaurant.
Rephrased question: \`
<question>
What are the top 10 restaurants in New York? Show the results in a table and include a short description of each restaurant
Top 10 restaurants in New York
</question>
## Example 7 output
<answer>
Top 10 restaurants in New York on {date}
</answer>
\`
</examples>
Everything below is part of the actual conversation
Anything below is part of the actual conversation, and you need to use the conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
<conversation>
{chat_history}
</conversation>
<question>
{query}
</question>
Follow up question: {query}
Rephrased question:
`;
export const webSearchResponsePrompt = `

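Note: the older retriever prompt above defines an output contract of a <question> block plus an optional <links> block (the newer variant uses <answer> instead). A sketch of consuming that contract downstream, using plain regexes rather than the repo's LineOutputParser / LineListOutputParser:

// Pull the rephrased question and any URLs out of the model's reply.
const parseRetrieverOutput = (raw: string) => {
  const question = raw.match(/<question>([\s\S]*?)<\/question>/)?.[1]?.trim() ?? '';
  const linksBlock = raw.match(/<links>([\s\S]*?)<\/links>/)?.[1] ?? '';
  const links = linksBlock
    .split('\n')
    .map((line) => line.trim())
    .filter(Boolean);
  if (question === 'not_needed') {
    return { query: '', links: [] as string[] }; // greeting or pure writing task
  }
  return { query: question, links };
};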
View File

@@ -13,8 +13,8 @@ export const searchHandlers: Record<string, MetaSearchAgent> = {
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
@@ -40,8 +40,8 @@ export const searchHandlers: Record<string, MetaSearchAgent> = {
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
@@ -49,21 +49,20 @@ export const searchHandlers: Record<string, MetaSearchAgent> = {
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
additionalSearchCriteria: "site:reddit.com",
}),
};

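Note: one side of this diff scopes Reddit results with additionalSearchCriteria: "site:reddit.com" (applied in MetaSearchAgent by appending it to the rephrased query), while the other relies on a dedicated 'reddit' SearXNG engine. A one-liner sketch of the criteria approach:

// Append an operator such as "site:reddit.com" to scope a web query.
const applySearchCriteria = (question: string, criteria?: string): string =>
  criteria ? `${question} ${criteria}` : question;

applySearchCriteria('best mechanical keyboards', 'site:reddit.com');
// → 'best mechanical keyboards site:reddit.com'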
View File

@@ -45,7 +45,6 @@ interface Config {
queryGeneratorPrompt: string;
responsePrompt: string;
activeEngines: string[];
additionalSearchCriteria?: string;
}
type BasicChainInput = {
@@ -71,19 +70,19 @@ class MetaSearchAgent implements MetaSearchAgentType {
llm,
this.strParser,
RunnableLambda.from(async (input: string) => {
//console.log(`LLM response for initial web search:"${input}"`);
const linksOutputParser = new LineListOutputParser({
key: 'links',
});
const questionOutputParser = new LineOutputParser({
key: 'answer',
key: 'question',
});
const links = await linksOutputParser.parse(input);
let question = await questionOutputParser.parse(input);
//console.log('question', question);
let question = this.config.summarizer
? await questionOutputParser.parse(input)
: input;
console.log('question', question);
if (question === 'not_needed') {
return { query: '', docs: [] };
@@ -207,10 +206,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: docs };
} else {
if (this.config.additionalSearchCriteria) {
question = `${question} ${this.config.additionalSearchCriteria}`;
}
question = question.replace(/<think>.*?<\/think>/g, '');
const searxngResult = await searchSearxng(question, {
language: 'en',
@@ -249,7 +245,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
optimizationMode: 'speed' | 'balanced' | 'quality',
systemInstructions: string,
) {
return RunnableSequence.from([
RunnableMap.from({
systemInstructions: () => systemInstructions,
@@ -267,11 +262,10 @@ class MetaSearchAgent implements MetaSearchAgentType {
if (this.config.searchWeb) {
const searchRetrieverChain =
await this.createSearchRetrieverChain(llm);
var date = new Date().toISOString();
const searchRetrieverResult = await searchRetrieverChain.invoke({
chat_history: processedHistory,
query,
date,
});
query = searchRetrieverResult.query;