diff --git a/README.md b/README.md
index 866b585..3b80e58 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,6 @@
 
 [![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
 
-
 ![preview](.assets/perplexica-screenshot.png?)
 
 ## Table of Contents
diff --git a/src/prompts/index.ts b/src/prompts/index.ts
index f479185..90bd738 100644
--- a/src/prompts/index.ts
+++ b/src/prompts/index.ts
@@ -6,7 +6,11 @@ import {
   redditSearchResponsePrompt,
   redditSearchRetrieverPrompt,
 } from './redditSearch';
-import { webSearchResponsePrompt, webSearchRetrieverPrompt } from './webSearch';
+import {
+  webSearchResponsePrompt,
+  webSearchRetrieverPrompt,
+  preciseWebSearchResponsePrompt,
+} from './webSearch';
 import {
   wolframAlphaSearchResponsePrompt,
   wolframAlphaSearchRetrieverPrompt,
@@ -20,6 +24,7 @@ import {
 export default {
   webSearchResponsePrompt,
   webSearchRetrieverPrompt,
+  preciseWebSearchResponsePrompt,
   academicSearchResponsePrompt,
   academicSearchRetrieverPrompt,
   redditSearchResponsePrompt,
diff --git a/src/prompts/webSearch.ts b/src/prompts/webSearch.ts
index d8269c8..d0123c0 100644
--- a/src/prompts/webSearch.ts
+++ b/src/prompts/webSearch.ts
@@ -104,3 +104,41 @@ export const webSearchResponsePrompt = `
 
     Current date & time in ISO format (UTC timezone) is: {date}.
 `;
+
+export const preciseWebSearchResponsePrompt = `
+    You are Perplexica, an AI model skilled in web search and crafting accurate, concise, and well-structured answers. You excel at breaking down long-form content into brief summaries or specific answers.
+
+    Your task is to provide answers that are:
+    - **Informative and relevant**: Precisely address the user's query using the given context.
+    - **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
+    - **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
+    - **Brief and accurate**: If a direct answer is available, provide it succinctly without unnecessary elaboration.
+
+    ### Formatting Instructions
+    - **Structure**: Use a well-organized format with proper headings. Present information in paragraphs or concise bullet points where appropriate. You should never need more than one heading.
+    - **Tone and Style**: Maintain a matter-of-fact tone and focus on delivering accurate information. Avoid overly complex language or unnecessary jargon.
+    - **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
+    - **Length and Depth**: Be brief. Provide concise answers. Avoid superficial responses and strive for accuracy without unnecessary repetition.
+    - **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
+    - **Conclusion or Summary**: Do not include a conclusion unless the context specifically requires it.
+
+    ### Citation Requirements
+    - Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
+    - Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
+    - Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
+    - Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
+    - Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
+    - Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
+
+    ### Special Instructions
+    - If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
+    - If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
+    - Do not provide additional commentary or personal opinions unless specifically asked for in the context.
+    - Do not include pleasantries or greetings in your response.
+
+    <context>
+    {context}
+    </context>
+
+    Current date & time in ISO format (UTC timezone) is: {date}.
+`;
diff --git a/src/routes/images.ts b/src/routes/images.ts
index 5671657..2e8e912 100644
--- a/src/routes/images.ts
+++ b/src/routes/images.ts
@@ -5,6 +5,7 @@ import { getAvailableChatModelProviders } from '../lib/providers';
 import { HumanMessage, AIMessage } from '@langchain/core/messages';
 import logger from '../utils/logger';
 import { ChatOpenAI } from '@langchain/openai';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
@@ -16,6 +17,7 @@ const router = express.Router();
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface ImageSearchBody {
@@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+    }
   }
 
   if (!llm) {
diff --git a/src/routes/search.ts b/src/routes/search.ts
index 57d90a3..daefece 100644
--- a/src/routes/search.ts
+++ b/src/routes/search.ts
@@ -15,12 +15,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface chatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
   customOpenAIKey?: string;
   customOpenAIBaseURL?: string;
 }
@@ -78,6 +80,7 @@ router.post('/', async (req, res) => {
   const embeddingModel =
     body.embeddingModel?.model ||
     Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
+  const ollamaContextWindow = body.chatModel?.ollamaContextWindow || 2048;
 
   let llm: BaseChatModel | undefined;
   let embeddings: Embeddings | undefined;
@@ -99,6 +102,9 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = ollamaContextWindow;
+    }
   }
 
   if (
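For reference, a request that exercises the new `ollamaContextWindow` field might look like the sketch below. This is a minimal illustration only: the `/api/search` mount point, the port, and the sibling body fields are assumptions inferred from the surrounding code; only `chatModel.ollamaContextWindow` is confirmed by this diff.

```ts
// Hedged sketch: POST to the search route with an Ollama context window.
// Endpoint path and fields other than chatModel.ollamaContextWindow are
// illustrative assumptions, not part of this diff.
const res = await fetch('http://localhost:3001/api/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    focusMode: 'webSearch',
    query: 'What is the capital of France?',
    chatModel: {
      provider: 'ollama',
      model: 'llama3.1',
      ollamaContextWindow: 8192, // copied to ChatOllama.numCtx; defaults to 2048
    },
  }),
});
console.log(await res.json());
```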
diff --git a/src/routes/suggestions.ts b/src/routes/suggestions.ts
index 7dd1739..c7a1409 100644
--- a/src/routes/suggestions.ts
+++ b/src/routes/suggestions.ts
@@ -10,12 +10,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface SuggestionsBody {
@@ -60,6 +62,9 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+    }
   }
 
   if (!llm) {
diff --git a/src/routes/videos.ts b/src/routes/videos.ts
index b631f26..debe3cd 100644
--- a/src/routes/videos.ts
+++ b/src/routes/videos.ts
@@ -10,12 +10,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface VideoSearchBody {
@@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+    }
   }
 
   if (!llm) {
diff --git a/src/search/metaSearchAgent.ts b/src/search/metaSearchAgent.ts
index ee82c10..2e414fe 100644
--- a/src/search/metaSearchAgent.ts
+++ b/src/search/metaSearchAgent.ts
@@ -34,6 +34,7 @@ export interface MetaSearchAgentType {
     embeddings: Embeddings,
     optimizationMode: 'speed' | 'balanced' | 'quality',
     fileIds: string[],
+    isCompact?: boolean,
   ) => Promise<eventEmitter>;
 }
 
@@ -44,6 +45,7 @@ interface Config {
   rerankThreshold: number;
   queryGeneratorPrompt: string;
   responsePrompt: string;
+  preciseResponsePrompt: string;
   activeEngines: string[];
 }
 
@@ -235,6 +237,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
     fileIds: string[],
     embeddings: Embeddings,
     optimizationMode: 'speed' | 'balanced' | 'quality',
+    isCompact?: boolean,
   ) {
     return RunnableSequence.from([
       RunnableMap.from({
@@ -278,7 +281,12 @@ class MetaSearchAgent implements MetaSearchAgentType {
           .pipe(this.processDocs),
       }),
       ChatPromptTemplate.fromMessages([
-        ['system', this.config.responsePrompt],
+        [
+          'system',
+          isCompact
+            ? this.config.preciseResponsePrompt
+            : this.config.responsePrompt,
+        ],
         new MessagesPlaceholder('chat_history'),
         ['user', '{query}'],
       ]),
@@ -465,6 +473,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
     embeddings: Embeddings,
     optimizationMode: 'speed' | 'balanced' | 'quality',
     fileIds: string[],
+    isCompact?: boolean,
   ) {
     const emitter = new eventEmitter();
 
@@ -473,6 +482,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
       fileIds,
       embeddings,
       optimizationMode,
+      isCompact,
     );
 
     const stream = answeringChain.streamEvents(
diff --git a/src/websocket/connectionManager.ts b/src/websocket/connectionManager.ts
index bb8f242..979b8a0 100644
--- a/src/websocket/connectionManager.ts
+++ b/src/websocket/connectionManager.ts
@@ -14,6 +14,7 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 export const handleConnection = async (
   ws: WebSocket,
@@ -42,6 +43,8 @@ export const handleConnection = async (
       searchParams.get('embeddingModel') ||
       Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
 
+    const ollamaContextWindow = searchParams.get('ollamaContextWindow');
+
     let llm: BaseChatModel | undefined;
     let embeddings: Embeddings | undefined;
 
@@ -52,6 +55,9 @@ export const handleConnection = async (
     ) {
       llm = chatModelProviders[chatModelProvider][chatModel]
         .model as unknown as BaseChatModel | undefined;
+      if (llm instanceof ChatOllama) {
+        llm.numCtx = ollamaContextWindow ? parseInt(ollamaContextWindow) : 2048;
+      }
     } else if (chatModelProvider == 'custom_openai') {
       const customOpenaiApiKey = getCustomOpenaiApiKey();
       const customOpenaiApiUrl = getCustomOpenaiApiUrl();
diff --git a/src/websocket/messageHandler.ts b/src/websocket/messageHandler.ts
index 395c0de..99b3eaf 100644
--- a/src/websocket/messageHandler.ts
+++ b/src/websocket/messageHandler.ts
@@ -26,6 +26,7 @@ type WSMessage = {
   focusMode: string;
   history: Array<[string, string]>;
   files: Array<string>;
+  isCompact?: boolean;
 };
 
 export const searchHandlers = {
@@ -33,6 +34,7 @@ export const searchHandlers = {
     activeEngines: [],
     queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
     responsePrompt: prompts.webSearchResponsePrompt,
+    preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0.3,
     searchWeb: true,
@@ -42,6 +44,7 @@ export const searchHandlers = {
     activeEngines: ['arxiv', 'google scholar', 'pubmed'],
     queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
     responsePrompt: prompts.academicSearchResponsePrompt,
+    preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0,
     searchWeb: true,
@@ -51,6 +54,7 @@ export const searchHandlers = {
     activeEngines: [],
     queryGeneratorPrompt: '',
     responsePrompt: prompts.writingAssistantPrompt,
+    preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0,
     searchWeb: false,
@@ -60,6 +64,7 @@ export const searchHandlers = {
     activeEngines: ['wolframalpha'],
     queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
     responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
+    preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
     rerank: false,
     rerankThreshold: 0,
     searchWeb: true,
@@ -69,6 +74,7 @@ export const searchHandlers = {
     activeEngines: ['youtube'],
     queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
     responsePrompt: prompts.youtubeSearchResponsePrompt,
+    preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0.3,
     searchWeb: true,
@@ -78,6 +84,7 @@ export const searchHandlers = {
     activeEngines: ['reddit'],
     queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
     responsePrompt: prompts.redditSearchResponsePrompt,
+    preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
     rerank: true,
     rerankThreshold: 0.3,
     searchWeb: true,
@@ -116,6 +123,7 @@ const handleEmitterEvents = (
       sources = parsedData.data;
     }
   });
+
   emitter.on('end', () => {
     ws.send(JSON.stringify({ type: 'messageEnd', messageId: messageId }));
@@ -132,6 +140,7 @@ const handleEmitterEvents = (
       })
       .execute();
   });
+
   emitter.on('error', (data) => {
     const parsedData = JSON.parse(data);
     ws.send(
@@ -197,6 +206,7 @@ export const handleMessage = async (
         embeddings,
         parsedWSMessage.optimizationMode,
         parsedWSMessage.files,
+        parsedWSMessage.isCompact,
       );
 
       handleEmitterEvents(emitter, ws, aiMessageId, parsedMessage.chatId);
diff --git a/ui/app/settings/page.tsx b/ui/app/settings/page.tsx
index 6aff1b0..c659265 100644
--- a/ui/app/settings/page.tsx
+++ b/ui/app/settings/page.tsx
@@ -23,6 +23,7 @@ interface SettingsType {
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
+  ollamaContextWindow: number;
 }
 
 interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
@@ -112,6 +113,11 @@ const Page = () => {
   const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
   const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
   const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
+  const [contextWindowSize, setContextWindowSize] = useState(2048);
+  const [isCustomContextWindow, setIsCustomContextWindow] = useState(false);
+  const predefinedContextSizes = [
+    1024, 2048, 3072, 4096, 8192, 16384, 32768, 65536, 131072,
+  ];
 
   useEffect(() => {
     const fetchConfig = async () => {
@@ -123,6 +129,7 @@ const Page = () => {
       });
 
       const data = (await res.json()) as SettingsType;
+
       setConfig(data);
 
       const chatModelProvidersKeys = Object.keys(data.chatModelProviders || {});
@@ -171,6 +178,13 @@ const Page = () => {
       setAutomaticVideoSearch(
         localStorage.getItem('autoVideoSearch') === 'true',
       );
+      const storedContextWindow = parseInt(
+        localStorage.getItem('ollamaContextWindow') ?? '2048',
+      );
+      setContextWindowSize(storedContextWindow);
+      setIsCustomContextWindow(
+        !predefinedContextSizes.includes(storedContextWindow),
+      );
 
       setIsLoading(false);
     };
@@ -331,6 +345,8 @@ const Page = () => {
         localStorage.setItem('embeddingModelProvider', value);
       } else if (key === 'embeddingModel') {
         localStorage.setItem('embeddingModel', value);
+      } else if (key === 'ollamaContextWindow') {
+        localStorage.setItem('ollamaContextWindow', value.toString());
       }
     } catch (err) {
       console.error('Failed to save:', err);
@@ -548,6 +564,78 @@ const Page = () => {
                       ];
                     })()}
                   />
+                  {selectedChatModelProvider === 'ollama' && (
+                    <div className="flex flex-col space-y-1">
+                      <p className="text-black/70 dark:text-white/70 text-sm">
+                        Chat Context Window Size
+                      </p>
+                      <Select
+                        value={
+                          isCustomContextWindow
+                            ? 'custom'
+                            : contextWindowSize.toString()
+                        }
+                        onChange={(e) => {
+                          const value = e.target.value;
+                          if (value === 'custom') {
+                            setIsCustomContextWindow(true);
+                          } else {
+                            setIsCustomContextWindow(false);
+                            const numValue = parseInt(value);
+                            setContextWindowSize(numValue);
+                            setConfig((prev) => ({
+                              ...prev!,
+                              ollamaContextWindow: numValue,
+                            }));
+                            saveConfig('ollamaContextWindow', numValue);
+                          }
+                        }}
+                        options={[
+                          ...predefinedContextSizes.map((size) => ({
+                            value: size.toString(),
+                            label: `${size.toLocaleString()} tokens`,
+                          })),
+                          { value: 'custom', label: 'Custom...' },
+                        ]}
+                      />
+                      {isCustomContextWindow && (
+                        <div className="mt-2">
+                          <Input
+                            type="number"
+                            min={512}
+                            value={contextWindowSize}
+                            placeholder="Custom context window size (minimum 512)"
+                            isSaving={savingStates['ollamaContextWindow']}
+                            onChange={(e) => {
+                              // Allow any value to be typed
+                              const value =
+                                parseInt(e.target.value) ||
+                                contextWindowSize;
+                              setContextWindowSize(value);
+                            }}
+                            onSave={(value) => {
+                              // Validate only when saving
+                              const numValue = Math.max(
+                                512,
+                                parseInt(value) || 2048,
+                              );
+                              setContextWindowSize(numValue);
+                              setConfig((prev) => ({
+                                ...prev!,
+                                ollamaContextWindow: numValue,
+                              }));
+                              saveConfig('ollamaContextWindow', numValue);
+                            }}
+                          />
+                        </div>
+                      )}
+                      <p className="text-xs text-black/60 dark:text-white/60 mt-0.5">
+                        {isCustomContextWindow
+                          ? 'Adjust the context window size for Ollama models (minimum 512 tokens)'
+                          : 'Adjust the context window size for Ollama models'}
+                      </p>
+                    </div>
+                  )}
                 </div>
               )}
             </div>
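A small design note before the UI changes: the `instanceof ChatOllama` guard is now repeated in five places (the images, search, suggestions, and videos routes, plus the websocket connection manager). A shared helper would keep the default context window in one spot. A minimal sketch with a hypothetical name; this helper is not part of the diff:

```ts
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

// Hypothetical helper: apply the requested context window when the resolved
// model is an Ollama chat model, defaulting to 2048 tokens. Other providers
// are left untouched.
const applyOllamaContextWindow = (
  llm: BaseChatModel | undefined,
  contextWindow?: number,
): void => {
  if (llm instanceof ChatOllama) {
    llm.numCtx = contextWindow || 2048;
  }
};
```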
diff --git a/ui/components/Chat.tsx b/ui/components/Chat.tsx
index 81aa32f..3d4cf6b 100644
--- a/ui/components/Chat.tsx
+++ b/ui/components/Chat.tsx
@@ -16,9 +16,17 @@ const Chat = ({
   setFileIds,
   files,
   setFiles,
+  isCompact,
+  setIsCompact,
+  optimizationMode,
+  setOptimizationMode,
 }: {
   messages: Message[];
-  sendMessage: (message: string) => void;
+  sendMessage: (
+    message: string,
+    messageId?: string,
+    options?: { isCompact?: boolean },
+  ) => void;
   loading: boolean;
   messageAppeared: boolean;
   rewrite: (messageId: string) => void;
@@ -26,6 +34,10 @@ const Chat = ({
   setFileIds: (fileIds: string[]) => void;
   files: File[];
   setFiles: (files: File[]) => void;
+  isCompact: boolean;
+  setIsCompact: (isCompact: boolean) => void;
+  optimizationMode: string;
+  setOptimizationMode: (mode: string) => void;
 }) => {
   const [dividerWidth, setDividerWidth] = useState(0);
   const dividerRef = useRef<HTMLDivElement | null>(null);
@@ -71,6 +83,7 @@ const Chat = ({
                 dividerRef={isLast ? dividerRef : undefined}
                 isLast={isLast}
                 rewrite={rewrite}
+                isCompact={isCompact}
                 sendMessage={sendMessage}
               />
               {!isLast && msg.role === 'assistant' && (
@@ -83,7 +96,7 @@ const Chat = ({
       {dividerWidth > 0 && (
-        <MessageInput loading={loading} sendMessage={sendMessage} />
+        <MessageInput loading={loading} sendMessage={sendMessage} isCompact={isCompact} setIsCompact={setIsCompact} optimizationMode={optimizationMode} setOptimizationMode={setOptimizationMode} />
       )}
diff --git a/ui/components/ChatWindow.tsx b/ui/components/ChatWindow.tsx
index 1940f42..f87292d 100644
--- a/ui/components/ChatWindow.tsx
+++ b/ui/components/ChatWindow.tsx
@@ -197,6 +197,11 @@ const useSocket = (
           'openAIBaseURL',
           localStorage.getItem('openAIBaseURL')!,
         );
+      } else {
+        searchParams.append(
+          'ollamaContextWindow',
+          localStorage.getItem('ollamaContextWindow') || '2048',
+        );
       }
 
       searchParams.append('embeddingModel', embeddingModel!);
@@ -394,6 +399,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
   const [focusMode, setFocusMode] = useState('webSearch');
   const [optimizationMode, setOptimizationMode] = useState('speed');
+  const [isCompact, setIsCompact] = useState(false);
 
   const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
@@ -401,6 +407,21 @@ const ChatWindow = ({ id }: { id?: string }) => {
 
   const [isSettingsOpen, setIsSettingsOpen] = useState(false);
 
+  useEffect(() => {
+    const savedCompactMode = localStorage.getItem('compactMode');
+    const savedOptimizationMode = localStorage.getItem('optimizationMode');
+
+    if (savedCompactMode !== null) {
+      setIsCompact(savedCompactMode === 'true');
+    }
+
+    if (savedOptimizationMode !== null) {
+      setOptimizationMode(savedOptimizationMode);
+    } else {
+      localStorage.setItem('optimizationMode', optimizationMode);
+    }
+  }, []);
+
   useEffect(() => {
     if (
       chatId &&
@@ -451,7 +472,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
     }
   }, [isMessagesLoaded, isWSReady]);
 
-  const sendMessage = async (message: string, messageId?: string) => {
+  const sendMessage = async (
+    message: string,
+    messageId?: string,
+    options?: { isCompact?: boolean; rewriteIndex?: number },
+  ) => {
     if (loading) return;
     if (!ws || ws.readyState !== WebSocket.OPEN) {
       toast.error('Cannot send message while disconnected');
@@ -464,23 +489,33 @@ const ChatWindow = ({ id }: { id?: string }) => {
     let sources: Document[] | undefined = undefined;
     let recievedMessage = '';
     let added = false;
+    let messageChatHistory = chatHistory;
+
+    if (options?.rewriteIndex !== undefined) {
+      const rewriteIndex = options.rewriteIndex;
+      setMessages((prev) => {
+        return [...prev.slice(0, messages.length > 2 ? rewriteIndex - 1 : 0)];
+      });
+
+      messageChatHistory = chatHistory.slice(0, messages.length > 2 ? rewriteIndex - 1 : 0);
+      setChatHistory(messageChatHistory);
+    }
 
     messageId = messageId ?? crypto.randomBytes(7).toString('hex');
-
-    ws.send(
-      JSON.stringify({
-        type: 'message',
-        message: {
-          messageId: messageId,
-          chatId: chatId!,
-          content: message,
-        },
-        files: fileIds,
-        focusMode: focusMode,
-        optimizationMode: optimizationMode,
-        history: [...chatHistory, ['human', message]],
-      }),
-    );
+    let messageData = {
+      type: 'message',
+      message: {
+        messageId: messageId,
+        chatId: chatId!,
+        content: message,
+      },
+      files: fileIds,
+      focusMode: focusMode,
+      optimizationMode: optimizationMode,
+      history: [...messageChatHistory, ['human', message]],
+      isCompact: options?.isCompact ?? isCompact,
+    };
+    ws.send(JSON.stringify(messageData));
 
     setMessages((prevMessages) => [
       ...prevMessages,
@@ -597,25 +632,14 @@ const ChatWindow = ({ id }: { id?: string }) => {
   };
 
   const rewrite = (messageId: string) => {
-    const index = messages.findIndex((msg) => msg.messageId === messageId);
-
-    if (index === -1) return;
-
-    const message = messages[index - 1];
-
-    setMessages((prev) => {
-      return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
-    });
-    setChatHistory((prev) => {
-      return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
-    });
-
-    sendMessage(message.content, message.messageId);
+    const messageIndex = messages.findIndex((msg) => msg.messageId === messageId);
+    if (messageIndex === -1) return;
+    sendMessage(messages[messageIndex - 1].content, messageId, { isCompact, rewriteIndex: messageIndex });
   };
 
   useEffect(() => {
     if (isReady && initialMessage && ws?.readyState === 1) {
-      sendMessage(initialMessage);
+      sendMessage(initialMessage, undefined, { isCompact });
     }
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [ws?.readyState, isReady, initialMessage, isWSReady]);
@@ -655,6 +679,10 @@ const ChatWindow = ({ id }: { id?: string }) => {
             fileIds={fileIds}
             setFileIds={setFileIds}
             files={files}
             setFiles={setFiles}
+            isCompact={isCompact}
+            setIsCompact={setIsCompact}
+            optimizationMode={optimizationMode}
+            setOptimizationMode={setOptimizationMode}
           />
         ) : (
@@ -668,6 +696,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
             setFileIds={setFileIds}
             files={files}
             setFiles={setFiles}
+            isCompact={isCompact}
+            setIsCompact={setIsCompact}
           />
         )}
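Paired with the `connectionManager.ts` change earlier, the client and server now agree on an `ollamaContextWindow` query parameter. A rough sketch of the connection URL that `useSocket` ends up building; the base URL and the other parameter names are assumptions inferred from the surrounding code, and only `ollamaContextWindow` is confirmed by this diff:

```ts
// Illustrative reconstruction of the websocket URL for an Ollama provider.
const searchParams = new URLSearchParams();
searchParams.append('chatModelProvider', 'ollama'); // assumed parameter name
searchParams.append('chatModel', 'llama3.1'); // assumed parameter name
searchParams.append(
  'ollamaContextWindow',
  localStorage.getItem('ollamaContextWindow') || '2048', // saved by the settings page
);
const ws = new WebSocket(`ws://localhost:3001?${searchParams.toString()}`);
```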
diff --git a/ui/components/EmptyChat.tsx b/ui/components/EmptyChat.tsx
index 838849f..19366dc 100644
--- a/ui/components/EmptyChat.tsx
+++ b/ui/components/EmptyChat.tsx
@@ -14,6 +14,8 @@ const EmptyChat = ({
   setFileIds,
   files,
   setFiles,
+  isCompact,
+  setIsCompact,
 }: {
   sendMessage: (message: string) => void;
   focusMode: string;
@@ -24,6 +26,8 @@ const EmptyChat = ({
   setFileIds: (fileIds: string[]) => void;
   files: File[];
   setFiles: (files: File[]) => void;
+  isCompact: boolean;
+  setIsCompact: (isCompact: boolean) => void;
 }) => {
   const [isSettingsOpen, setIsSettingsOpen] = useState(false);
 
@@ -48,6 +52,8 @@ const EmptyChat = ({
           setFileIds={setFileIds}
           files={files}
           setFiles={setFiles}
+          isCompact={isCompact}
+          setIsCompact={setIsCompact}
         />
       </div>
diff --git a/ui/components/EmptyChatMessageInput.tsx b/ui/components/EmptyChatMessageInput.tsx
index 43d1e28..7ef1b66 100644
--- a/ui/components/EmptyChatMessageInput.tsx
+++ b/ui/components/EmptyChatMessageInput.tsx
@@ -17,8 +17,14 @@ const EmptyChatMessageInput = ({
   setFileIds,
   files,
   setFiles,
+  isCompact,
+  setIsCompact,
 }: {
-  sendMessage: (message: string) => void;
+  sendMessage: (
+    message: string,
+    messageId?: string,
+    options?: { isCompact?: boolean },
+  ) => void;
   focusMode: string;
   setFocusMode: (mode: string) => void;
   optimizationMode: string;
@@ -27,6 +33,8 @@ const EmptyChatMessageInput = ({
   setFileIds: (fileIds: string[]) => void;
   files: File[];
   setFiles: (files: File[]) => void;
+  isCompact: boolean;
+  setIsCompact: (isCompact: boolean) => void;
 }) => {
   const [copilotEnabled, setCopilotEnabled] = useState(false);
   const [message, setMessage] = useState('');
@@ -61,13 +69,13 @@ const EmptyChatMessageInput = ({
     <form
       onSubmit={(e) => {
         e.preventDefault();
-        sendMessage(message);
+        sendMessage(message, undefined, { isCompact });
         setMessage('');
       }}
       onKeyDown={(e) => {
         if (e.key === 'Enter' && !e.shiftKey) {
           e.preventDefault();
-          sendMessage(message);
+          sendMessage(message, undefined, { isCompact });
           setMessage('');
         }
       }}
@@ -97,6 +105,8 @@
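The diff is truncated at the final hunk header, but the wire format for the new flag is already fixed by the `WSMessage` type and the `messageData` object in `ChatWindow.tsx`. For illustration, a message that opts into the precise response prompt could look like this; the IDs and content are invented:

```ts
declare const ws: WebSocket; // an open connection, e.g. from the sketch above

const wsMessage = {
  type: 'message',
  message: {
    messageId: '9f86d081a3b2c1', // crypto.randomBytes(7).toString('hex')
    chatId: 'example-chat-id',
    content: 'Summarize the latest Node.js release notes',
  },
  files: [] as string[],
  focusMode: 'webSearch',
  optimizationMode: 'speed',
  history: [['human', 'Summarize the latest Node.js release notes']],
  isCompact: true, // MetaSearchAgent answers with preciseResponsePrompt
};
ws.send(JSON.stringify(wsMessage));
```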