From a71e4ae10df1631cd2dbc6fd838cb558192907d6 Mon Sep 17 00:00:00 2001 From: Willie Zutz Date: Thu, 1 May 2025 11:32:13 -0600 Subject: [PATCH] feat(app): - Adds true chat mode. Moves writing mode to local research mode. - Adds model stats that shows model name and response time for messages. - Adds settings toggle to allow turning off automatic suggestions --- README.md | 5 +- docs/API/SEARCH.md | 2 +- src/app/api/chat/route.ts | 36 ++++++- src/app/settings/page.tsx | 50 +++++++++- src/components/Chat.tsx | 9 +- src/components/ChatWindow.tsx | 63 ++++++++++-- src/components/MessageActions/ModelInfo.tsx | 80 +++++++++++++++ src/components/MessageBox.tsx | 97 +++++++++++++++---- src/components/MessageInputActions/Focus.tsx | 25 +++-- src/lib/prompts/chat.ts | 19 ++++ src/lib/prompts/index.ts | 6 +- .../{writingAssistant.ts => localResearch.ts} | 6 +- src/lib/search/index.ts | 13 ++- src/lib/search/metaSearchAgent.ts | 48 ++++++++- 14 files changed, 408 insertions(+), 51 deletions(-) create mode 100644 src/components/MessageActions/ModelInfo.tsx create mode 100644 src/lib/prompts/chat.ts rename src/lib/prompts/{writingAssistant.ts => localResearch.ts} (77%) diff --git a/README.md b/README.md index 9e94028..fc93244 100644 --- a/README.md +++ b/README.md @@ -41,9 +41,10 @@ Want to know more about its architecture and how it works? You can read it [here - **Two Main Modes:** - **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page. - **Normal Mode:** Processes your query and performs a web search. -- **Focus Modes:** Special modes to better answer specific types of questions. Perplexica currently has 6 focus modes: +- **Focus Modes:** Special modes to better answer specific types of questions. 
Perplexica currently has 7 focus modes: - **All Mode:** Searches the entire web to find the best results. - - **Writing Assistant Mode:** Helpful for writing tasks that do not require searching the web. + - **Local Research Mode:** Research and interact with local files with citations. + - **Chat Mode:** Have a truly creative conversation without web search. - **Academic Search Mode:** Finds articles and papers, ideal for academic research. - **YouTube Search Mode:** Finds YouTube videos based on the search query. - **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha. diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md index b67b62b..e7e55b2 100644 --- a/docs/API/SEARCH.md +++ b/docs/API/SEARCH.md @@ -55,7 +55,7 @@ The API accepts a JSON object in the request body, where you define the focus mo - **`focusMode`** (string, required): Specifies which focus mode to use. Available modes: - - `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`. + - `webSearch`, `academicSearch`, `localResearch`, `chat`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`. - **`optimizationMode`** (string, optional): Specifies the optimization mode to control the balance between performance and quality. 
Available modes: diff --git a/src/app/api/chat/route.ts b/src/app/api/chat/route.ts index 2ae80be..9705dc9 100644 --- a/src/app/api/chat/route.ts +++ b/src/app/api/chat/route.ts @@ -54,12 +54,18 @@ type Body = { systemInstructions: string; }; +type ModelStats = { + modelName: string; + responseTime?: number; +}; + const handleEmitterEvents = async ( stream: EventEmitter, writer: WritableStreamDefaultWriter, encoder: TextEncoder, aiMessageId: string, chatId: string, + startTime: number, ) => { let recievedMessage = ''; let sources: any[] = []; @@ -92,12 +98,32 @@ const handleEmitterEvents = async ( sources = parsedData.data; } }); + let modelStats: ModelStats = { + modelName: '', + }; + + stream.on('stats', (data) => { + const parsedData = JSON.parse(data); + if (parsedData.type === 'modelStats') { + modelStats = parsedData.data; + } + }); + stream.on('end', () => { + const endTime = Date.now(); + const duration = endTime - startTime; + + modelStats = { + ...modelStats, + responseTime: duration, + }; + writer.write( encoder.encode( JSON.stringify({ type: 'messageEnd', messageId: aiMessageId, + modelStats: modelStats, }) + '\n', ), ); @@ -109,10 +135,9 @@ const handleEmitterEvents = async ( chatId: chatId, messageId: aiMessageId, role: 'assistant', - metadata: JSON.stringify({ - createdAt: new Date(), - ...(sources && sources.length > 0 && { sources }), - }), + metadata: { + modelStats: modelStats, + }, }) .execute(); }); @@ -185,6 +210,7 @@ const handleHistorySave = async ( export const POST = async (req: Request) => { try { + const startTime = Date.now(); const body = (await req.json()) as Body; const { message } = body; @@ -293,7 +319,7 @@ export const POST = async (req: Request) => { const writer = responseStream.writable.getWriter(); const encoder = new TextEncoder(); - handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId); + handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId, startTime); handleHistorySave(message, 
humanMessageId, body.focusMode, body.files); return new Response(responseStream.readable, { diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx index e97d189..42fff1e 100644 --- a/src/app/settings/page.tsx +++ b/src/app/settings/page.tsx @@ -5,7 +5,7 @@ import { useEffect, useState } from 'react'; import { cn } from '@/lib/utils'; import { Switch } from '@headlessui/react'; import ThemeSwitcher from '@/components/theme/Switcher'; -import { ImagesIcon, VideoIcon } from 'lucide-react'; +import { ImagesIcon, VideoIcon, Layers3 } from 'lucide-react'; import Link from 'next/link'; import { PROVIDER_METADATA } from '@/lib/providers'; @@ -147,6 +147,7 @@ const Page = () => { const [isLoading, setIsLoading] = useState(false); const [automaticImageSearch, setAutomaticImageSearch] = useState(false); const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false); + const [automaticSuggestions, setAutomaticSuggestions] = useState(true); const [systemInstructions, setSystemInstructions] = useState(''); const [savingStates, setSavingStates] = useState>({}); const [contextWindowSize, setContextWindowSize] = useState(2048); @@ -214,6 +215,9 @@ const Page = () => { setAutomaticVideoSearch( localStorage.getItem('autoVideoSearch') === 'true', ); + setAutomaticSuggestions( + localStorage.getItem('autoSuggestions') !== 'false', // default to true if not set + ); const storedContextWindow = parseInt( localStorage.getItem('ollamaContextWindow') ?? '2048', ); @@ -372,6 +376,8 @@ const Page = () => { localStorage.setItem('autoImageSearch', value.toString()); } else if (key === 'automaticVideoSearch') { localStorage.setItem('autoVideoSearch', value.toString()); + } else if (key === 'automaticSuggestions') { + localStorage.setItem('autoSuggestions', value.toString()); } else if (key === 'chatModelProvider') { localStorage.setItem('chatModelProvider', value); } else if (key === 'chatModel') { @@ -526,6 +532,48 @@ const Page = () => { /> + +
+
+
+ +
+
+

+ Automatic Suggestions +

+

+ Automatically show related suggestions after + responses +

+
+
+ { + setAutomaticSuggestions(checked); + saveConfig('automaticSuggestions', checked); + }} + className={cn( + automaticSuggestions + ? 'bg-[#24A0ED]' + : 'bg-light-200 dark:bg-dark-200', + 'relative inline-flex h-6 w-11 items-center rounded-full transition-colors focus:outline-none', + )} + > + + +
diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx index bee90b5..0cbf1f6 100644 --- a/src/components/Chat.tsx +++ b/src/components/Chat.tsx @@ -20,7 +20,14 @@ const Chat = ({ setOptimizationMode, }: { messages: Message[]; - sendMessage: (message: string) => void; + sendMessage: ( + message: string, + options?: { + messageId?: string; + rewriteIndex?: number; + suggestions?: string[]; + }, + ) => void; loading: boolean; messageAppeared: boolean; rewrite: (messageId: string) => void; diff --git a/src/components/ChatWindow.tsx b/src/components/ChatWindow.tsx index b4882e5..35e9164 100644 --- a/src/components/ChatWindow.tsx +++ b/src/components/ChatWindow.tsx @@ -13,6 +13,15 @@ import { Settings } from 'lucide-react'; import Link from 'next/link'; import NextError from 'next/error'; +export type ModelStats = { + modelName: string; + responseTime?: number; +}; + +export type MessageMetadata = { + modelStats?: ModelStats; +}; + export type Message = { messageId: string; chatId: string; @@ -21,6 +30,7 @@ export type Message = { role: 'user' | 'assistant'; suggestions?: string[]; sources?: Document[]; + metadata?: MessageMetadata; }; export interface File { @@ -207,7 +217,6 @@ const loadMessages = async ( const messages = data.messages.map((msg: any) => { return { ...msg, - ...JSON.parse(msg.metadata), }; }) as Message[]; @@ -339,9 +348,25 @@ const ChatWindow = ({ id }: { id?: string }) => { const sendMessage = async ( message: string, - messageId?: string, - options?: { rewriteIndex?: number }, + options?: { + messageId?: string; + rewriteIndex?: number; + suggestions?: string[]; + }, ) => { + // Special case: If we're just updating an existing message with suggestions + if (options?.suggestions && options.messageId) { + setMessages((prev) => + prev.map((msg) => { + if (msg.messageId === options.messageId) { + return { ...msg, suggestions: options.suggestions }; + } + return msg; + }), + ); + return; + } + if (loading) return; if (!isConfigReady) { 
toast.error('Cannot send message before the configuration is ready'); @@ -369,7 +394,8 @@ const ChatWindow = ({ id }: { id?: string }) => { setChatHistory(messageChatHistory); } - messageId = messageId ?? crypto.randomBytes(7).toString('hex'); + const messageId = + options?.messageId ?? crypto.randomBytes(7).toString('hex'); setMessages((prevMessages) => [ ...prevMessages, @@ -419,6 +445,12 @@ const ChatWindow = ({ id }: { id?: string }) => { role: 'assistant', sources: sources, createdAt: new Date(), + metadata: { + // modelStats will be added when we receive messageEnd event + modelStats: { + modelName: data.modelName, + }, + }, }, ]); added = true; @@ -445,12 +477,29 @@ const ChatWindow = ({ id }: { id?: string }) => { ['assistant', recievedMessage], ]); + // Always update the message, adding modelStats if available + setMessages((prev) => + prev.map((message) => { + if (message.messageId === data.messageId) { + return { + ...message, + metadata: { + // Include model stats if available, otherwise null + modelStats: data.modelStats || null, + }, + }; + } + return message; + }), + ); + setLoading(false); const lastMsg = messagesRef.current[messagesRef.current.length - 1]; const autoImageSearch = localStorage.getItem('autoImageSearch'); const autoVideoSearch = localStorage.getItem('autoVideoSearch'); + const autoSuggestions = localStorage.getItem('autoSuggestions'); if (autoImageSearch === 'true') { document @@ -468,7 +517,8 @@ const ChatWindow = ({ id }: { id?: string }) => { lastMsg.role === 'assistant' && lastMsg.sources && lastMsg.sources.length > 0 && - !lastMsg.suggestions + !lastMsg.suggestions && + autoSuggestions !== 'false' // Default to true if not set ) { const suggestions = await getSuggestions(messagesRef.current); setMessages((prev) => @@ -550,7 +600,8 @@ const ChatWindow = ({ id }: { id?: string }) => { (msg) => msg.messageId === messageId, ); if (messageIndex == -1) return; - sendMessage(messages[messageIndex - 1].content, messageId, { + 
sendMessage(messages[messageIndex - 1].content, { + messageId: messageId, rewriteIndex: messageIndex, }); }; diff --git a/src/components/MessageActions/ModelInfo.tsx b/src/components/MessageActions/ModelInfo.tsx new file mode 100644 index 0000000..fec80f2 --- /dev/null +++ b/src/components/MessageActions/ModelInfo.tsx @@ -0,0 +1,80 @@ +'use client'; + +import React, { useState, useEffect, useRef } from 'react'; +import { Info } from 'lucide-react'; +import { ModelStats } from '../ChatWindow'; +import { cn } from '@/lib/utils'; + +interface ModelInfoButtonProps { + modelStats: ModelStats | null; +} + +const ModelInfoButton: React.FC = ({ modelStats }) => { + const [showPopover, setShowPopover] = useState(false); + const popoverRef = useRef(null); + const buttonRef = useRef(null); + + // Always render, using "Unknown" as fallback if model info isn't available + const modelName = modelStats?.modelName || 'Unknown'; + + useEffect(() => { + const handleClickOutside = (event: MouseEvent) => { + if ( + popoverRef.current && + !popoverRef.current.contains(event.target as Node) && + buttonRef.current && + !buttonRef.current.contains(event.target as Node) + ) { + setShowPopover(false); + } + }; + + document.addEventListener('mousedown', handleClickOutside); + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; + }, []); + + return ( +
+ + {showPopover && ( +
+
+

+ Model Information +

+
+
+ Model: + + {modelName} + +
+ {modelStats?.responseTime && ( +
+ Response time: + + {(modelStats.responseTime / 1000).toFixed(2)}s + +
+ )} +
+
+
+ )} +
+ ); +}; + +export default ModelInfoButton; diff --git a/src/components/MessageBox.tsx b/src/components/MessageBox.tsx index a7c46ec..193bff8 100644 --- a/src/components/MessageBox.tsx +++ b/src/components/MessageBox.tsx @@ -4,6 +4,7 @@ import React, { MutableRefObject, useEffect, useState } from 'react'; import { Message } from './ChatWindow'; import { cn } from '@/lib/utils'; +import { getSuggestions } from '@/lib/actions'; import { BookCopy, Disc3, @@ -11,10 +12,12 @@ import { StopCircle, Layers3, Plus, + Sparkles, } from 'lucide-react'; import Markdown, { MarkdownToJSX } from 'markdown-to-jsx'; import Copy from './MessageActions/Copy'; import Rewrite from './MessageActions/Rewrite'; +import ModelInfoButton from './MessageActions/ModelInfo'; import MessageSources from './MessageSources'; import SearchImages from './SearchImages'; import SearchVideos from './SearchVideos'; @@ -42,10 +45,36 @@ const MessageBox = ({ dividerRef?: MutableRefObject; isLast: boolean; rewrite: (messageId: string) => void; - sendMessage: (message: string) => void; + sendMessage: ( + message: string, + options?: { + messageId?: string; + rewriteIndex?: number; + suggestions?: string[]; + }, + ) => void; }) => { const [parsedMessage, setParsedMessage] = useState(message.content); const [speechMessage, setSpeechMessage] = useState(message.content); + const [loadingSuggestions, setLoadingSuggestions] = useState(false); + const [autoSuggestions, setAutoSuggestions] = useState( + localStorage.getItem('autoSuggestions') + ); + + const handleLoadSuggestions = async () => { + if (loadingSuggestions || (message?.suggestions && message.suggestions.length > 0)) return; + + setLoadingSuggestions(true); + try { + const suggestions = await getSuggestions([...history]); + // We need to update the message.suggestions property through parent component + sendMessage('', { messageId: message.messageId, suggestions }); + } catch (error) { + console.error('Error loading suggestions:', error); + } finally { + 
setLoadingSuggestions(false); + } + }; useEffect(() => { const citationRegex = /\[([^\]]+)\]/g; @@ -105,6 +134,18 @@ const MessageBox = ({ setParsedMessage(processedMessage); }, [message.content, message.sources, message.role]); + useEffect(() => { + const handleStorageChange = () => { + setAutoSuggestions(localStorage.getItem('autoSuggestions')); + }; + + window.addEventListener('storage', handleStorageChange); + + return () => { + window.removeEventListener('storage', handleStorageChange); + }; + }, []); + const { speechStatus, start, stop } = useSpeech({ text: speechMessage }); const markdownOverrides: MarkdownToJSX.Options = { @@ -149,6 +190,7 @@ const MessageBox = ({ )}
+ {' '}
Answer + {message.metadata?.modelStats && ( + + )}
-
)} - {isLast && - message.suggestions && - message.suggestions.length > 0 && - message.role === 'assistant' && - !loading && ( - <> -
-
-
- -

Related

-
+ {isLast && message.role === 'assistant' && !loading && ( + <> +
+
+
+ +

Related

{' '} + {(!autoSuggestions || autoSuggestions === 'false') && (!message.suggestions || + message.suggestions.length === 0) ? ( +
+ +
+ ) : null} +
+ {message.suggestions && message.suggestions.length > 0 ? (
{message.suggestions.map((suggestion, i) => (
))}
-
- - )} + ) : null} +
+ + )}
diff --git a/src/components/MessageInputActions/Focus.tsx b/src/components/MessageInputActions/Focus.tsx index 875dbf7..09d97ac 100644 --- a/src/components/MessageInputActions/Focus.tsx +++ b/src/components/MessageInputActions/Focus.tsx @@ -2,6 +2,7 @@ import { BadgePercent, ChevronDown, Globe, + MessageCircle, Pencil, ScanEye, SwatchBook, @@ -30,11 +31,23 @@ const focusModes = [ icon: , }, { - key: 'writingAssistant', - title: 'Writing', - description: 'Chat without searching the web', + key: 'chat', + title: 'Chat', + description: 'Have a creative conversation', + icon: , + }, + { + key: 'localResearch', + title: 'Local Research', + description: 'Research and interact with local files with citations', icon: , }, + { + key: 'redditSearch', + title: 'Reddit', + description: 'Search for discussions and opinions', + icon: , + }, { key: 'wolframAlphaSearch', title: 'Wolfram Alpha', @@ -47,12 +60,6 @@ const focusModes = [ description: 'Search and watch videos', icon: , }, - { - key: 'redditSearch', - title: 'Reddit', - description: 'Search for discussions and opinions', - icon: , - }, ]; const Focus = ({ diff --git a/src/lib/prompts/chat.ts b/src/lib/prompts/chat.ts new file mode 100644 index 0000000..fd818b6 --- /dev/null +++ b/src/lib/prompts/chat.ts @@ -0,0 +1,19 @@ +export const chatPrompt = ` +You are Perplexica, an AI model who is expert at having creative conversations with users. You are currently set on focus mode 'Chat', which means you will engage in a truly creative conversation without searching the web or citing sources. + +In Chat mode, you should be: +- Creative and engaging in your responses +- Helpful and informative based on your internal knowledge +- Conversational and natural in your tone +- Willing to explore ideas, hypothetical scenarios, and creative topics + +Since you are in Chat mode, you would not perform web searches or cite sources. 
If the user asks a question that would benefit from web search or specific data, you can suggest they switch to a different focus mode like 'All Mode' for general web search or another specialized mode. + +### User instructions +These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines. +{systemInstructions} + + +{context} + +`; diff --git a/src/lib/prompts/index.ts b/src/lib/prompts/index.ts index f479185..eff5737 100644 --- a/src/lib/prompts/index.ts +++ b/src/lib/prompts/index.ts @@ -11,7 +11,8 @@ import { wolframAlphaSearchResponsePrompt, wolframAlphaSearchRetrieverPrompt, } from './wolframAlpha'; -import { writingAssistantPrompt } from './writingAssistant'; +import { localResearchPrompt } from './localResearch'; +import { chatPrompt } from './chat'; import { youtubeSearchResponsePrompt, youtubeSearchRetrieverPrompt, @@ -26,7 +27,8 @@ export default { redditSearchRetrieverPrompt, wolframAlphaSearchResponsePrompt, wolframAlphaSearchRetrieverPrompt, - writingAssistantPrompt, + localResearchPrompt, + chatPrompt, youtubeSearchResponsePrompt, youtubeSearchRetrieverPrompt, }; diff --git a/src/lib/prompts/writingAssistant.ts b/src/lib/prompts/localResearch.ts similarity index 77% rename from src/lib/prompts/writingAssistant.ts rename to src/lib/prompts/localResearch.ts index 565827a..be40e82 100644 --- a/src/lib/prompts/writingAssistant.ts +++ b/src/lib/prompts/localResearch.ts @@ -1,6 +1,6 @@ -export const writingAssistantPrompt = ` -You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Writing Assistant', this means you will be helping the user write a response to a given query. -Since you are a writing assistant, you would not perform web searches. 
If you think you lack information to answer the query, you can ask the user for more information or suggest them to switch to a different focus mode. +export const localResearchPrompt = ` +You are Perplexica, an AI model who is expert at searching the web and answering user's queries. You are currently set on focus mode 'Local Research', which means you will be helping the user research and interact with local files with citations. +Since you are in local research mode, you would not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest they switch to a different focus mode. You will be shared a context that can contain information from files user has uploaded to get answers from. You will have to generate answers upon that. You have to cite the answer using [number] notation. You must cite the sentences with their relevent context number. You must cite each and every part of the answer so the user can know where the information is coming from.
diff --git a/src/lib/search/index.ts b/src/lib/search/index.ts index 01fb87f..b334962 100644 --- a/src/lib/search/index.ts +++ b/src/lib/search/index.ts @@ -20,15 +20,24 @@ export const searchHandlers: Record = { searchWeb: true, summarizer: false, }), - writingAssistant: new MetaSearchAgent({ + localResearch: new MetaSearchAgent({ activeEngines: [], queryGeneratorPrompt: '', - responsePrompt: prompts.writingAssistantPrompt, + responsePrompt: prompts.localResearchPrompt, rerank: true, rerankThreshold: 0, searchWeb: false, summarizer: false, }), + chat: new MetaSearchAgent({ + activeEngines: [], + queryGeneratorPrompt: '', + responsePrompt: prompts.chatPrompt, + rerank: false, + rerankThreshold: 0, + searchWeb: false, + summarizer: false, + }), wolframAlphaSearch: new MetaSearchAgent({ activeEngines: ['wolframalpha'], queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt, diff --git a/src/lib/search/metaSearchAgent.ts b/src/lib/search/metaSearchAgent.ts index 67b7c58..03ad982 100644 --- a/src/lib/search/metaSearchAgent.ts +++ b/src/lib/search/metaSearchAgent.ts @@ -434,13 +434,13 @@ class MetaSearchAgent implements MetaSearchAgentType { private async handleStream( stream: AsyncGenerator, emitter: eventEmitter, + llm: BaseChatModel, ) { for await (const event of stream) { if ( event.event === 'on_chain_end' && event.name === 'FinalSourceRetriever' ) { - ``; emitter.emit( 'data', JSON.stringify({ type: 'sources', data: event.data.output }), @@ -459,6 +459,50 @@ class MetaSearchAgent implements MetaSearchAgentType { event.event === 'on_chain_end' && event.name === 'FinalResponseGenerator' ) { + // Get model name safely with better detection + let modelName = 'Unknown'; + try { + // @ts-ignore - Different LLM implementations have different properties + if (llm.modelName) { + // @ts-ignore + modelName = llm.modelName; + // @ts-ignore + } else if (llm._llm && llm._llm.modelName) { + // @ts-ignore + modelName = llm._llm.modelName; + // @ts-ignore + } else if 
(llm.model && llm.model.modelName) { + // @ts-ignore + modelName = llm.model.modelName; + } else if ('model' in llm) { + // @ts-ignore + const model = llm.model; + if (typeof model === 'string') { + modelName = model; + // @ts-ignore + } else if (model && model.modelName) { + // @ts-ignore + modelName = model.modelName; + } + } else if (llm.constructor && llm.constructor.name) { + // Last resort: use the class name + modelName = llm.constructor.name; + } + } catch (e) { + console.error('Failed to get model name:', e); + } + + // Send model info before ending + emitter.emit( + 'stats', + JSON.stringify({ + type: 'modelStats', + data: { + modelName, + }, + }), + ); + emitter.emit('end'); } } @@ -493,7 +537,7 @@ class MetaSearchAgent implements MetaSearchAgentType { }, ); - this.handleStream(stream, emitter); + this.handleStream(stream, emitter, llm); return emitter; }