diff --git a/ui/app/api/suggestions/route.ts b/ui/app/api/suggestions/route.ts
new file mode 100644
index 0000000..f28092e
--- /dev/null
+++ b/ui/app/api/suggestions/route.ts
@@ -0,0 +1,81 @@
+import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
+import {
+ getCustomOpenaiApiKey,
+ getCustomOpenaiApiUrl,
+ getCustomOpenaiModelName,
+} from '@/lib/config';
+import { getAvailableChatModelProviders } from '@/lib/providers';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { AIMessage, HumanMessage } from '@langchain/core/messages';
+import { ChatOpenAI } from '@langchain/openai';
+
+/**
+ * Client-selected chat model: a provider key and a model key within that
+ * provider's entry in the available-providers map.
+ */
+interface ChatModel {
+ provider: string;
+ model: string;
+}
+
+/** One conversation turn as sent over the wire by the client. */
+interface ChatTurn {
+  role: string; // 'user' | 'assistant' are the roles the handler maps; others are dropped
+  content: string;
+}
+
+/**
+ * Request body for POST /api/suggestions.
+ * `chatModel` is optional; the handler falls back to the first available
+ * provider/model when it is omitted.
+ */
+interface SuggestionsGenerationBody {
+  chatHistory: ChatTurn[];
+  chatModel?: ChatModel;
+}
+
+/**
+ * POST /api/suggestions
+ *
+ * Generates follow-up suggestions for a conversation.
+ * Expects a JSON body with `chatHistory` ({role, content}[]) and an
+ * optional `chatModel` ({provider, model}) selection.
+ *
+ * Responses:
+ *   200 – { suggestions }
+ *   400 – unknown/invalid chat model
+ *   500 – unexpected failure
+ */
+export const POST = async (req: Request) => {
+  try {
+    const body: SuggestionsGenerationBody = await req.json();
+
+    // Convert wire-format turns into LangChain messages; entries with an
+    // unrecognized role map to undefined and are filtered out.
+    const chatHistory = body.chatHistory
+      .map((msg: any) => {
+        if (msg.role === 'user') {
+          return new HumanMessage(msg.content);
+        } else if (msg.role === 'assistant') {
+          return new AIMessage(msg.content);
+        }
+      })
+      .filter((msg) => msg !== undefined);
+
+    const chatModelProviders = await getAvailableChatModelProviders();
+
+    // Resolve provider first and guard before indexing into it: an unknown
+    // provider key (or an empty provider map) must yield a 400, not an
+    // unhandled TypeError that surfaces as a 500.
+    const providerKey =
+      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
+    const chatModelProvider = chatModelProviders[providerKey];
+
+    let llm: BaseChatModel | undefined;
+
+    if (body.chatModel?.provider === 'custom_openai') {
+      // Custom OpenAI-compatible endpoint configured via app settings.
+      llm = new ChatOpenAI({
+        openAIApiKey: getCustomOpenaiApiKey(),
+        modelName: getCustomOpenaiModelName(),
+        temperature: 0.7,
+        configuration: {
+          baseURL: getCustomOpenaiApiUrl(),
+        },
+      });
+    } else if (chatModelProvider) {
+      const modelKey =
+        body.chatModel?.model || Object.keys(chatModelProvider)[0];
+      const chatModel = chatModelProvider[modelKey];
+      if (chatModel) {
+        llm = chatModel.model;
+      }
+    }
+
+    if (!llm) {
+      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
+    }
+
+    const suggestions = await generateSuggestions(
+      {
+        chat_history: chatHistory,
+      },
+      llm,
+    );
+
+    return Response.json({ suggestions }, { status: 200 });
+  } catch (err) {
+    console.error(`An error occurred while generating suggestions: ${err}`);
+    return Response.json(
+      { message: 'An error occurred while generating suggestions' },
+      { status: 500 },
+    );
+  }
+};
diff --git a/ui/lib/chains/suggestionGeneratorAgent.ts b/ui/lib/chains/suggestionGeneratorAgent.ts
new file mode 100644
index 0000000..9129059
--- /dev/null
+++ b/ui/lib/chains/suggestionGeneratorAgent.ts
@@ -0,0 +1,55 @@
+import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
+import ListLineOutputParser from '../outputParsers/listLineOutputParser';
+import { PromptTemplate } from '@langchain/core/prompts';
+import formatChatHistoryAsString from '../utils/formatHistory';
+import { BaseMessage } from '@langchain/core/messages';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { ChatOpenAI } from '@langchain/openai';
+
+/**
+ * Prompt for the suggestion generator. The model is instructed to emit its
+ * suggestions one per line inside a <suggestions>...</suggestions> block —
+ * the tag name matches the `key: 'suggestions'` the output parser below is
+ * configured with. (The tags were missing from the prompt text, so the model
+ * was never told to produce the wrapper the parser extracts.)
+ */
+const suggestionGeneratorPrompt = `
+You are an AI suggestion generator for an AI powered search engine. You will be given a conversation below. You need to generate 4-5 suggestions based on the conversation. The suggestion should be relevant to the conversation that can be used by the user to ask the chat model for more information.
+You need to make sure the suggestions are relevant to the conversation and are helpful to the user. Keep a note that the user might use these suggestions to ask a chat model for more information.
+Make sure the suggestions are medium in length and are informative and relevant to the conversation.
+
+Provide these suggestions separated by newlines between the XML tags <suggestions> and </suggestions>. For example:
+
+<suggestions>
+Tell me more about SpaceX and their recent projects
+What is the latest news on SpaceX?
+Who is the CEO of SpaceX?
+</suggestions>
+
+Conversation:
+{chat_history}
+`;
+
+/** Input to the suggestion chain: the conversation gathered so far. */
+interface SuggestionGeneratorInput {
+  chat_history: BaseMessage[];
+}
+
+// Parses the LLM reply into a string[] of suggestions, one per line.
+// NOTE(review): the 'suggestions' key presumably selects which tagged block
+// of the response the parser reads — confirm against ListLineOutputParser.
+const outputParser = new ListLineOutputParser({
+ key: 'suggestions',
+});
+
+/**
+ * Assembles the runnable pipeline: format the chat history as a string,
+ * fill the prompt template, invoke the model, then parse the reply into a
+ * list of suggestion strings.
+ */
+const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
+  const historyFormatter = RunnableMap.from({
+    chat_history: (input: SuggestionGeneratorInput) =>
+      formatChatHistoryAsString(input.chat_history),
+  });
+  const prompt = PromptTemplate.fromTemplate(suggestionGeneratorPrompt);
+
+  return RunnableSequence.from([historyFormatter, prompt, llm, outputParser]);
+};
+
+/**
+ * Generates follow-up suggestions for the given conversation.
+ *
+ * Temperature is forced to 0 so the suggestion list is deterministic and
+ * reliably parseable. Note this mutates the caller's model instance.
+ */
+const generateSuggestions = (
+  input: SuggestionGeneratorInput,
+  llm: BaseChatModel,
+) => {
+  // Only adjust models that actually expose a `temperature` field (e.g.
+  // ChatOpenAI); the previous unconditional double-cast wrote a dead
+  // property onto every other model implementation.
+  if ('temperature' in llm) {
+    (llm as unknown as ChatOpenAI).temperature = 0;
+  }
+  const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
+  return suggestionGeneratorChain.invoke(input);
+};
+
+export default generateSuggestions;