Mirror of https://github.com/ItzCrazyKns/Perplexica.git
Synced: 2025-08-01 05:18:41 +00:00

Compare commits: v1.11.0-rc...81a91da743 (22 commits)
Commits:
81a91da743
70a61ee1eb
9d89a4413b
6ea17d54c6
11a828b073
37022fb11e
dd50d4927b
fdaf3af3af
3f2a8f862c
829b4e7134
77870b39cc
8e0ae9b867
543f1df5ce
341aae4587
7f62907385
7c4aa683a2
b48b0eeb0e
cddc793915
94e6db10bb
26e1d5fec3
66be87b688
f7b4e32218
package.json: 13 changed lines (+7, -6)
@@ -15,11 +15,12 @@
     "@headlessui/react": "^2.2.0",
     "@iarna/toml": "^2.2.5",
     "@icons-pack/react-simple-icons": "^12.3.0",
-    "@langchain/anthropic": "^0.3.15",
-    "@langchain/community": "^0.3.36",
-    "@langchain/core": "^0.3.42",
-    "@langchain/google-genai": "^0.1.12",
-    "@langchain/openai": "^0.0.25",
+    "@langchain/anthropic": "^0.3.24",
+    "@langchain/community": "^0.3.49",
+    "@langchain/core": "^0.3.66",
+    "@langchain/google-genai": "^0.2.15",
+    "@langchain/ollama": "^0.2.3",
+    "@langchain/openai": "^0.6.2",
     "@langchain/textsplitters": "^0.1.0",
     "@tailwindcss/typography": "^0.5.12",
     "@xenova/transformers": "^2.17.2",
@@ -31,7 +32,7 @@
     "drizzle-orm": "^0.40.1",
     "html-to-text": "^9.0.5",
     "jspdf": "^3.0.1",
-    "langchain": "^0.1.30",
+    "langchain": "^0.3.30",
     "lucide-react": "^0.363.0",
     "mammoth": "^1.9.1",
     "markdown-to-jsx": "^7.7.2",
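This release candidate moves the whole LangChain stack onto the 0.3-era core, upgrades @langchain/openai to ^0.6, and adds the dedicated @langchain/ollama package. The recurring breaking change in the hunks below is the constructor field rename from `openAIApiKey` to `apiKey`. A minimal sketch, not part of the diff (model id assumed):

import { ChatOpenAI } from '@langchain/openai';

// `apiKey` replaces the legacy `openAIApiKey` field in @langchain/openai ^0.6;
// `modelName` is still accepted alongside the newer `model` field.
const llm = new ChatOpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  modelName: 'gpt-4o-mini', // assumed model id
  temperature: 0.7,
});

const res = await llm.invoke('Say hello');
console.log(res.content);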
@@ -223,7 +223,7 @@ export const POST = async (req: Request) => {

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
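The same one-line rename repeats in every route and provider below. For the custom_openai provider the client is pointed at an arbitrary OpenAI-compatible server through `configuration.baseURL`; a hedged sketch with an assumed local endpoint and model id:

import { ChatOpenAI } from '@langchain/openai';

const llm = new ChatOpenAI({
  apiKey: 'sk-local', // value is server-dependent; the field itself is still required
  modelName: 'llama-3.1-8b-instruct', // assumed model id
  temperature: 0.7,
  configuration: {
    baseURL: 'http://localhost:8080/v1', // any OpenAI-compatible server
  },
});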
@@ -36,6 +36,7 @@ export const GET = async (req: Request) => {
        {
          engines: ['bing news'],
          pageno: 1,
+         language: 'en',
        },
      )
    ).results;
@@ -49,7 +50,11 @@ export const GET = async (req: Request) => {
      data = (
        await searchSearxng(
          `site:${articleWebsites[Math.floor(Math.random() * articleWebsites.length)]} ${topics[Math.floor(Math.random() * topics.length)]}`,
-         { engines: ['bing news'], pageno: 1 },
+         {
+           engines: ['bing news'],
+           pageno: 1,
+           language: 'en',
+         },
        )
      ).results;
    }
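Both discover queries now pass `language: 'en'` through to SearXNG, so results no longer depend on the instance's default locale. A usage sketch of the helper as called above (import path and query string are assumptions):

import { searchSearxng } from '@/lib/searxng'; // assumed path alias

const { results } = await searchSearxng('site:wired.com AI', {
  engines: ['bing news'],
  pageno: 1,
  language: 'en', // omitted before; SearXNG then fell back to the instance default
});
console.log(results.length);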
@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
@@ -81,8 +81,7 @@ export const POST = async (req: Request) => {
    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        modelName: body.chatModel?.name || getCustomOpenaiModelName(),
-       openAIApiKey:
-         body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
+       apiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
        temperature: 0.7,
        configuration: {
          baseURL:
@@ -48,7 +48,7 @@ export const POST = async (req: Request) => {

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {

    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {
@@ -1,6 +1,10 @@
 export const POST = async (req: Request) => {
   try {
-    const body: { lat: number; lng: number } = await req.json();
+    const body: {
+      lat: number;
+      lng: number;
+      measureUnit: 'Imperial' | 'Metric';
+    } = await req.json();

     if (!body.lat || !body.lng) {
       return Response.json(
@@ -12,7 +16,9 @@ export const POST = async (req: Request) => {
     }

     const res = await fetch(
-      `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto`,
+      `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${
+        body.measureUnit === 'Metric' ? '' : '&temperature_unit=fahrenheit'
+      }${body.measureUnit === 'Metric' ? '' : '&wind_speed_unit=mph'}`,
     );

     const data = await res.json();
@@ -33,12 +39,16 @@ export const POST = async (req: Request) => {
       humidity: number;
       windSpeed: number;
       icon: string;
+      temperatureUnit: 'C' | 'F';
+      windSpeedUnit: 'm/s' | 'mph';
     } = {
       temperature: data.current.temperature_2m,
       condition: '',
       humidity: data.current.relative_humidity_2m,
       windSpeed: data.current.wind_speed_10m,
       icon: '',
+      temperatureUnit: body.measureUnit === 'Metric' ? 'C' : 'F',
+      windSpeedUnit: body.measureUnit === 'Metric' ? 'm/s' : 'mph',
     };

     const code = data.current.weather_code;
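Open-Meteo returns Celsius unless told otherwise, so the extra unit query parameters are appended only for Imperial requests. The URL construction in isolation (sample coordinates are assumptions):

const buildForecastUrl = (
  lat: number,
  lng: number,
  measureUnit: 'Imperial' | 'Metric',
) =>
  `https://api.open-meteo.com/v1/forecast?latitude=${lat}&longitude=${lng}` +
  '&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m' +
  '&timezone=auto' +
  (measureUnit === 'Metric' ? '' : '&temperature_unit=fahrenheit&wind_speed_unit=mph');

console.log(buildForecastUrl(52.52, 13.41, 'Imperial')); // assumed Berlin coordinates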
@@ -148,6 +148,9 @@ const Page = () => {
   const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
   const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
   const [systemInstructions, setSystemInstructions] = useState<string>('');
+  const [measureUnit, setMeasureUnit] = useState<'Imperial' | 'Metric'>(
+    'Metric',
+  );
   const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});

   useEffect(() => {
@@ -210,6 +213,10 @@ const Page = () => {

       setSystemInstructions(localStorage.getItem('systemInstructions')!);

+      setMeasureUnit(
+        localStorage.getItem('measureUnit')! as 'Imperial' | 'Metric',
+      );
+
       setIsLoading(false);
     };

@@ -368,6 +375,8 @@ const Page = () => {
         localStorage.setItem('embeddingModel', value);
       } else if (key === 'systemInstructions') {
         localStorage.setItem('systemInstructions', value);
+      } else if (key === 'measureUnit') {
+        localStorage.setItem('measureUnit', value.toString());
       }
     } catch (err) {
       console.error('Failed to save:', err);
@@ -416,13 +425,35 @@ const Page = () => {
       ) : (
         config && (
           <div className="flex flex-col space-y-6 pb-28 lg:pb-8">
-            <SettingsSection title="Appearance">
+            <SettingsSection title="Preferences">
               <div className="flex flex-col space-y-1">
                 <p className="text-black/70 dark:text-white/70 text-sm">
                   Theme
                 </p>
                 <ThemeSwitcher />
               </div>
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  Measurement Units
+                </p>
+                <Select
+                  value={measureUnit ?? undefined}
+                  onChange={(e) => {
+                    setMeasureUnit(e.target.value as 'Imperial' | 'Metric');
+                    saveConfig('measureUnit', e.target.value);
+                  }}
+                  options={[
+                    {
+                      label: 'Metric',
+                      value: 'Metric',
+                    },
+                    {
+                      label: 'Imperial',
+                      value: 'Imperial',
+                    },
+                  ]}
+                />
+              </div>
             </SettingsSection>

             <SettingsSection title="Automatic Search">
@@ -516,7 +547,7 @@ const Page = () => {
             <SettingsSection title="System Instructions">
               <div className="flex flex-col space-y-4">
                 <Textarea
-                  value={systemInstructions}
+                  value={systemInstructions ?? undefined}
                   isSaving={savingStates['systemInstructions']}
                   onChange={(e) => {
                     setSystemInstructions(e.target.value);
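The unit preference is persisted entirely client-side under the `measureUnit` localStorage key; the page reads it back with a non-null assertion. A sketch of the same round trip with an explicit first-visit fallback:

type MeasureUnit = 'Imperial' | 'Metric';

// The fallback covers a missing key on first visit, where getItem returns null.
const loadMeasureUnit = (): MeasureUnit =>
  (localStorage.getItem('measureUnit') as MeasureUnit | null) ?? 'Metric';

const saveMeasureUnit = (unit: MeasureUnit): void => {
  localStorage.setItem('measureUnit', unit);
};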
@@ -354,7 +354,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
     }
   }, [isMessagesLoaded, isConfigReady]);

-  const sendMessage = async (message: string, messageId?: string) => {
+  const sendMessage = async (
+    message: string,
+    messageId?: string,
+    rewrite = false,
+  ) => {
     if (loading) return;
     if (!isConfigReady) {
       toast.error('Cannot send message before the configuration is ready');
@@ -482,6 +486,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
      }
    };

+   const messageIndex = messages.findIndex((m) => m.messageId === messageId);
+
    const res = await fetch('/api/chat', {
      method: 'POST',
      headers: {
@@ -498,7 +504,9 @@ const ChatWindow = ({ id }: { id?: string }) => {
        files: fileIds,
        focusMode: focusMode,
        optimizationMode: optimizationMode,
-       history: chatHistory,
+       history: rewrite
+         ? chatHistory.slice(0, messageIndex === -1 ? undefined : messageIndex)
+         : chatHistory,
        chatModel: {
          name: chatModelProvider.name,
          provider: chatModelProvider.provider,
@@ -552,7 +560,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
      return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
    });

-   sendMessage(message.content, message.messageId);
+   sendMessage(message.content, message.messageId, true);
  };

  useEffect(() => {
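`sendMessage` gains a `rewrite` flag: when a message is regenerated, the history posted to /api/chat is cut off just before the message being rewritten, so the model never sees the answer it is replacing. The slicing in isolation:

type Turn = [role: 'human' | 'assistant', content: string];

// slice(0, undefined) when the id is not found (index -1) keeps the full history.
const historyForRequest = (
  chatHistory: Turn[],
  messageIndex: number,
  rewrite: boolean,
): Turn[] =>
  rewrite
    ? chatHistory.slice(0, messageIndex === -1 ? undefined : messageIndex)
    : chatHistory;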
@@ -21,8 +21,16 @@ import SearchVideos from './SearchVideos';
 import { useSpeech } from 'react-text-to-speech';
 import ThinkBox from './ThinkBox';

-const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
-  return <ThinkBox content={children as string} />;
+const ThinkTagProcessor = ({
+  children,
+  thinkingEnded,
+}: {
+  children: React.ReactNode;
+  thinkingEnded: boolean;
+}) => {
+  return (
+    <ThinkBox content={children as string} thinkingEnded={thinkingEnded} />
+  );
 };

 const MessageBox = ({
@@ -46,6 +54,7 @@ const MessageBox = ({
 }) => {
   const [parsedMessage, setParsedMessage] = useState(message.content);
   const [speechMessage, setSpeechMessage] = useState(message.content);
+  const [thinkingEnded, setThinkingEnded] = useState(false);

   useEffect(() => {
     const citationRegex = /\[([^\]]+)\]/g;
@@ -61,6 +70,10 @@ const MessageBox = ({
       }
     }

+    if (message.role === 'assistant' && message.content.includes('</think>')) {
+      setThinkingEnded(true);
+    }
+
     if (
       message.role === 'assistant' &&
       message?.sources &&
@@ -88,7 +101,7 @@ const MessageBox = ({
           if (url) {
             return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
           } else {
-            return `[${numStr}]`;
+            return ``;
           }
         })
         .join('');
@@ -99,6 +112,14 @@ const MessageBox = ({
       );
       setSpeechMessage(message.content.replace(regex, ''));
       return;
+    } else if (
+      message.role === 'assistant' &&
+      message?.sources &&
+      message.sources.length === 0
+    ) {
+      setParsedMessage(processedMessage.replace(regex, ''));
+      setSpeechMessage(message.content.replace(regex, ''));
+      return;
     }

     setSpeechMessage(message.content.replace(regex, ''));
@@ -111,6 +132,9 @@ const MessageBox = ({
     overrides: {
       think: {
         component: ThinkTagProcessor,
+        props: {
+          thinkingEnded: thinkingEnded,
+        },
       },
     },
   };
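`thinkingEnded` reaches the `<think>` renderer through markdown-to-jsx's override `props`, which are merged into the overridden component on every render. A self-contained sketch of the mechanism; the `Think` component below is a stand-in, not the diff's ThinkTagProcessor:

import React from 'react';
import Markdown from 'markdown-to-jsx';

const Think = ({
  children,
  thinkingEnded,
}: {
  children: React.ReactNode;
  thinkingEnded: boolean;
}) => <details open={!thinkingEnded}>{children}</details>;

const options = {
  overrides: {
    think: {
      component: Think,
      props: { thinkingEnded: true }, // merged into every <think> render
    },
  },
};

export const Demo = ({ content }: { content: string }) => (
  <Markdown options={options}>{content}</Markdown>
);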
@@ -1,15 +1,23 @@
 'use client';

-import { useState } from 'react';
-import { cn } from '@/lib/utils';
+import { useEffect, useState } from 'react';
 import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';

 interface ThinkBoxProps {
   content: string;
+  thinkingEnded: boolean;
 }

-const ThinkBox = ({ content }: ThinkBoxProps) => {
-  const [isExpanded, setIsExpanded] = useState(false);
+const ThinkBox = ({ content, thinkingEnded }: ThinkBoxProps) => {
+  const [isExpanded, setIsExpanded] = useState(true);
+
+  useEffect(() => {
+    if (thinkingEnded) {
+      setIsExpanded(false);
+    } else {
+      setIsExpanded(true);
+    }
+  }, [thinkingEnded]);

   return (
     <div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
@@ -9,7 +9,10 @@ const WeatherWidget = () => {
     humidity: 0,
     windSpeed: 0,
     icon: '',
+    temperatureUnit: 'C',
+    windSpeedUnit: 'm/s',
   });

   const [loading, setLoading] = useState(true);

   useEffect(() => {
@@ -73,6 +76,7 @@ const WeatherWidget = () => {
       body: JSON.stringify({
         lat: location.latitude,
         lng: location.longitude,
+        measureUnit: localStorage.getItem('measureUnit') ?? 'Metric',
       }),
     });

@@ -91,6 +95,8 @@ const WeatherWidget = () => {
         humidity: data.humidity,
         windSpeed: data.windSpeed,
         icon: data.icon,
+        temperatureUnit: data.temperatureUnit,
+        windSpeedUnit: data.windSpeedUnit,
       });
       setLoading(false);
     });
@@ -125,7 +131,7 @@ const WeatherWidget = () => {
             className="h-10 w-auto"
           />
           <span className="text-base font-semibold text-black dark:text-white">
-            {data.temperature}°C
+            {data.temperature}°{data.temperatureUnit}
           </span>
         </div>
         <div className="flex flex-col justify-between flex-1 h-full py-1">
@@ -135,7 +141,7 @@ const WeatherWidget = () => {
           </span>
           <span className="flex items-center text-xs text-black/60 dark:text-white/60">
             <Wind className="w-3 h-3 mr-1" />
-            {data.windSpeed} km/h
+            {data.windSpeed} {data.windSpeedUnit}
           </span>
         </div>
         <span className="text-xs text-black/60 dark:text-white/60 mt-1">
@@ -3,32 +3,18 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import LineOutputParser from '../outputParsers/lineOutputParser';

 const imageSearchChainPrompt = `
 You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
 You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
-
-Example:
-1. Follow up question: What is a cat?
-Rephrased: A cat
-
-2. Follow up question: What is a car? How does it works?
-Rephrased: Car working
-
-3. Follow up question: How does an AC work?
-Rephrased: AC working
-
-Conversation:
-{chat_history}
-
-Follow up question: {query}
-Rephrased question:
+Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
 `;

 type ImageSearchChainInput = {
@@ -54,12 +40,39 @@ const createImageSearchChain = (llm: BaseChatModel) => {
         return input.query;
       },
     }),
-    PromptTemplate.fromTemplate(imageSearchChainPrompt),
+    ChatPromptTemplate.fromMessages([
+      ['system', imageSearchChainPrompt],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
+      ],
+      ['assistant', '<query>A cat</query>'],
+
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
+      ],
+      ['assistant', '<query>Car working</query>'],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
+      ],
+      ['assistant', '<query>AC working</query>'],
+      [
+        'user',
+        '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
+      ],
+    ]),
     llm,
     strParser,
+    RunnableLambda.from(async (input: string) => {
+      input = input.replace(/<think>.*?<\/think>/g, '');
+      const queryParser = new LineOutputParser({
+        key: 'query',
+      });
+
+      return await queryParser.parse(input);
+    }),
     RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
         engines: ['bing images', 'google images'],
       });
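The single-string PromptTemplate becomes a ChatPromptTemplate whose examples are genuine user/assistant turns, and the answer must arrive inside a `<query>` XML element that LineOutputParser extracts downstream. The pattern in isolation (model setup assumed):

import { ChatPromptTemplate } from '@langchain/core/prompts';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { ChatOpenAI } from '@langchain/openai';

const prompt = ChatPromptTemplate.fromMessages([
  ['system', 'Rephrase the follow-up into a standalone search query wrapped in <query></query>.'],
  // Few-shot turns teach the output format more reliably than inline prose examples.
  ['user', '<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>'],
  ['assistant', '<query>A cat</query>'],
  ['user', '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>'],
]);

const chain = prompt
  .pipe(new ChatOpenAI({ apiKey: process.env.OPENAI_API_KEY, modelName: 'gpt-4o-mini' }))
  .pipe(new StringOutputParser());

const out = await chain.invoke({ chat_history: '', query: 'How does an AC work?' });
// Expected shape: '<query>AC working</query>'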
@@ -3,33 +3,19 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import LineOutputParser from '../outputParsers/lineOutputParser';

-const VideoSearchChainPrompt = `
-You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
-You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
-
-Example:
-1. Follow up question: How does a car work?
-Rephrased: How does a car work?
-
-2. Follow up question: What is the theory of relativity?
-Rephrased: What is theory of relativity
-
-3. Follow up question: How does an AC work?
-Rephrased: How does an AC work
-
-Conversation:
-{chat_history}
-
-Follow up question: {query}
-Rephrased question:
-`;
+const videoSearchChainPrompt = `
+You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
+You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
+Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
+`;

 type VideoSearchChainInput = {
   chat_history: BaseMessage[];
@@ -55,12 +41,37 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
         return input.query;
       },
     }),
-    PromptTemplate.fromTemplate(VideoSearchChainPrompt),
+    ChatPromptTemplate.fromMessages([
+      ['system', videoSearchChainPrompt],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
+      ],
+      ['assistant', '<query>How does a car work?</query>'],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
+      ],
+      ['assistant', '<query>Theory of relativity</query>'],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
+      ],
+      ['assistant', '<query>AC working</query>'],
+      [
+        'user',
+        '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
+      ],
+    ]),
     llm,
     strParser,
+    RunnableLambda.from(async (input: string) => {
+      input = input.replace(/<think>.*?<\/think>/g, '');
+
+      const queryParser = new LineOutputParser({
+        key: 'query',
+      });
+      return await queryParser.parse(input);
+    }),
     RunnableLambda.from(async (input: string) => {
      const res = await searchSearxng(input, {
        engines: ['youtube'],
      });
@@ -92,8 +103,8 @@ const handleVideoSearch = (
   input: VideoSearchChainInput,
   llm: BaseChatModel,
 ) => {
-  const VideoSearchChain = createVideoSearchChain(llm);
-  return VideoSearchChain.invoke(input);
+  const videoSearchChain = createVideoSearchChain(llm);
+  return videoSearchChain.invoke(input);
 };

 export default handleVideoSearch;
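Both chains also strip a reasoning model's `<think>…</think>` scratchpad before extracting the `<query>` element, so internal reasoning never leaks into the search query. A regex-only sketch of that post-processing step (standing in for the project's LineOutputParser):

// Drop the <think> scratchpad, then pull the query text out of its XML element.
const extractQuery = (raw: string): string => {
  const cleaned = raw.replace(/<think>[\s\S]*?<\/think>/g, '');
  const match = cleaned.match(/<query>([\s\S]*?)<\/query>/);
  return (match ? match[1] : cleaned).trim();
};

console.log(
  extractQuery('<think>user wants AC videos</think>\n<query>AC working</query>'),
); // -> 'AC working'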
@@ -38,7 +38,7 @@ export const loadAimlApiChatModels = async () => {
      chatModels[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
-         openAIApiKey: apiKey,
+         apiKey: apiKey,
          modelName: model.id,
          temperature: 0.7,
          configuration: {
@@ -76,7 +76,7 @@ export const loadAimlApiEmbeddingModels = async () => {
      embeddingModels[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
-         openAIApiKey: apiKey,
+         apiKey: apiKey,
          modelName: model.id,
          configuration: {
            baseURL: API_URL,
@@ -31,7 +31,7 @@ export const loadDeepseekChatModels = async () => {
      chatModels[model.key] = {
        displayName: model.displayName,
        model: new ChatOpenAI({
-         openAIApiKey: deepseekApiKey,
+         apiKey: deepseekApiKey,
          modelName: model.key,
          temperature: 0.7,
          configuration: {
@@ -75,7 +75,7 @@ export const loadGeminiChatModels = async () => {
        displayName: model.displayName,
        model: new ChatGoogleGenerativeAI({
          apiKey: geminiApiKey,
-         modelName: model.key,
+         model: model.key,
          temperature: 0.7,
        }) as unknown as BaseChatModel,
      };
@@ -108,7 +108,7 @@ export const loadGeminiEmbeddingModels = async () => {

    return embeddingModels;
  } catch (err) {
-   console.error(`Error loading OpenAI embeddings models: ${err}`);
+   console.error(`Error loading Gemini embeddings models: ${err}`);
    return {};
  }
};
@@ -29,12 +29,15 @@ export const loadGroqChatModels = async () => {
      chatModels[model.id] = {
        displayName: model.id,
        model: new ChatOpenAI({
-         openAIApiKey: groqApiKey,
+         apiKey: groqApiKey,
          modelName: model.id,
          temperature: 0.7,
          configuration: {
            baseURL: 'https://api.groq.com/openai/v1',
          },
+         metadata: {
+           'model-type': 'groq',
+         },
        }) as unknown as BaseChatModel,
      };
    });
@@ -118,7 +118,7 @@ export const getAvailableChatModelProviders = async () => {
          [customOpenAiModelName]: {
            displayName: customOpenAiModelName,
            model: new ChatOpenAI({
-             openAIApiKey: customOpenAiApiKey,
+             apiKey: customOpenAiApiKey,
              modelName: customOpenAiModelName,
              temperature: 0.7,
              configuration: {
@@ -47,7 +47,7 @@ export const loadLMStudioChatModels = async () => {
      chatModels[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
-         openAIApiKey: 'lm-studio',
+         apiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
@@ -83,7 +83,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
      embeddingsModels[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
-         openAIApiKey: 'lm-studio',
+         apiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
@@ -6,8 +6,8 @@ export const PROVIDER_INFO = {
   key: 'ollama',
   displayName: 'Ollama',
 };
-import { ChatOllama } from '@langchain/community/chat_models/ollama';
-import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
+import { ChatOllama } from '@langchain/ollama';
+import { OllamaEmbeddings } from '@langchain/ollama';

 export const loadOllamaChatModels = async () => {
   const ollamaApiEndpoint = getOllamaApiEndpoint();
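ChatOllama and OllamaEmbeddings now come from the dedicated @langchain/ollama package added in package.json, rather than from @langchain/community. A quick sketch of the new imports in use (endpoint and model names are assumptions):

import { ChatOllama, OllamaEmbeddings } from '@langchain/ollama';

const chat = new ChatOllama({
  baseUrl: 'http://localhost:11434', // Ollama's default endpoint
  model: 'llama3.1', // assumed model name
});

const embeddings = new OllamaEmbeddings({
  baseUrl: 'http://localhost:11434',
  model: 'nomic-embed-text', // assumed embedding model
});

const reply = await chat.invoke('Say hello');
console.log(reply.content);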
@@ -67,7 +67,7 @@ export const loadOpenAIChatModels = async () => {
      chatModels[model.key] = {
        displayName: model.displayName,
        model: new ChatOpenAI({
-         openAIApiKey: openaiApiKey,
+         apiKey: openaiApiKey,
          modelName: model.key,
          temperature: 0.7,
        }) as unknown as BaseChatModel,
@@ -93,7 +93,7 @@ export const loadOpenAIEmbeddingModels = async () => {
      embeddingModels[model.key] = {
        displayName: model.displayName,
        model: new OpenAIEmbeddings({
-         openAIApiKey: openaiApiKey,
+         apiKey: openaiApiKey,
          modelName: model.key,
        }) as unknown as Embeddings,
      };
@@ -1,8 +1,11 @@
-import { BaseMessage } from '@langchain/core/messages';
+import { BaseMessage, isAIMessage } from '@langchain/core/messages';

 const formatChatHistoryAsString = (history: BaseMessage[]) => {
   return history
-    .map((message) => `${message._getType()}: ${message.content}`)
+    .map(
+      (message) =>
+        `${isAIMessage(message) ? 'AI' : 'User'}: ${message.content}`,
+    )
     .join('\n');
 };

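`_getType()` is an internal accessor on BaseMessage; `isAIMessage` is the public type guard and also yields friendlier role labels. A usage sketch (the helper's import path is assumed):

import { AIMessage, HumanMessage } from '@langchain/core/messages';
import formatChatHistoryAsString from './utils/formatHistory'; // assumed path

const text = formatChatHistoryAsString([
  new HumanMessage('What is a cat?'),
  new AIMessage('A small domesticated feline.'),
]);
console.log(text);
// User: What is a cat?
// AI: A small domesticated feline.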