Compare commits


20 Commits

Author  SHA1  Message  Date
ItzCrazyKns  567c6a8758  Merge branch 'pr/831'  2025-07-24 22:36:19 +05:30
ItzCrazyKns  81a91da743  feat(gemini): use model instead of modelName  2025-07-23 12:22:26 +05:30
ItzCrazyKns  70a61ee1eb  feat(think-box): handle thinkingEnded  2025-07-23 12:21:07 +05:30
ItzCrazyKns  9d89a4413b  feat(format-history): remove extra :  2025-07-23 12:20:49 +05:30
ItzCrazyKns  6ea17d54c6  feat(chat-window): fix wrong history while rewriting  2025-07-22 21:21:49 +05:30
ItzCrazyKns  11a828b073  feat(message-box): close think box after thinking process ends  2025-07-22 21:21:09 +05:30
ItzCrazyKns  37022fb11e  feat(format-history): update role determination  2025-07-22 21:20:16 +05:30
ItzCrazyKns  dd50d4927b  Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica  2025-07-22 12:27:11 +05:30
ItzCrazyKns  fdaf3af3af  Merge pull request #832 from tuxthepenguin84/patch-2 (Fix name of provider in embeddings models error message)  2025-07-21 20:56:24 +05:30
Samuel Dockery  3f2a8f862c  Fix name of provider in embeddings models error message  2025-07-20 09:20:39 -07:00
Samuel Dockery  58c7be6e95  Update Gemini 2.5 Models  2025-07-20 09:17:20 -07:00
ItzCrazyKns  829b4e7134  feat(custom-openai): use apiKey instead of openAIApiKey  2025-07-19 21:37:34 +05:30
ItzCrazyKns  77870b39cc  feat(ollama): use @langchain/ollama library  2025-07-19 21:37:34 +05:30
ItzCrazyKns  8e0ae9b867  feat(providers): switch to apiKey key  2025-07-19 21:37:34 +05:30
ItzCrazyKns  543f1df5ce  feat(modules): update langchain packages  2025-07-19 21:37:34 +05:30
ItzCrazyKns  341aae4587  Merge branch 'pr/830'  2025-07-19 21:36:23 +05:30
Willie Zutz  7f62907385  feat(weather): update measurement units to Imperial/Metric  2025-07-19 08:53:11 -06:00
ItzCrazyKns  7c4aa683a2  feat(chains): remove unused imports  2025-07-19 17:57:32 +05:30
ItzCrazyKns  b48b0eeb0e  feat(imageSearch): use XML parsing, implement few shot prompting  2025-07-19 17:52:30 +05:30
ItzCrazyKns  cddc793915  feat(videoSearch): use XML parsing, use few shot prompting  2025-07-19 17:52:14 +05:30
24 changed files with 476 additions and 499 deletions

View File

@@ -15,11 +15,12 @@
    "@headlessui/react": "^2.2.0",
    "@iarna/toml": "^2.2.5",
    "@icons-pack/react-simple-icons": "^12.3.0",
-   "@langchain/anthropic": "^0.3.15",
-   "@langchain/community": "^0.3.36",
-   "@langchain/core": "^0.3.42",
-   "@langchain/google-genai": "^0.1.12",
-   "@langchain/openai": "^0.0.25",
+   "@langchain/anthropic": "^0.3.24",
+   "@langchain/community": "^0.3.49",
+   "@langchain/core": "^0.3.66",
+   "@langchain/google-genai": "^0.2.15",
+   "@langchain/ollama": "^0.2.3",
+   "@langchain/openai": "^0.6.2",
    "@langchain/textsplitters": "^0.1.0",
    "@tailwindcss/typography": "^0.5.12",
    "@xenova/transformers": "^2.17.2",
@@ -31,7 +32,7 @@
    "drizzle-orm": "^0.40.1",
    "html-to-text": "^9.0.5",
    "jspdf": "^3.0.1",
-   "langchain": "^0.1.30",
+   "langchain": "^0.3.30",
    "lucide-react": "^0.363.0",
    "mammoth": "^1.9.1",
    "markdown-to-jsx": "^7.7.2",

View File

@@ -223,7 +223,7 @@ export const POST = async (req: Request) => {
    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {

View File

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {

View File

@@ -81,8 +81,7 @@ export const POST = async (req: Request) => {
    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
        modelName: body.chatModel?.name || getCustomOpenaiModelName(),
-       openAIApiKey:
-         body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
+       apiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
        temperature: 0.7,
        configuration: {
          baseURL:

View File

@@ -48,7 +48,7 @@ export const POST = async (req: Request) => {
    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {

View File

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
    if (body.chatModel?.provider === 'custom_openai') {
      llm = new ChatOpenAI({
-       openAIApiKey: getCustomOpenaiApiKey(),
+       apiKey: getCustomOpenaiApiKey(),
        modelName: getCustomOpenaiModelName(),
        temperature: 0.7,
        configuration: {

View File

@@ -1,7 +1,10 @@
export const POST = async (req: Request) => {
  try {
-   const body: { lat: number; lng: number; temperatureUnit: 'C' | 'F' } =
-     await req.json();
+   const body: {
+     lat: number;
+     lng: number;
+     measureUnit: 'Imperial' | 'Metric';
+   } = await req.json();
    if (!body.lat || !body.lng) {
      return Response.json(
@@ -13,7 +16,9 @@ export const POST = async (req: Request) => {
    }
    const res = await fetch(
-     `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${body.temperatureUnit === 'C' ? '' : '&temperature_unit=fahrenheit'}`,
+     `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${
+       body.measureUnit === 'Metric' ? '' : '&temperature_unit=fahrenheit'
+     }${body.measureUnit === 'Metric' ? '' : '&wind_speed_unit=mph'}`,
    );
    const data = await res.json();
@@ -35,13 +40,15 @@ export const POST = async (req: Request) => {
      windSpeed: number;
      icon: string;
      temperatureUnit: 'C' | 'F';
+     windSpeedUnit: 'm/s' | 'mph';
    } = {
      temperature: data.current.temperature_2m,
      condition: '',
      humidity: data.current.relative_humidity_2m,
      windSpeed: data.current.wind_speed_10m,
      icon: '',
-     temperatureUnit: body.temperatureUnit,
+     temperatureUnit: body.measureUnit === 'Metric' ? 'C' : 'F',
+     windSpeedUnit: body.measureUnit === 'Metric' ? 'm/s' : 'mph',
    };
    const code = data.current.weather_code;
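
The route now derives both the temperature and wind-speed query parameters from a single measureUnit flag and reports the chosen units back to the caller. A sketch of calling it from the client, assuming the /api/weather path implied by the Next.js route layout:

// Hypothetical coordinates; 'Imperial' would append
// &temperature_unit=fahrenheit&wind_speed_unit=mph to the Open-Meteo URL.
const res = await fetch('/api/weather', {
  method: 'POST',
  body: JSON.stringify({ lat: 52.52, lng: 13.405, measureUnit: 'Metric' }),
});
const weather = await res.json();
// weather.temperatureUnit is 'C' | 'F' and weather.windSpeedUnit is 'm/s' | 'mph',
// so the widget no longer has to hardcode a unit label.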

View File

@@ -148,7 +148,9 @@ const Page = () => {
  const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
  const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
  const [systemInstructions, setSystemInstructions] = useState<string>('');
- const [temperatureUnit, setTemperatureUnit] = useState<'C' | 'F'>('C');
+ const [measureUnit, setMeasureUnit] = useState<'Imperial' | 'Metric'>(
+   'Metric',
+ );
  const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
  useEffect(() => {
@@ -211,7 +213,9 @@ const Page = () => {
      setSystemInstructions(localStorage.getItem('systemInstructions')!);
-     setTemperatureUnit(localStorage.getItem('temperatureUnit')! as 'C' | 'F');
+     setMeasureUnit(
+       localStorage.getItem('measureUnit')! as 'Imperial' | 'Metric',
+     );
      setIsLoading(false);
    };
@@ -371,8 +375,8 @@ const Page = () => {
        localStorage.setItem('embeddingModel', value);
      } else if (key === 'systemInstructions') {
        localStorage.setItem('systemInstructions', value);
-     } else if (key === 'temperatureUnit') {
-       localStorage.setItem('temperatureUnit', value.toString());
+     } else if (key === 'measureUnit') {
+       localStorage.setItem('measureUnit', value.toString());
      }
    } catch (err) {
      console.error('Failed to save:', err);
@@ -430,22 +434,22 @@ const Page = () => {
              </div>
              <div className="flex flex-col space-y-1">
                <p className="text-black/70 dark:text-white/70 text-sm">
-                 Temperature Unit
+                 Measurement Units
                </p>
                <Select
-                 value={temperatureUnit ?? undefined}
+                 value={measureUnit ?? undefined}
                  onChange={(e) => {
-                   setTemperatureUnit(e.target.value as 'C' | 'F');
-                   saveConfig('temperatureUnit', e.target.value);
+                   setMeasureUnit(e.target.value as 'Imperial' | 'Metric');
+                   saveConfig('measureUnit', e.target.value);
                  }}
                  options={[
                    {
-                     label: 'Celsius',
-                     value: 'C',
+                     label: 'Metric',
+                     value: 'Metric',
                    },
                    {
-                     label: 'Fahrenheit',
-                     value: 'F',
+                     label: 'Imperial',
+                     value: 'Imperial',
                    },
                  ]}
                />
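
The setting now persists under a new localStorage key, so reading it elsewhere looks like the sketch below (the ?? 'Metric' fallback mirrors the one the weather widget uses). Note that nothing in this compare migrates the old temperatureUnit key, so a previously saved Fahrenheit preference simply falls back to Metric.

// Minimal sketch of consuming the new setting:
const measureUnit = (localStorage.getItem('measureUnit') ??
  'Metric') as 'Imperial' | 'Metric';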

View File

@@ -354,7 +354,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
    }
  }, [isMessagesLoaded, isConfigReady]);
- const sendMessage = async (message: string, messageId?: string) => {
+ const sendMessage = async (
+   message: string,
+   messageId?: string,
+   rewrite = false,
+ ) => {
    if (loading) return;
    if (!isConfigReady) {
      toast.error('Cannot send message before the configuration is ready');
@@ -482,6 +486,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
      }
    };
+   const messageIndex = messages.findIndex((m) => m.messageId === messageId);
    const res = await fetch('/api/chat', {
      method: 'POST',
      headers: {
@@ -498,7 +504,9 @@ const ChatWindow = ({ id }: { id?: string }) => {
        files: fileIds,
        focusMode: focusMode,
        optimizationMode: optimizationMode,
-       history: chatHistory,
+       history: rewrite
+         ? chatHistory.slice(0, messageIndex === -1 ? undefined : messageIndex)
+         : chatHistory,
        chatModel: {
          name: chatModelProvider.name,
          provider: chatModelProvider.provider,
@@ -552,7 +560,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
      return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
    });
-   sendMessage(message.content, message.messageId);
+   sendMessage(message.content, message.messageId, true);
  };
  useEffect(() => {
useEffect(() => {

View File

@@ -21,8 +21,16 @@ import SearchVideos from './SearchVideos';
import { useSpeech } from 'react-text-to-speech';
import ThinkBox from './ThinkBox';
- const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
-   return <ThinkBox content={children as string} />;
+ const ThinkTagProcessor = ({
+   children,
+   thinkingEnded,
+ }: {
+   children: React.ReactNode;
+   thinkingEnded: boolean;
+ }) => {
+   return (
+     <ThinkBox content={children as string} thinkingEnded={thinkingEnded} />
+   );
};
const MessageBox = ({
@@ -46,6 +54,7 @@ const MessageBox = ({
}) => {
  const [parsedMessage, setParsedMessage] = useState(message.content);
  const [speechMessage, setSpeechMessage] = useState(message.content);
+ const [thinkingEnded, setThinkingEnded] = useState(false);
  useEffect(() => {
    const citationRegex = /\[([^\]]+)\]/g;
@@ -61,6 +70,10 @@ const MessageBox = ({
      }
    }
+   if (message.role === 'assistant' && message.content.includes('</think>')) {
+     setThinkingEnded(true);
+   }
    if (
      message.role === 'assistant' &&
      message?.sources &&
@@ -88,7 +101,7 @@ const MessageBox = ({
          if (url) {
            return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
          } else {
-           return `[${numStr}]`;
+           return ``;
          }
        })
        .join('');
@@ -99,6 +112,14 @@ const MessageBox = ({
      );
      setSpeechMessage(message.content.replace(regex, ''));
      return;
+   } else if (
+     message.role === 'assistant' &&
+     message?.sources &&
+     message.sources.length === 0
+   ) {
+     setParsedMessage(processedMessage.replace(regex, ''));
+     setSpeechMessage(message.content.replace(regex, ''));
+     return;
    }
    setSpeechMessage(message.content.replace(regex, ''));
@@ -111,6 +132,9 @@ const MessageBox = ({
    overrides: {
      think: {
        component: ThinkTagProcessor,
+       props: {
+         thinkingEnded: thinkingEnded,
+       },
      },
    },
  };
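
Two things happen here: citations whose source list is empty are now dropped instead of rendered as bare [n] markers, and the component watches the streamed content for a closing </think> tag so it can tell the think box to collapse. The thinkingEnded flag reaches ThinkBox through markdown-to-jsx's override props, roughly as sketched below (assuming the ThinkTagProcessor defined above):

import Markdown from 'markdown-to-jsx';

// Once '</think>' appears in the streamed assistant message, thinkingEnded
// flips to true and every subsequent render collapses the box.
<Markdown
  options={{
    overrides: {
      think: {
        component: ThinkTagProcessor,
        props: { thinkingEnded }, // forwarded to <ThinkBox>
      },
    },
  }}
>
  {parsedMessage}
</Markdown>;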

View File

@@ -1,15 +1,23 @@
'use client';
- import { useState } from 'react';
- import { cn } from '@/lib/utils';
+ import { useEffect, useState } from 'react';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
  content: string;
+ thinkingEnded: boolean;
}
- const ThinkBox = ({ content }: ThinkBoxProps) => {
-   const [isExpanded, setIsExpanded] = useState(false);
+ const ThinkBox = ({ content, thinkingEnded }: ThinkBoxProps) => {
+   const [isExpanded, setIsExpanded] = useState(true);
+   useEffect(() => {
+     if (thinkingEnded) {
+       setIsExpanded(false);
+     } else {
+       setIsExpanded(true);
+     }
+   }, [thinkingEnded]);
  return (
    <div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
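
The box now starts expanded while reasoning tokens stream in and collapses itself once the parent signals that thinking has ended. A one-line usage sketch, where reasoningText and streamedText are hypothetical stand-ins for MessageBox's state:

<ThinkBox content={reasoningText} thinkingEnded={streamedText.includes('</think>')} />;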

View File

@@ -10,6 +10,7 @@ const WeatherWidget = () => {
    windSpeed: 0,
    icon: '',
    temperatureUnit: 'C',
+   windSpeedUnit: 'm/s',
  });
  const [loading, setLoading] = useState(true);
@@ -75,7 +76,7 @@ const WeatherWidget = () => {
        body: JSON.stringify({
          lat: location.latitude,
          lng: location.longitude,
-         temperatureUnit: localStorage.getItem('temperatureUnit') ?? 'C',
+         measureUnit: localStorage.getItem('measureUnit') ?? 'Metric',
        }),
      });
@@ -95,6 +96,7 @@ const WeatherWidget = () => {
        windSpeed: data.windSpeed,
        icon: data.icon,
        temperatureUnit: data.temperatureUnit,
+       windSpeedUnit: data.windSpeedUnit,
      });
      setLoading(false);
    });
@@ -139,7 +141,7 @@ const WeatherWidget = () => {
          </span>
          <span className="flex items-center text-xs text-black/60 dark:text-white/60">
            <Wind className="w-3 h-3 mr-1" />
-           {data.windSpeed} km/h
+           {data.windSpeed} {data.windSpeedUnit}
          </span>
        </div>
        <span className="text-xs text-black/60 dark:text-white/60 mt-1">

View File

@@ -3,32 +3,18 @@ import {
  RunnableMap,
  RunnableLambda,
} from '@langchain/core/runnables';
- import { PromptTemplate } from '@langchain/core/prompts';
+ import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+ import LineOutputParser from '../outputParsers/lineOutputParser';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
- Example:
- 1. Follow up question: What is a cat?
- Rephrased: A cat
- 2. Follow up question: What is a car? How does it works?
- Rephrased: Car working
- 3. Follow up question: How does an AC work?
- Rephrased: AC working
- Conversation:
- {chat_history}
- Follow up question: {query}
- Rephrased question:
+ Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type ImageSearchChainInput = {
@@ -54,12 +40,39 @@ const createImageSearchChain = (llm: BaseChatModel) => {
        return input.query;
      },
    }),
-   PromptTemplate.fromTemplate(imageSearchChainPrompt),
+   ChatPromptTemplate.fromMessages([
+     ['system', imageSearchChainPrompt],
+     [
+       'user',
+       '<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
+     ],
+     ['assistant', '<query>A cat</query>'],
+     [
+       'user',
+       '<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
+     ],
+     ['assistant', '<query>Car working</query>'],
+     [
+       'user',
+       '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
+     ],
+     ['assistant', '<query>AC working</query>'],
+     [
+       'user',
+       '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
+     ],
+   ]),
    llm,
    strParser,
+   RunnableLambda.from(async (input: string) => {
+     input = input.replace(/<think>.*?<\/think>/g, '');
+     const queryParser = new LineOutputParser({
+       key: 'query',
+     });
+     return await queryParser.parse(input);
+   }),
    RunnableLambda.from(async (input: string) => {
      const res = await searchSearxng(input, {
        engines: ['bing images', 'google images'],
      });
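
Instead of one free-form string prompt, the chain now sends a system message plus three canned user/assistant exchanges, which anchors the output format far more reliably than inline examples. The completion is also no longer trusted as-is: any <think>…</think> reasoning is stripped and the query is extracted from the <query> element. A hedged illustration of that post-processing step, using a made-up completion:

const raw = '<think>The user wants cat pictures.</think><query>A cat</query>';
const cleaned = raw.replace(/<think>.*?<\/think>/g, ''); // '<query>A cat</query>'
// LineOutputParser({ key: 'query' }) then returns just the element body: 'A cat'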

View File

@@ -3,33 +3,19 @@ import {
  RunnableMap,
  RunnableLambda,
} from '@langchain/core/runnables';
- import { PromptTemplate } from '@langchain/core/prompts';
+ import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+ import LineOutputParser from '../outputParsers/lineOutputParser';
- const VideoSearchChainPrompt = `
- You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
- You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
- Example:
- 1. Follow up question: How does a car work?
- Rephrased: How does a car work?
- 2. Follow up question: What is the theory of relativity?
- Rephrased: What is theory of relativity
- 3. Follow up question: How does an AC work?
- Rephrased: How does an AC work
- Conversation:
- {chat_history}
- Follow up question: {query}
- Rephrased question:
- `;
+ const videoSearchChainPrompt = `
+ You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
+ You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
+ Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
+ `;
type VideoSearchChainInput = {
  chat_history: BaseMessage[];
@@ -55,12 +41,37 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
        return input.query;
      },
    }),
-   PromptTemplate.fromTemplate(VideoSearchChainPrompt),
+   ChatPromptTemplate.fromMessages([
+     ['system', videoSearchChainPrompt],
+     [
+       'user',
+       '<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
+     ],
+     ['assistant', '<query>How does a car work?</query>'],
+     [
+       'user',
+       '<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
+     ],
+     ['assistant', '<query>Theory of relativity</query>'],
+     [
+       'user',
+       '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
+     ],
+     ['assistant', '<query>AC working</query>'],
+     [
+       'user',
+       '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
+     ],
+   ]),
    llm,
    strParser,
+   RunnableLambda.from(async (input: string) => {
+     input = input.replace(/<think>.*?<\/think>/g, '');
+     const queryParser = new LineOutputParser({
+       key: 'query',
+     });
+     return await queryParser.parse(input);
+   }),
    RunnableLambda.from(async (input: string) => {
      const res = await searchSearxng(input, {
        engines: ['youtube'],
      });
@@ -92,8 +103,8 @@ const handleVideoSearch = (
  input: VideoSearchChainInput,
  llm: BaseChatModel,
) => {
- const VideoSearchChain = createVideoSearchChain(llm);
- return VideoSearchChain.invoke(input);
+ const videoSearchChain = createVideoSearchChain(llm);
+ return videoSearchChain.invoke(input);
};
export default handleVideoSearch;
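
Same few-shot restructuring as the image chain, plus a naming cleanup (VideoSearchChain becomes videoSearchChain). Invoking it is unchanged for callers; a sketch, assuming an llm already obtained from one of the providers below:

const videos = await handleVideoSearch(
  { chat_history: [], query: 'How does an AC work?' },
  llm,
);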

View File

@@ -38,7 +38,7 @@ export const loadAimlApiChatModels = async () => {
      chatModels[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
-         openAIApiKey: apiKey,
+         apiKey: apiKey,
          modelName: model.id,
          temperature: 0.7,
          configuration: {
@@ -76,7 +76,7 @@ export const loadAimlApiEmbeddingModels = async () => {
      embeddingModels[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
-         openAIApiKey: apiKey,
+         apiKey: apiKey,
          modelName: model.id,
          configuration: {
            baseURL: API_URL,

View File

@@ -31,7 +31,7 @@ export const loadDeepseekChatModels = async () => {
      chatModels[model.key] = {
        displayName: model.displayName,
        model: new ChatOpenAI({
-         openAIApiKey: deepseekApiKey,
+         apiKey: deepseekApiKey,
          modelName: model.key,
          temperature: 0.7,
          configuration: {

View File

@@ -14,16 +14,12 @@ import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
  {
-   displayName: 'Gemini 2.5 Flash Preview 05-20',
-   key: 'gemini-2.5-flash-preview-05-20',
+   displayName: 'Gemini 2.5 Flash',
+   key: 'gemini-2.5-flash',
  },
  {
-   displayName: 'Gemini 2.5 Pro Preview',
-   key: 'gemini-2.5-pro-preview-05-06',
- },
- {
-   displayName: 'Gemini 2.5 Pro Experimental',
-   key: 'gemini-2.5-pro-preview-05-06',
+   displayName: 'Gemini 2.5 Pro',
+   key: 'gemini-2.5-pro',
  },
  {
    displayName: 'Gemini 2.0 Flash',
@@ -75,7 +71,7 @@ export const loadGeminiChatModels = async () => {
        displayName: model.displayName,
        model: new ChatGoogleGenerativeAI({
          apiKey: geminiApiKey,
-         modelName: model.key,
+         model: model.key,
          temperature: 0.7,
        }) as unknown as BaseChatModel,
      };
@@ -108,7 +104,7 @@ export const loadGeminiEmbeddingModels = async () => {
    return embeddingModels;
  } catch (err) {
-   console.error(`Error loading OpenAI embeddings models: ${err}`);
+   console.error(`Error loading Gemini embeddings models: ${err}`);
    return {};
  }
};
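
Besides renaming the preview models to their stable ids, this adopts the model option that @langchain/google-genai 0.2.x expects in place of modelName (the motivation named in the feat(gemini) commit), and fixes a copy-pasted 'OpenAI' in the Gemini embeddings error log. A minimal construction sketch, assuming an environment-sourced key:

import { ChatGoogleGenerativeAI } from '@langchain/google-genai';

const llm = new ChatGoogleGenerativeAI({
  apiKey: process.env.GEMINI_API_KEY, // assumption for this sketch
  model: 'gemini-2.5-flash', // `model` replaces `modelName` here
  temperature: 0.7,
});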

View File

@@ -29,12 +29,15 @@ export const loadGroqChatModels = async () => {
      chatModels[model.id] = {
        displayName: model.id,
        model: new ChatOpenAI({
-         openAIApiKey: groqApiKey,
+         apiKey: groqApiKey,
          modelName: model.id,
          temperature: 0.7,
          configuration: {
            baseURL: 'https://api.groq.com/openai/v1',
          },
+         metadata: {
+           'model-type': 'groq',
+         },
        }) as unknown as BaseChatModel,
      };
    });

View File

@@ -118,7 +118,7 @@ export const getAvailableChatModelProviders = async () => {
          [customOpenAiModelName]: {
            displayName: customOpenAiModelName,
            model: new ChatOpenAI({
-             openAIApiKey: customOpenAiApiKey,
+             apiKey: customOpenAiApiKey,
              modelName: customOpenAiModelName,
              temperature: 0.7,
              configuration: {

View File

@@ -47,7 +47,7 @@ export const loadLMStudioChatModels = async () => {
      chatModels[model.id] = {
        displayName: model.name || model.id,
        model: new ChatOpenAI({
-         openAIApiKey: 'lm-studio',
+         apiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },
@@ -83,7 +83,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
      embeddingsModels[model.id] = {
        displayName: model.name || model.id,
        model: new OpenAIEmbeddings({
-         openAIApiKey: 'lm-studio',
+         apiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(endpoint),
          },

View File

@@ -6,8 +6,8 @@ export const PROVIDER_INFO = {
  key: 'ollama',
  displayName: 'Ollama',
};
- import { ChatOllama } from '@langchain/community/chat_models/ollama';
- import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
+ import { ChatOllama } from '@langchain/ollama';
+ import { OllamaEmbeddings } from '@langchain/ollama';
export const loadOllamaChatModels = async () => {
  const ollamaApiEndpoint = getOllamaApiEndpoint();
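
Ollama support moves from the community bundle to the dedicated @langchain/ollama package added in package.json above. A minimal sketch of the new imports in use; the model and baseUrl option names are assumptions based on the package's documented API and are not shown in this diff:

import { ChatOllama, OllamaEmbeddings } from '@langchain/ollama';

const chat = new ChatOllama({ baseUrl: 'http://localhost:11434', model: 'llama3' });
const embeddings = new OllamaEmbeddings({ baseUrl: 'http://localhost:11434', model: 'nomic-embed-text' });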

View File

@@ -67,7 +67,7 @@ export const loadOpenAIChatModels = async () => {
      chatModels[model.key] = {
        displayName: model.displayName,
        model: new ChatOpenAI({
-         openAIApiKey: openaiApiKey,
+         apiKey: openaiApiKey,
          modelName: model.key,
          temperature: 0.7,
        }) as unknown as BaseChatModel,
@@ -93,7 +93,7 @@ export const loadOpenAIEmbeddingModels = async () => {
      embeddingModels[model.key] = {
        displayName: model.displayName,
        model: new OpenAIEmbeddings({
-         openAIApiKey: openaiApiKey,
+         apiKey: openaiApiKey,
          modelName: model.key,
        }) as unknown as Embeddings,
      };

View File

@@ -1,8 +1,11 @@
- import { BaseMessage } from '@langchain/core/messages';
+ import { BaseMessage, isAIMessage } from '@langchain/core/messages';
const formatChatHistoryAsString = (history: BaseMessage[]) => {
  return history
-   .map((message) => `${message._getType()}: ${message.content}`)
+   .map(
+     (message) =>
+       `${isAIMessage(message) ? 'AI' : 'User'}: ${message.content}`,
+   )
    .join('\n');
};
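
History lines are now labeled 'AI' / 'User' via isAIMessage instead of the internal _getType() strings ('ai' / 'human'). A worked example of the new output, assuming the formatter above is in scope; note that anything that is not an AI message, including a system message, gets the 'User' label under this check:

import { AIMessage, HumanMessage } from '@langchain/core/messages';

const history = [
  new HumanMessage('What is a cat?'),
  new AIMessage('A small domesticated feline.'),
];
console.log(formatChatHistoryAsString(history));
// User: What is a cat?
// AI: A small domesticated feline.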

yarn.lock: 680 lines changed (file diff suppressed because it is too large)