Compare commits


59 Commits

Author SHA1 Message Date
ItzCrazyKns
b8e4152e77 Merge pull request #857 from skoved/fix-light-mode
make file icon in attachment modal in chat page fit light theme better
2025-09-12 21:16:46 +05:30
ItzCrazyKns
c8ac9279bd Merge pull request #866 from agungbesti/feat/add-openai-models
feat: add new OpenAI models with proper temperature parameter handling
2025-09-12 21:15:09 +05:30
akubesti
6f367c34a8 feat: add gpt-5, gpt-5-mini, o3 models and remove gpt-5-chat-latest
- Add new OpenAI models: gpt-5, gpt-5-mini, and o3 series

- Fix temperature parameter handling for o3 models

- Update models list to ensure compatibility
2025-09-12 22:22:16 +07:00
akubesti
328b12ffbe feat: add new OpenAI models with proper temperature parameter handling
- Add GPT-4.1 series and o1/o3/o4 models with temperature compatibility fixes

- Remove gpt-5/gpt-5-mini models due to organization verification restrictions

- Fix 400 errors for models that only support default temperature values
2025-09-11 16:38:01 +07:00
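The constraint behind these two commits: several newer OpenAI models (the o-series among them) accept only the default temperature and return HTTP 400 if a custom value is sent. A minimal sketch of what such conditional handling can look like, with an illustrative helper name and model list rather than the PR's actual code:

import { ChatOpenAI } from '@langchain/openai';

// Illustrative list: models assumed to reject a non-default temperature.
const FIXED_TEMPERATURE_MODELS = ['o1', 'o3', 'gpt-5', 'gpt-5-mini'];

const supportsCustomTemperature = (model: string) =>
  !FIXED_TEMPERATURE_MODELS.some((prefix) => model.startsWith(prefix));

const makeChatModel = (model: string, apiKey: string) =>
  new ChatOpenAI({
    model,
    apiKey,
    // Only attach temperature where a custom value is accepted;
    // omitting it avoids the 400 errors described above.
    ...(supportsCustomTemperature(model) ? { temperature: 0.7 } : {}),
  });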
skoved
d8486e90bb make file icon in attachment modal in chat page fit light theme better
give the file icon in the attachment modal on the chat page an off-white background so that it matches the light theme better and looks the same as the attachment modal on the home page
2025-08-27 09:43:09 -04:00
ItzCrazyKns
238bcaff2b Delete .github/FUNDING.yml 2025-08-27 16:23:39 +05:30
ItzCrazyKns
6f7c55b783 Update FUNDING.yml with Patreon username
Added Patreon username for funding support.
2025-08-27 16:19:53 +05:30
ItzCrazyKns
83a0cffe1b Merge pull request #828 from justinmayer/patch-1
docs: Add instructions for local OpenAI-API-compatible LLMs
2025-08-27 14:38:55 +05:30
ItzCrazyKns
829ae59944 Merge pull request #855 from skoved/fix-light-mode
fix text color for topic names on discover page in light mode
2025-08-27 14:38:13 +05:30
skoved
a546eb18a1 make file icon in attachment modal fit light theme better
make the file icon in the attachment modal black on an off-white background so that it matches the light theme better and looks stylistically equivalent to its look in dark mode
2025-08-26 21:53:51 -04:00
skoved
ff1ca56157 fix text color for topic names on discover page in light mode
make light mode color for topic text black so it is readable
2025-08-26 21:16:21 -04:00
ItzCrazyKns
30725b5d6d feat(attach): remove unused import 2025-08-25 21:48:08 +05:30
ItzCrazyKns
8dc54efbdd feat(chat-route): lint & beautify 2025-08-21 17:48:55 +05:30
ItzCrazyKns
72f26b4370 feat(upload): save files uploaded after chat created 2025-08-21 17:47:49 +05:30
ItzCrazyKns
f680188905 feat(ollama): add ability to provide api key 2025-08-20 20:32:13 +05:30
ItzCrazyKns
0b15bfbe32 feat(app): switch to useChat hook 2025-08-20 20:21:06 +05:30
ItzCrazyKns
8fc7808654 feat(hooks): implement useChat hook 2025-08-20 20:20:50 +05:30
ItzCrazyKns
0dc17286b9 Merge branch 'pr/843' 2025-08-12 21:39:25 +05:30
ItzCrazyKns
3edd7d44dd feat(openai): conditionally set temperature 2025-08-12 21:39:14 +05:30
Samuel Dockery
1132997108 feat: Add support for latest AI models from Anthropic, Google, & OpenAI 2025-08-10 07:50:31 -07:00
ItzCrazyKns
eadbedb713 feat(groq): switch to @langchain/groq for better handling 2025-08-02 17:14:34 +05:30
ItzCrazyKns
37cd6d3ab5 feat(discover): prevent duplicate articles 2025-08-01 20:41:07 +05:30
ItzCrazyKns
88be3a045b feat(discover): randomly sort results 2025-07-29 13:18:36 +05:30
ItzCrazyKns
45b51ab156 feat(discover-api): handle topics 2025-07-29 13:17:07 +05:30
ItzCrazyKns
3bee01cfa7 feat(discover): add topic selection 2025-07-28 20:39:50 +05:30
ItzCrazyKns
567c6a8758 Merge branch 'pr/831' 2025-07-24 22:36:19 +05:30
ItzCrazyKns
81a91da743 feat(gemini): use model instead of modelName 2025-07-23 12:22:26 +05:30
ItzCrazyKns
70a61ee1eb feat(think-box): handle thinkingEnded 2025-07-23 12:21:07 +05:30
ItzCrazyKns
9d89a4413b feat(format-history): remove extra : 2025-07-23 12:20:49 +05:30
ItzCrazyKns
6ea17d54c6 feat(chat-window): fix wrong history while rewriting 2025-07-22 21:21:49 +05:30
ItzCrazyKns
11a828b073 feat(message-box): close think box after thinking process ends 2025-07-22 21:21:09 +05:30
ItzCrazyKns
37022fb11e feat(format-history): update role determination 2025-07-22 21:20:16 +05:30
ItzCrazyKns
dd50d4927b Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-07-22 12:27:11 +05:30
ItzCrazyKns
fdaf3af3af Merge pull request #832 from tuxthepenguin84/patch-2
Fix name of provider in embeddings models error message
2025-07-21 20:56:24 +05:30
Samuel Dockery
3f2a8f862c Fix name of provider in embeddings models error message 2025-07-20 09:20:39 -07:00
Samuel Dockery
58c7be6e95 Update Gemini 2.5 Models 2025-07-20 09:17:20 -07:00
ItzCrazyKns
829b4e7134 feat(custom-openai): use apiKey instead of openAIApiKey 2025-07-19 21:37:34 +05:30
ItzCrazyKns
77870b39cc feat(ollama): use @langchain/ollama library 2025-07-19 21:37:34 +05:30
ItzCrazyKns
8e0ae9b867 feat(providers): switch to apiKey key 2025-07-19 21:37:34 +05:30
ItzCrazyKns
543f1df5ce feat(modules): update langchain packages 2025-07-19 21:37:34 +05:30
ItzCrazyKns
341aae4587 Merge branch 'pr/830' 2025-07-19 21:36:23 +05:30
Willie Zutz
7f62907385 feat(weather): update measurement units to Imperial/Metric 2025-07-19 08:53:11 -06:00
ItzCrazyKns
7c4aa683a2 feat(chains): remove unused imports 2025-07-19 17:57:32 +05:30
ItzCrazyKns
b48b0eeb0e feat(imageSearch): use XML parsing, implement few shot prompting 2025-07-19 17:52:30 +05:30
ItzCrazyKns
cddc793915 feat(videoSearch): use XML parsing, use few shot prompting 2025-07-19 17:52:14 +05:30
ItzCrazyKns
94e6db10bb feat(weather): add other measurement units, closes #821 #790 2025-07-18 21:09:32 +05:30
Justin Mayer
65fc881356 docs: Add instructions for local OpenAI-API-compatible LLMs
Perplexica supports local OpenAI-API-compatible LLMs via the `[MODELS.CUSTOM_OPENAI]` header in `config.toml` but was missing documentation for configuring the necessary settings. This makes that support more explicit and visible, as well as helping end users set the required configuration values appropriately.
2025-07-17 19:10:12 +02:00
ItzCrazyKns
26e1d5fec3 feat(routes): lint & beautify 2025-07-17 22:23:11 +05:30
ItzCrazyKns
66be87b688 Merge branch 'pr/827' 2025-07-17 22:22:50 +05:30
amoshydra
f7b4e32218 fix(discover): provide language when fetching
some engines return an empty response when no language is provided.

fix #618
2025-07-17 02:14:49 +08:00
ItzCrazyKns
57407112fb feat(package): bump version 2025-07-16 10:39:50 +05:30
ItzCrazyKns
b280cc2e01 Merge pull request #787 from chriswritescode-dev/IOS
Fix: iOS Input Zoom / Support PWA Home Screen App, closes #458
2025-07-15 22:10:01 +05:30
ItzCrazyKns
e6ebf892c5 feat(styles): update globals.css 2025-07-15 21:47:20 +05:30
ItzCrazyKns
b754641058 feat(gitignore): add certificates 2025-07-15 21:45:44 +05:30
ItzCrazyKns
722f4f760e feat(manifest): update icons & screenshots 2025-07-15 21:45:37 +05:30
ItzCrazyKns
01e04a209f feat(public): add screenshots & update icons 2025-07-15 21:45:24 +05:30
ItzCrazyKns
0299fd1ea0 Merge pull request #817 from kittrydge/patch-1
Update Linux ollama instructions in README.md
2025-07-15 20:23:02 +05:30
kittrydge
ccd89d48d9 Update Linux ollama instructions in README.md
When setting the OLLAMA_HOST environment variable, the port number must be specified (see https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux).

Also, 'systemctl daemon-reload' needs to be called after changing a systemd unit file, and before the relevant systemd service is reloaded.
2025-07-01 18:00:26 -06:00
Chris Scott
68c43ea372 Fix: iOS Input Zoom
config for theme consistency and iOS standalone mode
- Modified manifest.ts to ensure proper metadata

- Added display: standalone for iOS PWA behavior
2025-06-02 21:52:41 -04:00
53 changed files with 1584 additions and 1364 deletions

.assets/manifest.json (new file)

.gitignore (vendored, 2 additions)

@@ -37,3 +37,5 @@ Thumbs.db
# Db
db.sqlite
/searxng
certificates

README.md

@@ -53,7 +53,7 @@ Want to know more about its architecture and how it works? You can read it [here
## Features
-- **Local LLMs**: You can make use local LLMs such as Llama3 and Mixtral using Ollama.
+- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral.
- **Two Main Modes:**
- **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page.
- **Normal Mode:** Processes your query and performs a web search.
@@ -87,6 +87,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
- `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
+- `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"` **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**.
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
- `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
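Taken together, a Docker-oriented `config.toml` fragment following these instructions might look like this (a sketch; the values are placeholders, and only the `[MODELS.CUSTOM_OPENAI]` table name is confirmed by the docs commit in the list above):

[MODELS.CUSTOM_OPENAI]
API_URL = "http://host.docker.internal:8080"  # port your local server listens on
API_KEY = "whatever-you-want-but-not-blank"   # anything non-empty if no key is defined
MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"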
@@ -120,7 +121,17 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
-### Ollama Connection Errors
+### Troubleshooting
+#### Local OpenAI-API-Compliant Servers
+If Perplexica tells you that you haven't configured any chat model providers, ensure that:
+1. Your server is running on `0.0.0.0` (not `127.0.0.1`) and on the same port you put in the API URL.
+2. You have specified the correct model name loaded by your local LLM server.
+3. You have specified the correct API key, or if one is not defined, you have put *something* in the API key field and not left it empty.
+#### Ollama Connection Errors
If you're encountering an Ollama connection error, it is likely due to the backend being unable to connect to Ollama's API. To fix this issue you can:
@@ -135,7 +146,7 @@ If you're encountering an Ollama connection error, it is likely due to the backe
3. **Linux Users - Expose Ollama to Network:**
-- Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
+- Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0:11434"`. (Change the port number if you are using a different one.) Then reload the systemd manager configuration with `systemctl daemon-reload`, and restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
- Ensure that the port (default is 11434) is not blocked by your firewall.
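Two sketches tying the troubleshooting items above together. First, a llama.cpp `llama-server` started so that it satisfies the local-server checklist (model path and port are placeholders):

llama-server -m ./models/your-model.gguf --host 0.0.0.0 --port 8080

Second, the Linux Ollama fix as concrete shell steps, assuming the stock systemd unit; the `Environment` line and both `systemctl` commands come straight from the instructions above:

sudo systemctl edit ollama        # or edit /etc/systemd/system/ollama.service directly
# add under [Service]:
#   Environment="OLLAMA_HOST=0.0.0.0:11434"
sudo systemctl daemon-reload
sudo systemctl restart ollama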

package.json

@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.11.0-rc1",
"version": "1.11.0-rc2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@@ -15,11 +15,13 @@
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.15",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/google-genai": "^0.1.12",
"@langchain/openai": "^0.0.25",
"@langchain/anthropic": "^0.3.24",
"@langchain/community": "^0.3.49",
"@langchain/core": "^0.3.66",
"@langchain/google-genai": "^0.2.15",
"@langchain/groq": "^0.2.3",
"@langchain/ollama": "^0.2.3",
"@langchain/openai": "^0.6.2",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
@@ -31,7 +33,7 @@
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"jspdf": "^3.0.1",
"langchain": "^0.1.30",
"langchain": "^0.3.30",
"lucide-react": "^0.363.0",
"mammoth": "^1.9.1",
"markdown-to-jsx": "^7.7.2",

public/icon-100.png (new binary file, 916 B)

public/icon-50.png (new binary file, 515 B)

public/icon.png (new binary file, 30 KiB)

public/screenshots/p1.png (new binary file, 183 KiB)

public/screenshots/p1_small.png (new binary file, 130 KiB)

public/screenshots/p2.png (new binary file, 627 KiB)

public/screenshots/p2_small.png (new binary file, 202 KiB)

src/app/api/chat/route.ts

@@ -1,11 +1,7 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
@@ -138,6 +134,8 @@ const handleHistorySave = async (
where: eq(chats.id, message.chatId),
});
const fileData = files.map(getFileDetails);
if (!chat) {
await db
.insert(chats)
@@ -146,9 +144,15 @@ const handleHistorySave = async (
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
-files: files.map(getFileDetails),
+files: fileData,
})
.execute();
} else if (JSON.stringify(chat.files ?? []) != JSON.stringify(fileData)) {
db.update(chats)
.set({
files: files.map(getFileDetails),
})
.where(eq(chats.id, message.chatId));
}
const messageExists = await db.query.messages.findFirst({
@@ -223,7 +227,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
-openAIApiKey: getCustomOpenaiApiKey(),
+apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

src/app/api/config/route.ts

@@ -11,6 +11,7 @@ import {
getAimlApiKey,
getLMStudioApiEndpoint,
updateConfig,
getOllamaApiKey,
} from '@/lib/config';
import {
getAvailableChatModelProviders,
@@ -53,6 +54,7 @@ export const GET = async (req: Request) => {
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['ollamaApiKey'] = getOllamaApiKey();
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
@@ -93,6 +95,7 @@ export const POST = async (req: Request) => {
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
API_KEY: config.ollamaApiKey,
},
DEEPSEEK: {
API_KEY: config.deepseekApiKey,

src/app/api/discover/route.ts

@@ -1,55 +1,77 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const websitesForTopic = {
tech: {
query: ['technology news', 'latest tech', 'AI', 'science and innovation'],
links: ['techcrunch.com', 'wired.com', 'theverge.com'],
},
finance: {
query: ['finance news', 'economy', 'stock market', 'investing'],
links: ['bloomberg.com', 'cnbc.com', 'marketwatch.com'],
},
art: {
query: ['art news', 'culture', 'modern art', 'cultural events'],
links: ['artnews.com', 'hyperallergic.com', 'theartnewspaper.com'],
},
sports: {
query: ['sports news', 'latest sports', 'cricket football tennis'],
links: ['espn.com', 'bbc.com/sport', 'skysports.com'],
},
entertainment: {
query: ['entertainment news', 'movies', 'TV shows', 'celebrities'],
links: ['hollywoodreporter.com', 'variety.com', 'deadline.com'],
},
};
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
type Topic = keyof typeof websitesForTopic;
export const GET = async (req: Request) => {
try {
const params = new URL(req.url).searchParams;
const mode: 'normal' | 'preview' =
(params.get('mode') as 'normal' | 'preview') || 'normal';
const topic: Topic = (params.get('topic') as Topic) || 'tech';
const selectedTopic = websitesForTopic[topic];
let data = [];
if (mode === 'normal') {
const seenUrls = new Set();
data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
await Promise.all(
selectedTopic.links.flatMap((link) =>
selectedTopic.query.map(async (query) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
await searchSearxng(`site:${link} ${query}`, {
engines: ['bing news'],
pageno: 1,
language: 'en',
})
).results;
}),
])
),
)
)
.map((result) => result)
.flat()
.filter((item) => {
const url = item.url?.toLowerCase().trim();
if (seenUrls.has(url)) return false;
seenUrls.add(url);
return true;
})
.sort(() => Math.random() - 0.5);
} else {
data = (
await searchSearxng(
`site:${articleWebsites[Math.floor(Math.random() * articleWebsites.length)]} ${topics[Math.floor(Math.random() * topics.length)]}`,
{ engines: ['bing news'], pageno: 1 },
`site:${selectedTopic.links[Math.floor(Math.random() * selectedTopic.links.length)]} ${selectedTopic.query[Math.floor(Math.random() * selectedTopic.query.length)]}`,
{
engines: ['bing news'],
pageno: 1,
language: 'en',
},
)
).results;
}

src/app/api/images/route.ts

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
-openAIApiKey: getCustomOpenaiApiKey(),
+apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

src/app/api/search/route.ts

@@ -81,8 +81,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.name || getCustomOpenaiModelName(),
-openAIApiKey:
-body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
+apiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:

src/app/api/suggestions/route.ts

@@ -48,7 +48,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
-openAIApiKey: getCustomOpenaiApiKey(),
+apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

src/app/api/videos/route.ts

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
-openAIApiKey: getCustomOpenaiApiKey(),
+apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

src/app/api/weather/route.ts

@@ -1,6 +1,10 @@
export const POST = async (req: Request) => {
try {
-const body: { lat: number; lng: number } = await req.json();
+const body: {
+  lat: number;
+  lng: number;
+  measureUnit: 'Imperial' | 'Metric';
+} = await req.json();
if (!body.lat || !body.lng) {
return Response.json(
@@ -12,7 +16,9 @@ export const POST = async (req: Request) => {
}
const res = await fetch(
-`https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto`,
+`https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${
+  body.measureUnit === 'Metric' ? '' : '&temperature_unit=fahrenheit'
+}${body.measureUnit === 'Metric' ? '' : '&wind_speed_unit=mph'}`,
);
const data = await res.json();
@@ -33,12 +39,16 @@ export const POST = async (req: Request) => {
humidity: number;
windSpeed: number;
icon: string;
temperatureUnit: 'C' | 'F';
windSpeedUnit: 'm/s' | 'mph';
} = {
temperature: data.current.temperature_2m,
condition: '',
humidity: data.current.relative_humidity_2m,
windSpeed: data.current.wind_speed_10m,
icon: '',
temperatureUnit: body.measureUnit === 'Metric' ? 'C' : 'F',
windSpeedUnit: body.measureUnit === 'Metric' ? 'm/s' : 'mph',
};
const code = data.current.weather_code;
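A client call matching the new request/response shape then looks roughly like this (a sketch; the real call lives in the WeatherWidget component further down):

const res = await fetch('/api/weather', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ lat: 52.52, lng: 13.41, measureUnit: 'Imperial' }),
});
// The response now carries units alongside values, e.g.
// { temperature, condition, humidity, windSpeed, icon, temperatureUnit: 'F', windSpeedUnit: 'mph' }
const weather = await res.json();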

src/app/c/[chatId]/page.tsx

@@ -1,9 +1,17 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
'use client';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
import ChatWindow from '@/components/ChatWindow';
import { useParams } from 'next/navigation';
import React from 'react';
import { ChatProvider } from '@/lib/hooks/useChat';
const Page = () => {
const { chatId }: { chatId: string } = useParams();
return (
<ChatProvider id={chatId}>
<ChatWindow />
</ChatProvider>
);
};
export default Page;
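ChatProvider and useChat follow the standard React context pattern; a minimal sketch of the shape implied by the consumers in this diff (assumed, not the hook's actual source, which also owns the streaming and config logic moved out of ChatWindow below):

import { createContext, useContext } from 'react';
import { Message } from '@/components/ChatWindow';

// Fields assembled from what the components below destructure; not exhaustive.
interface ChatContextValue {
  chatId?: string;
  messages: Message[];
  loading: boolean;
  messageAppeared: boolean;
  sendMessage: (message: string, messageId?: string) => void;
  rewrite: (messageId: string) => void;
  hasError: boolean;
  isReady: boolean;
  notFound: boolean;
}

const ChatContext = createContext<ChatContextValue | null>(null);

export const useChat = () => {
  const ctx = useContext(ChatContext);
  if (!ctx) throw new Error('useChat must be used within a ChatProvider');
  return ctx;
};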

src/app/discover/page.tsx

@@ -4,6 +4,7 @@ import { Search } from 'lucide-react';
import { useEffect, useState } from 'react';
import Link from 'next/link';
import { toast } from 'sonner';
import { cn } from '@/lib/utils';
interface Discover {
title: string;
@@ -12,60 +13,66 @@ interface Discover {
thumbnail: string;
}
const topics: { key: string; display: string }[] = [
{
display: 'Tech & Science',
key: 'tech',
},
{
display: 'Finance',
key: 'finance',
},
{
display: 'Art & Culture',
key: 'art',
},
{
display: 'Sports',
key: 'sports',
},
{
display: 'Entertainment',
key: 'entertainment',
},
];
const Page = () => {
const [discover, setDiscover] = useState<Discover[] | null>(null);
const [loading, setLoading] = useState(true);
const [activeTopic, setActiveTopic] = useState<string>(topics[0].key);
const fetchArticles = async (topic: string) => {
setLoading(true);
try {
const res = await fetch(`/api/discover?topic=${topic}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
const data = await res.json();
if (!res.ok) {
throw new Error(data.message);
}
data.blogs = data.blogs.filter((blog: Discover) => blog.thumbnail);
setDiscover(data.blogs);
} catch (err: any) {
console.error('Error fetching data:', err.message);
toast.error('Error fetching data');
} finally {
setLoading(false);
}
};
useEffect(() => {
const fetchData = async () => {
try {
const res = await fetch(`/api/discover`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
fetchArticles(activeTopic);
}, [activeTopic]);
const data = await res.json();
if (!res.ok) {
throw new Error(data.message);
}
data.blogs = data.blogs.filter((blog: Discover) => blog.thumbnail);
setDiscover(data.blogs);
} catch (err: any) {
console.error('Error fetching data:', err.message);
toast.error('Error fetching data');
} finally {
setLoading(false);
}
};
fetchData();
}, []);
return loading ? (
<div className="flex flex-row items-center justify-center min-h-screen">
<svg
aria-hidden="true"
className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
) : (
return (
<>
<div>
<div className="flex flex-col pt-4">
@@ -76,35 +83,73 @@ const Page = () => {
<hr className="border-t border-[#2B2C2C] my-4 w-full" />
</div>
<div className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4 pb-28 lg:pb-8 w-full justify-items-center lg:justify-items-start">
{discover &&
discover?.map((item, i) => (
<Link
href={`/?q=Summary: ${item.url}`}
key={i}
className="max-w-sm rounded-lg overflow-hidden bg-light-secondary dark:bg-dark-secondary hover:-translate-y-[1px] transition duration-200"
target="_blank"
>
<img
className="object-cover w-full aspect-video"
src={
new URL(item.thumbnail).origin +
new URL(item.thumbnail).pathname +
`?id=${new URL(item.thumbnail).searchParams.get('id')}`
}
alt={item.title}
/>
<div className="px-6 py-4">
<div className="font-bold text-lg mb-2">
{item.title.slice(0, 100)}...
</div>
<p className="text-black-70 dark:text-white/70 text-sm">
{item.content.slice(0, 100)}...
</p>
</div>
</Link>
))}
<div className="flex flex-row items-center space-x-2 overflow-x-auto">
{topics.map((t, i) => (
<div
key={i}
className={cn(
'border-[0.1px] rounded-full text-sm px-3 py-1 text-nowrap transition duration-200 cursor-pointer',
activeTopic === t.key
? 'text-cyan-300 bg-cyan-300/30 border-cyan-300/60'
: 'border-black/30 dark:border-white/30 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white hover:border-black/40 dark:hover:border-white/40 hover:bg-black/5 dark:hover:bg-white/5',
)}
onClick={() => setActiveTopic(t.key)}
>
<span>{t.display}</span>
</div>
))}
</div>
{loading ? (
<div className="flex flex-row items-center justify-center min-h-screen">
<svg
aria-hidden="true"
className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
) : (
<div className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4 pb-28 pt-5 lg:pb-8 w-full justify-items-center lg:justify-items-start">
{discover &&
discover?.map((item, i) => (
<Link
href={`/?q=Summary: ${item.url}`}
key={i}
className="max-w-sm rounded-lg overflow-hidden bg-light-secondary dark:bg-dark-secondary hover:-translate-y-[1px] transition duration-200"
target="_blank"
>
<img
className="object-cover w-full aspect-video"
src={
new URL(item.thumbnail).origin +
new URL(item.thumbnail).pathname +
`?id=${new URL(item.thumbnail).searchParams.get('id')}`
}
alt={item.title}
/>
<div className="px-6 py-4">
<div className="font-bold text-lg mb-2">
{item.title.slice(0, 100)}...
</div>
<p className="text-black-70 dark:text-white/70 text-sm">
{item.content.slice(0, 100)}...
</p>
</div>
</Link>
))}
</div>
)}
</div>
</>
);

src/app/globals.css

@@ -11,3 +11,11 @@
display: none;
}
}
@media screen and (-webkit-min-device-pixel-ratio: 0) {
select,
textarea,
input {
font-size: 16px !important;
}
}

src/app/manifest.ts (new file, 54 lines)

@@ -0,0 +1,54 @@
import type { MetadataRoute } from 'next';
export default function manifest(): MetadataRoute.Manifest {
return {
name: 'Perplexica - Chat with the internet',
short_name: 'Perplexica',
description:
'Perplexica is an AI powered chatbot that is connected to the internet.',
start_url: '/',
display: 'standalone',
background_color: '#0a0a0a',
theme_color: '#0a0a0a',
screenshots: [
{
src: '/screenshots/p1.png',
form_factor: 'wide',
sizes: '2560x1600',
},
{
src: '/screenshots/p2.png',
form_factor: 'wide',
sizes: '2560x1600',
},
{
src: '/screenshots/p1_small.png',
form_factor: 'narrow',
sizes: '828x1792',
},
{
src: '/screenshots/p2_small.png',
form_factor: 'narrow',
sizes: '828x1792',
},
],
icons: [
{
src: '/icon-50.png',
sizes: '50x50',
type: 'image/png' as const,
},
{
src: '/icon-100.png',
sizes: '100x100',
type: 'image/png',
},
{
src: '/icon.png',
sizes: '440x440',
type: 'image/png',
purpose: 'any',
},
],
};
}

src/app/page.tsx

@@ -1,4 +1,5 @@
import ChatWindow from '@/components/ChatWindow';
import { ChatProvider } from '@/lib/hooks/useChat';
import { Metadata } from 'next';
import { Suspense } from 'react';
@@ -11,7 +12,9 @@ const Home = () => {
return (
<div>
<Suspense>
<ChatWindow />
<ChatProvider>
<ChatWindow />
</ChatProvider>
</Suspense>
</div>
);

src/app/settings/page.tsx

@@ -21,6 +21,7 @@ interface SettingsType {
anthropicApiKey: string;
geminiApiKey: string;
ollamaApiUrl: string;
ollamaApiKey: string;
lmStudioApiUrl: string;
deepseekApiKey: string;
aimlApiKey: string;
@@ -148,6 +149,9 @@ const Page = () => {
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [systemInstructions, setSystemInstructions] = useState<string>('');
const [measureUnit, setMeasureUnit] = useState<'Imperial' | 'Metric'>(
'Metric',
);
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
useEffect(() => {
@@ -210,6 +214,10 @@ const Page = () => {
setSystemInstructions(localStorage.getItem('systemInstructions')!);
setMeasureUnit(
localStorage.getItem('measureUnit')! as 'Imperial' | 'Metric',
);
setIsLoading(false);
};
@@ -368,6 +376,8 @@ const Page = () => {
localStorage.setItem('embeddingModel', value);
} else if (key === 'systemInstructions') {
localStorage.setItem('systemInstructions', value);
} else if (key === 'measureUnit') {
localStorage.setItem('measureUnit', value.toString());
}
} catch (err) {
console.error('Failed to save:', err);
@@ -416,13 +426,35 @@ const Page = () => {
) : (
config && (
<div className="flex flex-col space-y-6 pb-28 lg:pb-8">
<SettingsSection title="Appearance">
<SettingsSection title="Preferences">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Theme
</p>
<ThemeSwitcher />
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Measurement Units
</p>
<Select
value={measureUnit ?? undefined}
onChange={(e) => {
setMeasureUnit(e.target.value as 'Imperial' | 'Metric');
saveConfig('measureUnit', e.target.value);
}}
options={[
{
label: 'Metric',
value: 'Metric',
},
{
label: 'Imperial',
value: 'Imperial',
},
]}
/>
</div>
</SettingsSection>
<SettingsSection title="Automatic Search">
@@ -516,7 +548,7 @@ const Page = () => {
<SettingsSection title="System Instructions">
<div className="flex flex-col space-y-4">
<Textarea
-value={systemInstructions}
+value={systemInstructions ?? undefined}
isSaving={savingStates['systemInstructions']}
onChange={(e) => {
setSystemInstructions(e.target.value);
@@ -787,6 +819,25 @@ const Page = () => {
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Ollama API Key (Can be left blank)
</p>
<Input
type="text"
placeholder="Ollama API Key"
value={config.ollamaApiKey}
isSaving={savingStates['ollamaApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
ollamaApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('ollamaApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
GROQ API Key

src/components/Chat.tsx

@@ -5,28 +5,11 @@ import MessageInput from './MessageInput';
import { File, Message } from './ChatWindow';
import MessageBox from './MessageBox';
import MessageBoxLoading from './MessageBoxLoading';
import { useChat } from '@/lib/hooks/useChat';
const Chat = () => {
const { messages, loading, messageAppeared } = useChat();
const Chat = ({
loading,
messages,
sendMessage,
messageAppeared,
rewrite,
fileIds,
setFileIds,
files,
setFiles,
}: {
messages: Message[];
sendMessage: (message: string) => void;
loading: boolean;
messageAppeared: boolean;
rewrite: (messageId: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [dividerWidth, setDividerWidth] = useState(0);
const dividerRef = useRef<HTMLDivElement | null>(null);
const messageEnd = useRef<HTMLDivElement | null>(null);
@@ -72,12 +55,8 @@ const Chat = ({
key={i}
message={msg}
messageIndex={i}
history={messages}
loading={loading}
dividerRef={isLast ? dividerRef : undefined}
isLast={isLast}
rewrite={rewrite}
sendMessage={sendMessage}
/>
{!isLast && msg.role === 'assistant' && (
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
@@ -92,14 +71,7 @@ const Chat = ({
className="bottom-24 lg:bottom-10 fixed z-40"
style={{ width: dividerWidth }}
>
<MessageInput
loading={loading}
sendMessage={sendMessage}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<MessageInput />
</div>
)}
</div>

src/components/ChatWindow.tsx

@@ -1,17 +1,13 @@
'use client';
import { useEffect, useRef, useState } from 'react';
import { Document } from '@langchain/core/documents';
import Navbar from './Navbar';
import Chat from './Chat';
import EmptyChat from './EmptyChat';
import crypto from 'crypto';
import { toast } from 'sonner';
import { useSearchParams } from 'next/navigation';
import { getSuggestions } from '@/lib/actions';
import { Settings } from 'lucide-react';
import Link from 'next/link';
import NextError from 'next/error';
import { useChat } from '@/lib/hooks/useChat';
export type Message = {
messageId: string;
@@ -29,539 +25,8 @@ export interface File {
fileId: string;
}
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
const chatModelProvidersKeys = Object.keys(chatModelProviders);
if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
return toast.error('No chat models available');
} else {
chatModelProvider =
chatModelProvidersKeys.find(
(provider) =>
Object.keys(chatModelProviders[provider]).length > 0,
) || chatModelProvidersKeys[0];
}
if (
chatModelProvider === 'custom_openai' &&
Object.keys(chatModelProviders[chatModelProvider]).length === 0
) {
toast.error(
"Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
);
return setHasError(true);
}
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
(!chatModelProviders[chatModelProvider] ||
Object.keys(chatModelProviders[chatModelProvider]).length === 0)
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
if (
chatModelProvider === 'custom_openai' &&
Object.keys(chatModelProviders[chatModelProvider]).length === 0
) {
toast.error(
"Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
);
return setHasError(true);
}
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
const loadMessages = async (
chatId: string,
setMessages: (messages: Message[]) => void,
setIsMessagesLoaded: (loaded: boolean) => void,
setChatHistory: (history: [string, string][]) => void,
setFocusMode: (mode: string) => void,
setNotFound: (notFound: boolean) => void,
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
if (res.status === 404) {
setNotFound(true);
setIsMessagesLoaded(true);
return;
}
const data = await res.json();
const messages = data.messages.map((msg: any) => {
return {
...msg,
...JSON.parse(msg.metadata),
};
}) as Message[];
setMessages(messages);
const history = messages.map((msg) => {
return [msg.role, msg.content];
}) as [string, string][];
console.debug(new Date(), 'app:messages_loaded');
document.title = messages[0].content;
const files = data.chat.files.map((file: any) => {
return {
fileName: file.name,
fileExtension: file.name.split('.').pop(),
fileId: file.fileId,
};
});
setFiles(files);
setFileIds(files.map((file: File) => file.fileId));
setChatHistory(history);
setFocusMode(data.chat.focusMode);
setIsMessagesLoaded(true);
};
const ChatWindow = ({ id }: { id?: string }) => {
const searchParams = useSearchParams();
const initialMessage = searchParams.get('q');
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
const [messages, setMessages] = useState<Message[]>([]);
const [files, setFiles] = useState<File[]>([]);
const [fileIds, setFileIds] = useState<string[]>([]);
const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');
const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
const [notFound, setNotFound] = useState(false);
useEffect(() => {
if (
chatId &&
!newChatCreated &&
!isMessagesLoaded &&
messages.length === 0
) {
loadMessages(
chatId,
setMessages,
setIsMessagesLoaded,
setChatHistory,
setFocusMode,
setNotFound,
setFiles,
setFileIds,
);
} else if (!chatId) {
setNewChatCreated(true);
setIsMessagesLoaded(true);
setChatId(crypto.randomBytes(20).toString('hex'));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const messagesRef = useRef<Message[]>([]);
useEffect(() => {
messagesRef.current = messages;
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isConfigReady]);
const sendMessage = async (message: string, messageId?: string) => {
if (loading) return;
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
setLoading(true);
setMessageAppeared(false);
let sources: Document[] | undefined = undefined;
let recievedMessage = '';
let added = false;
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
setMessages((prevMessages) => [
...prevMessages,
{
content: message,
messageId: messageId,
chatId: chatId!,
role: 'user',
createdAt: new Date(),
},
]);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
return;
}
if (data.type === 'sources') {
sources = data.data;
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: '',
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
},
]);
added = true;
}
setMessageAppeared(true);
}
if (data.type === 'message') {
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: data.data,
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
},
]);
added = true;
}
setMessages((prev) =>
prev.map((message) => {
if (message.messageId === data.messageId) {
return { ...message, content: message.content + data.data };
}
return message;
}),
);
recievedMessage += data.data;
setMessageAppeared(true);
}
if (data.type === 'messageEnd') {
setChatHistory((prevHistory) => [
...prevHistory,
['human', message],
['assistant', recievedMessage],
]);
setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
lastMsg.sources.length > 0 &&
!lastMsg.suggestions
) {
const suggestions = await getSuggestions(messagesRef.current);
setMessages((prev) =>
prev.map((msg) => {
if (msg.messageId === lastMsg.messageId) {
return { ...msg, suggestions: suggestions };
}
return msg;
}),
);
}
}
};
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
};
const rewrite = (messageId: string) => {
const index = messages.findIndex((msg) => msg.messageId === messageId);
if (index === -1) return;
const message = messages[index - 1];
setMessages((prev) => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});
setChatHistory((prev) => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});
sendMessage(message.content, message.messageId);
};
useEffect(() => {
if (isReady && initialMessage && isConfigReady) {
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isConfigReady, isReady, initialMessage]);
const ChatWindow = () => {
const { hasError, isReady, notFound, messages } = useChat();
if (hasError) {
return (
<div className="relative">
@@ -586,31 +51,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
<div>
{messages.length > 0 ? (
<>
<Navbar chatId={chatId!} messages={messages} />
<Chat
loading={loading}
messages={messages}
sendMessage={sendMessage}
messageAppeared={messageAppeared}
rewrite={rewrite}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<Navbar />
<Chat />
</>
) : (
<EmptyChat
sendMessage={sendMessage}
focusMode={focusMode}
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<EmptyChat />
)}
</div>
)

src/components/EmptyChat.tsx

@@ -5,27 +5,7 @@ import Link from 'next/link';
import WeatherWidget from './WeatherWidget';
import NewsArticleWidget from './NewsArticleWidget';
const EmptyChat = ({
sendMessage,
focusMode,
setFocusMode,
optimizationMode,
setOptimizationMode,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const EmptyChat = () => {
return (
<div className="relative">
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
@@ -38,17 +18,7 @@ const EmptyChat = ({
<h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
Research begins here.
</h2>
<EmptyChatMessageInput
sendMessage={sendMessage}
focusMode={focusMode}
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<EmptyChatMessageInput />
</div>
<div className="flex flex-col w-full gap-4 mt-2 sm:flex-row sm:justify-center">
<div className="flex-1 w-full">

src/components/EmptyChatMessageInput.tsx

@@ -1,34 +1,15 @@
import { ArrowRight } from 'lucide-react';
import { useEffect, useRef, useState } from 'react';
import TextareaAutosize from 'react-textarea-autosize';
import CopilotToggle from './MessageInputActions/Copilot';
import Focus from './MessageInputActions/Focus';
import Optimization from './MessageInputActions/Optimization';
import Attach from './MessageInputActions/Attach';
import { File } from './ChatWindow';
import { useChat } from '@/lib/hooks/useChat';
const EmptyChatMessageInput = ({
sendMessage,
focusMode,
setFocusMode,
optimizationMode,
setOptimizationMode,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const EmptyChatMessageInput = () => {
const { sendMessage } = useChat();
/* const [copilotEnabled, setCopilotEnabled] = useState(false); */
const [message, setMessage] = useState('');
const inputRef = useRef<HTMLTextAreaElement | null>(null);
@@ -84,20 +65,11 @@ const EmptyChatMessageInput = ({
/>
<div className="flex flex-row items-center justify-between mt-4">
<div className="flex flex-row items-center space-x-2 lg:space-x-4">
<Focus focusMode={focusMode} setFocusMode={setFocusMode} />
<Attach
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
showText
/>
<Focus />
<Attach showText />
</div>
<div className="flex flex-row items-center space-x-1 sm:space-x-4">
<Optimization
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
/>
<Optimization />
<button
disabled={message.trim().length === 0}
className="bg-[#24A0ED] text-white disabled:text-black/50 dark:disabled:text-white/50 disabled:bg-[#e0e0dc] dark:disabled:bg-[#ececec21] hover:bg-opacity-85 transition duration-100 rounded-full p-2"

src/components/MessageBox.tsx

@@ -20,32 +20,36 @@ import SearchImages from './SearchImages';
import SearchVideos from './SearchVideos';
import { useSpeech } from 'react-text-to-speech';
import ThinkBox from './ThinkBox';
import { useChat } from '@/lib/hooks/useChat';
const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
return <ThinkBox content={children as string} />;
const ThinkTagProcessor = ({
children,
thinkingEnded,
}: {
children: React.ReactNode;
thinkingEnded: boolean;
}) => {
return (
<ThinkBox content={children as string} thinkingEnded={thinkingEnded} />
);
};
const MessageBox = ({
message,
messageIndex,
history,
loading,
dividerRef,
isLast,
rewrite,
sendMessage,
}: {
message: Message;
messageIndex: number;
history: Message[];
loading: boolean;
dividerRef?: MutableRefObject<HTMLDivElement | null>;
isLast: boolean;
rewrite: (messageId: string) => void;
sendMessage: (message: string) => void;
}) => {
const { loading, messages: history, sendMessage, rewrite } = useChat();
const [parsedMessage, setParsedMessage] = useState(message.content);
const [speechMessage, setSpeechMessage] = useState(message.content);
const [thinkingEnded, setThinkingEnded] = useState(false);
useEffect(() => {
const citationRegex = /\[([^\]]+)\]/g;
@@ -61,6 +65,10 @@ const MessageBox = ({
}
}
if (message.role === 'assistant' && message.content.includes('</think>')) {
setThinkingEnded(true);
}
if (
message.role === 'assistant' &&
message?.sources &&
@@ -88,7 +96,7 @@ const MessageBox = ({
if (url) {
return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
} else {
-return `[${numStr}]`;
+return ``;
}
})
.join('');
@@ -99,6 +107,14 @@ const MessageBox = ({
);
setSpeechMessage(message.content.replace(regex, ''));
return;
} else if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length === 0
) {
setParsedMessage(processedMessage.replace(regex, ''));
setSpeechMessage(message.content.replace(regex, ''));
return;
}
setSpeechMessage(message.content.replace(regex, ''));
@@ -111,6 +127,9 @@ const MessageBox = ({
overrides: {
think: {
component: ThinkTagProcessor,
props: {
thinkingEnded: thinkingEnded,
},
},
},
};

src/components/MessageInput.tsx

@@ -6,22 +6,11 @@ import Attach from './MessageInputActions/Attach';
import CopilotToggle from './MessageInputActions/Copilot';
import { File } from './ChatWindow';
import AttachSmall from './MessageInputActions/AttachSmall';
import { useChat } from '@/lib/hooks/useChat';
const MessageInput = () => {
const { loading, sendMessage } = useChat();
const MessageInput = ({
sendMessage,
loading,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
loading: boolean;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const [message, setMessage] = useState('');
const [textareaRows, setTextareaRows] = useState(1);
@@ -79,14 +68,7 @@ const MessageInput = ({
mode === 'multi' ? 'flex-col rounded-lg' : 'flex-row rounded-full',
)}
>
{mode === 'single' && (
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
)}
{mode === 'single' && <AttachSmall />}
<TextareaAutosize
ref={inputRef}
value={message}
@@ -113,12 +95,7 @@ const MessageInput = ({
)}
{mode === 'multi' && (
<div className="flex flex-row items-center justify-between w-full pt-2">
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<AttachSmall />
<div className="flex flex-row items-center space-x-4">
<CopilotToggle
copilotEnabled={copilotEnabled}

src/components/MessageInputActions/Attach.tsx

@@ -7,21 +7,11 @@ import {
} from '@headlessui/react';
import { CopyPlus, File, LoaderCircle, Plus, Trash } from 'lucide-react';
import { Fragment, useRef, useState } from 'react';
import { File as FileType } from '../ChatWindow';
import { useChat } from '@/lib/hooks/useChat';
const Attach = ({ showText }: { showText?: boolean }) => {
const { files, setFiles, setFileIds, fileIds } = useChat();
const Attach = ({
fileIds,
setFileIds,
showText,
files,
setFiles,
}: {
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
showText?: boolean;
files: FileType[];
setFiles: (files: FileType[]) => void;
}) => {
const [loading, setLoading] = useState(false);
const fileInputRef = useRef<any>();
@@ -142,8 +132,8 @@ const Attach = ({
key={i}
className="flex flex-row items-center justify-start w-full space-x-3 p-3"
>
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
<div className="bg-light-100 dark:bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-black/70 dark:text-white/70" />
</div>
<p className="text-black/70 dark:text-white/70 text-sm">
{file.fileName.length > 25

src/components/MessageInputActions/AttachSmall.tsx

@@ -8,18 +8,11 @@ import {
import { CopyPlus, File, LoaderCircle, Plus, Trash } from 'lucide-react';
import { Fragment, useRef, useState } from 'react';
import { File as FileType } from '../ChatWindow';
import { useChat } from '@/lib/hooks/useChat';
const AttachSmall = () => {
const { files, setFiles, setFileIds, fileIds } = useChat();
const AttachSmall = ({
fileIds,
setFileIds,
files,
setFiles,
}: {
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: FileType[];
setFiles: (files: FileType[]) => void;
}) => {
const [loading, setLoading] = useState(false);
const fileInputRef = useRef<any>();
@@ -114,8 +107,8 @@ const AttachSmall = ({
key={i}
className="flex flex-row items-center justify-start w-full space-x-3 p-3"
>
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
<div className="bg-light-100 dark:bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-black/70 dark:text-white/70" />
</div>
<p className="text-black/70 dark:text-white/70 text-sm">
{file.fileName.length > 25


@@ -15,6 +15,7 @@ import {
} from '@headlessui/react';
import { SiReddit, SiYoutube } from '@icons-pack/react-simple-icons';
import { Fragment } from 'react';
import { useChat } from '@/lib/hooks/useChat';
const focusModes = [
{
@@ -55,13 +56,9 @@ const focusModes = [
},
];
const Focus = ({
focusMode,
setFocusMode,
}: {
focusMode: string;
setFocusMode: (mode: string) => void;
}) => {
const Focus = () => {
const { focusMode, setFocusMode } = useChat();
return (
<Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg mt-[6.5px]">
<PopoverButton


@@ -7,6 +7,7 @@ import {
Transition,
} from '@headlessui/react';
import { Fragment } from 'react';
import { useChat } from '@/lib/hooks/useChat';
const OptimizationModes = [
{
@@ -34,13 +35,9 @@ const OptimizationModes = [
},
];
const Optimization = ({
optimizationMode,
setOptimizationMode,
}: {
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
}) => {
const Optimization = () => {
const { optimizationMode, setOptimizationMode } = useChat();
return (
<Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg">
<PopoverButton


@@ -10,6 +10,7 @@ import {
Transition,
} from '@headlessui/react';
import jsPDF from 'jspdf';
import { useChat } from '@/lib/hooks/useChat';
const downloadFile = (filename: string, content: string, type: string) => {
const blob = new Blob([content], { type });
@@ -118,16 +119,12 @@ const exportAsPDF = (messages: Message[], title: string) => {
doc.save(`${title || 'chat'}.pdf`);
};
const Navbar = ({
chatId,
messages,
}: {
messages: Message[];
chatId: string;
}) => {
const Navbar = () => {
const [title, setTitle] = useState<string>('');
const [timeAgo, setTimeAgo] = useState<string>('');
const { messages, chatId } = useChat();
useEffect(() => {
if (messages.length > 0) {
const newTitle =
@@ -206,7 +203,7 @@ const Navbar = ({
</PopoverPanel>
</Transition>
</Popover>
<DeleteChat redirect chatId={chatId} chats={[]} setChats={() => {}} />
<DeleteChat redirect chatId={chatId!} chats={[]} setChats={() => {}} />
</div>
</div>
);


@@ -1,15 +1,23 @@
'use client';
import { useState } from 'react';
import { cn } from '@/lib/utils';
import { useEffect, useState } from 'react';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
content: string;
thinkingEnded: boolean;
}
const ThinkBox = ({ content }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(false);
const ThinkBox = ({ content, thinkingEnded }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(true);
useEffect(() => {
if (thinkingEnded) {
setIsExpanded(false);
} else {
setIsExpanded(true);
}
}, [thinkingEnded]);
return (
<div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">


@@ -9,7 +9,10 @@ const WeatherWidget = () => {
humidity: 0,
windSpeed: 0,
icon: '',
temperatureUnit: 'C',
windSpeedUnit: 'm/s',
});
const [loading, setLoading] = useState(true);
useEffect(() => {
@@ -73,6 +76,7 @@ const WeatherWidget = () => {
body: JSON.stringify({
lat: location.latitude,
lng: location.longitude,
measureUnit: localStorage.getItem('measureUnit') ?? 'Metric',
}),
});
@@ -91,6 +95,8 @@ const WeatherWidget = () => {
humidity: data.humidity,
windSpeed: data.windSpeed,
icon: data.icon,
temperatureUnit: data.temperatureUnit,
windSpeedUnit: data.windSpeedUnit,
});
setLoading(false);
});
@@ -125,7 +131,7 @@ const WeatherWidget = () => {
className="h-10 w-auto"
/>
<span className="text-base font-semibold text-black dark:text-white">
{data.temperature}°C
{data.temperature}°{data.temperatureUnit}
</span>
</div>
<div className="flex flex-col justify-between flex-1 h-full py-1">
@@ -135,7 +141,7 @@ const WeatherWidget = () => {
</span>
<span className="flex items-center text-xs text-black/60 dark:text-white/60">
<Wind className="w-3 h-3 mr-1" />
{data.windSpeed} km/h
{data.windSpeed} {data.windSpeedUnit}
</span>
</div>
<span className="text-xs text-black/60 dark:text-white/60 mt-1">

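The widget now renders whichever units the weather API reports instead of hard-coded °C and km/h. The server-side mapping is not part of this diff; a plausible sketch, assuming 'Imperial' maps to Fahrenheit and mph while anything else falls back to the metric defaults seen in the widget's initial state:

// Hypothetical helper (not from this diff): derive display units from the
// user's stored measureUnit preference.
type Units = { temperatureUnit: 'C' | 'F'; windSpeedUnit: 'm/s' | 'mph' };

const unitsFor = (measureUnit: string): Units =>
  measureUnit === 'Imperial'
    ? { temperatureUnit: 'F', windSpeedUnit: 'mph' }
    : { temperatureUnit: 'C', windSpeedUnit: 'm/s' };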

@@ -3,32 +3,18 @@ import {
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: What is a cat?
Rephrased: A cat
2. Follow up question: What is a car? How does it works?
Rephrased: Car working
3. Follow up question: How does an AC work?
Rephrased: AC working
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type ImageSearchChainInput = {
@@ -54,12 +40,39 @@ const createImageSearchChain = (llm: BaseChatModel) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
ChatPromptTemplate.fromMessages([
['system', imageSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
],
['assistant', '<query>A cat</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
],
['assistant', '<query>Car working</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>[\s\S]*?<\/think>/g, '');
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});

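For reference, a condensed sketch of the few-shot pattern this rewrite introduces: the example exchanges are baked in as literal user/assistant turns, and only the final turn carries template variables, which steers smaller models toward emitting exactly one <query> element.

import { ChatPromptTemplate } from '@langchain/core/prompts';

// Minimal sketch, not the full prompt used above.
const prompt = ChatPromptTemplate.fromMessages([
  ['system', 'Rephrase the follow-up as a standalone search query inside <query></query>.'],
  ['user', '<follow_up>\nWhat is a cat?\n</follow_up>'],
  ['assistant', '<query>A cat</query>'],
  ['user', '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>'],
]);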

@@ -3,33 +3,19 @@ import {
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
const videoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
@@ -55,12 +41,37 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
ChatPromptTemplate.fromMessages([
['system', videoSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
],
['assistant', '<query>How does a car work?</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
],
['assistant', '<query>Theory of relativity</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>[\s\S]*?<\/think>/g, '');
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['youtube'],
});
@@ -92,8 +103,8 @@ const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
const videoSearchChain = createVideoSearchChain(llm);
return videoSearchChain.invoke(input);
};
export default handleVideoSearch;


@@ -31,6 +31,7 @@ interface Config {
};
OLLAMA: {
API_URL: string;
API_KEY: string;
};
DEEPSEEK: {
API_KEY: string;
@@ -86,6 +87,8 @@ export const getSearxngApiEndpoint = () =>
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getOllamaApiKey = () => loadConfig().MODELS.OLLAMA.API_KEY;
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;

src/lib/hooks/useChat.tsx (new file, 643 lines)

@@ -0,0 +1,643 @@
'use client';
import { Message } from '@/components/ChatWindow';
import { createContext, useContext, useEffect, useRef, useState } from 'react';
import crypto from 'crypto';
import { useSearchParams } from 'next/navigation';
import { toast } from 'sonner';
import { Document } from '@langchain/core/documents';
import { getSuggestions } from '../actions';
type ChatContext = {
messages: Message[];
chatHistory: [string, string][];
files: File[];
fileIds: string[];
focusMode: string;
chatId: string | undefined;
optimizationMode: string;
isMessagesLoaded: boolean;
loading: boolean;
notFound: boolean;
messageAppeared: boolean;
isReady: boolean;
hasError: boolean;
setOptimizationMode: (mode: string) => void;
setFocusMode: (mode: string) => void;
setFiles: (files: File[]) => void;
setFileIds: (fileIds: string[]) => void;
sendMessage: (
message: string,
messageId?: string,
rewrite?: boolean,
) => Promise<void>;
rewrite: (messageId: string) => void;
};
export interface File {
fileName: string;
fileExtension: string;
fileId: string;
}
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
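// Validates the model selections cached in localStorage against /api/models,
// repairing any entries that point at providers or models that no longer
// exist, and flags an error state when no usable provider is configured.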
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
const chatModelProvidersKeys = Object.keys(chatModelProviders);
if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
return toast.error('No chat models available');
} else {
chatModelProvider =
chatModelProvidersKeys.find(
(provider) =>
Object.keys(chatModelProviders[provider]).length > 0,
) || chatModelProvidersKeys[0];
}
if (
chatModelProvider === 'custom_openai' &&
Object.keys(chatModelProviders[chatModelProvider]).length === 0
) {
toast.error(
"Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
);
return setHasError(true);
}
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
(!chatModelProviders[chatModelProvider] ||
Object.keys(chatModelProviders[chatModelProvider]).length === 0)
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
if (
chatModelProvider === 'custom_openai' &&
Object.keys(chatModelProviders[chatModelProvider]).length === 0
) {
toast.error(
"Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
);
return setHasError(true);
}
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
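// Fetches an existing chat by id and hydrates messages, chat history, files,
// and focus mode; flags notFound on a 404 so the page can render accordingly.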
const loadMessages = async (
chatId: string,
setMessages: (messages: Message[]) => void,
setIsMessagesLoaded: (loaded: boolean) => void,
setChatHistory: (history: [string, string][]) => void,
setFocusMode: (mode: string) => void,
setNotFound: (notFound: boolean) => void,
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
if (res.status === 404) {
setNotFound(true);
setIsMessagesLoaded(true);
return;
}
const data = await res.json();
const messages = data.messages.map((msg: any) => {
return {
...msg,
...JSON.parse(msg.metadata),
};
}) as Message[];
setMessages(messages);
const history = messages.map((msg) => {
return [msg.role, msg.content];
}) as [string, string][];
console.debug(new Date(), 'app:messages_loaded');
document.title = messages[0].content;
const files = data.chat.files.map((file: any) => {
return {
fileName: file.name,
fileExtension: file.name.split('.').pop(),
fileId: file.fileId,
};
});
setFiles(files);
setFileIds(files.map((file: File) => file.fileId));
setChatHistory(history);
setFocusMode(data.chat.focusMode);
setIsMessagesLoaded(true);
};
export const chatContext = createContext<ChatContext>({
chatHistory: [],
chatId: '',
fileIds: [],
files: [],
focusMode: '',
hasError: false,
isMessagesLoaded: false,
isReady: false,
loading: false,
messageAppeared: false,
messages: [],
notFound: false,
optimizationMode: '',
rewrite: () => {},
sendMessage: async () => {},
setFileIds: () => {},
setFiles: () => {},
setFocusMode: () => {},
setOptimizationMode: () => {},
});
export const ChatProvider = ({
children,
id,
}: {
children: React.ReactNode;
id?: string;
}) => {
const searchParams = useSearchParams();
const initialMessage = searchParams.get('q');
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
const [messages, setMessages] = useState<Message[]>([]);
const [files, setFiles] = useState<File[]>([]);
const [fileIds, setFileIds] = useState<string[]>([]);
const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');
const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
const [notFound, setNotFound] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
const messagesRef = useRef<Message[]>([]);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
if (
chatId &&
!newChatCreated &&
!isMessagesLoaded &&
messages.length === 0
) {
loadMessages(
chatId,
setMessages,
setIsMessagesLoaded,
setChatHistory,
setFocusMode,
setNotFound,
setFiles,
setFileIds,
);
} else if (!chatId) {
setNewChatCreated(true);
setIsMessagesLoaded(true);
setChatId(crypto.randomBytes(20).toString('hex'));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
messagesRef.current = messages;
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isConfigReady]);
const rewrite = (messageId: string) => {
const index = messages.findIndex((msg) => msg.messageId === messageId);
if (index === -1) return;
const message = messages[index - 1];
setMessages((prev) => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});
setChatHistory((prev) => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});
sendMessage(message.content, message.messageId, true);
};
useEffect(() => {
if (isReady && initialMessage && isConfigReady) {
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isConfigReady, isReady, initialMessage]);
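// Streams the assistant reply as newline-delimited JSON events: 'sources' and
// 'message' build the reply incrementally, while 'messageEnd' finalizes the
// history and triggers auto image/video search and follow-up suggestions.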
const sendMessage: ChatContext['sendMessage'] = async (
message,
messageId,
rewrite = false,
) => {
if (loading) return;
setLoading(true);
setMessageAppeared(false);
let sources: Document[] | undefined = undefined;
let receivedMessage = '';
let added = false;
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
setMessages((prevMessages) => [
...prevMessages,
{
content: message,
messageId: messageId,
chatId: chatId!,
role: 'user',
createdAt: new Date(),
},
]);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
return;
}
if (data.type === 'sources') {
sources = data.data;
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: '',
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
},
]);
added = true;
}
setMessageAppeared(true);
}
if (data.type === 'message') {
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: data.data,
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
},
]);
added = true;
}
setMessages((prev) =>
prev.map((message) => {
if (message.messageId === data.messageId) {
return { ...message, content: message.content + data.data };
}
return message;
}),
);
receivedMessage += data.data;
setMessageAppeared(true);
}
if (data.type === 'messageEnd') {
setChatHistory((prevHistory) => [
...prevHistory,
['human', message],
['assistant', receivedMessage],
]);
setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
lastMsg.sources.length > 0 &&
!lastMsg.suggestions
) {
const suggestions = await getSuggestions(messagesRef.current);
setMessages((prev) =>
prev.map((msg) => {
if (msg.messageId === lastMsg.messageId) {
return { ...msg, suggestions: suggestions };
}
return msg;
}),
);
}
}
};
const messageIndex = messages.findIndex((m) => m.messageId === messageId);
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: rewrite
? chatHistory.slice(0, messageIndex === -1 ? undefined : messageIndex)
: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
        // Parse only complete lines; hold any trailing partial line in the
        // buffer so already-handled messages are not re-processed when the
        // next chunk arrives.
        const lines = partialChunk.split('\n');
        partialChunk = lines.pop() ?? '';
        for (const line of lines) {
          if (!line.trim()) continue;
          try {
            messageHandler(JSON.parse(line));
          } catch (error) {
            console.warn('Skipping malformed stream line:', line);
          }
        }
      }
      // Flush a final line that arrived without a trailing newline.
      if (partialChunk.trim()) {
        try {
          messageHandler(JSON.parse(partialChunk));
        } catch (error) {
          console.warn('Discarding incomplete trailing chunk');
        }
      }
};
return (
<chatContext.Provider
value={{
messages,
chatHistory,
files,
fileIds,
focusMode,
chatId,
hasError,
isMessagesLoaded,
isReady,
loading,
messageAppeared,
notFound,
optimizationMode,
setFileIds,
setFiles,
setFocusMode,
setOptimizationMode,
rewrite,
sendMessage,
}}
>
{children}
</chatContext.Provider>
);
};
export const useChat = () => {
const ctx = useContext(chatContext);
return ctx;
};

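Since the hook is context-backed, pages must mount the provider once. A minimal usage sketch, assuming the chat id arrives via route params (component names here are illustrative, not from the repo):

import { ChatProvider, useChat } from '@/lib/hooks/useChat';

// Wrap the chat UI once; every descendant can then call useChat().
const ChatPage = ({ params }: { params: { chatId?: string } }) => (
  <ChatProvider id={params.chatId}>
    <ChatBody />
  </ChatProvider>
);

const ChatBody = () => {
  const { messages, loading } = useChat();
  return <div>{loading ? 'Thinking…' : `${messages.length} messages`}</div>;
};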

@@ -38,7 +38,7 @@ export const loadAimlApiChatModels = async () => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: apiKey,
apiKey: apiKey,
modelName: model.id,
temperature: 0.7,
configuration: {
@@ -76,7 +76,7 @@ export const loadAimlApiEmbeddingModels = async () => {
embeddingModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: apiKey,
apiKey: apiKey,
modelName: model.id,
configuration: {
baseURL: API_URL,


@@ -9,6 +9,18 @@ export const PROVIDER_INFO = {
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 4.1 Opus',
key: 'claude-opus-4-1-20250805',
},
{
displayName: 'Claude 4 Opus',
key: 'claude-opus-4-20250514',
},
{
displayName: 'Claude 4 Sonnet',
key: 'claude-sonnet-4-20250514',
},
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',


@@ -31,7 +31,7 @@ export const loadDeepseekChatModels = async () => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: deepseekApiKey,
apiKey: deepseekApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {


@@ -14,16 +14,16 @@ import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.5 Flash Preview 05-20',
key: 'gemini-2.5-flash-preview-05-20',
displayName: 'Gemini 2.5 Flash',
key: 'gemini-2.5-flash',
},
{
displayName: 'Gemini 2.5 Pro Preview',
key: 'gemini-2.5-pro-preview-05-06',
displayName: 'Gemini 2.5 Flash-Lite',
key: 'gemini-2.5-flash-lite',
},
{
displayName: 'Gemini 2.5 Pro Experimental',
key: 'gemini-2.5-pro-preview-05-06',
displayName: 'Gemini 2.5 Pro',
key: 'gemini-2.5-pro',
},
{
displayName: 'Gemini 2.0 Flash',
@@ -75,7 +75,7 @@ export const loadGeminiChatModels = async () => {
displayName: model.displayName,
model: new ChatGoogleGenerativeAI({
apiKey: geminiApiKey,
modelName: model.key,
model: model.key,
temperature: 0.7,
}) as unknown as BaseChatModel,
};
@@ -108,7 +108,7 @@ export const loadGeminiEmbeddingModels = async () => {
return embeddingModels;
} catch (err) {
console.error(`Error loading OpenAI embeddings models: ${err}`);
console.error(`Error loading Gemini embeddings models: ${err}`);
return {};
}
};


@@ -1,4 +1,4 @@
import { ChatOpenAI } from '@langchain/openai';
import { ChatGroq } from '@langchain/groq';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
@@ -28,13 +28,10 @@ export const loadGroqChatModels = async () => {
groqChatModels.forEach((model: any) => {
chatModels[model.id] = {
displayName: model.id,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
modelName: model.id,
model: new ChatGroq({
apiKey: groqApiKey,
model: model.id,
temperature: 0.7,
configuration: {
baseURL: 'https://api.groq.com/openai/v1',
},
}) as unknown as BaseChatModel,
};
});


@@ -118,9 +118,13 @@ export const getAvailableChatModelProviders = async () => {
[customOpenAiModelName]: {
displayName: customOpenAiModelName,
model: new ChatOpenAI({
openAIApiKey: customOpenAiApiKey,
apiKey: customOpenAiApiKey,
modelName: customOpenAiModelName,
temperature: 0.7,
...((() => {
const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];
const isTemperatureRestricted = temperatureRestrictedModels.some(
  (restrictedModel) => customOpenAiModelName.includes(restrictedModel),
);
return isTemperatureRestricted ? {} : { temperature: 0.7 };
})()),
configuration: {
baseURL: customOpenAiApiUrl,
},

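The inline IIFE above is a conditional spread: restricted models get no temperature key at all rather than a default of 0.7, since those model families only accept the default value. The same guard factored into a small helper, as a readability sketch (not code from this diff):

// These model families only accept the default temperature, so the
// parameter is omitted entirely instead of being set to 0.7.
const TEMPERATURE_RESTRICTED = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];

const temperatureFor = (modelName: string) =>
  TEMPERATURE_RESTRICTED.some((m) => modelName.includes(m))
    ? {}
    : { temperature: 0.7 };

// Usage: new ChatOpenAI({ apiKey, modelName, ...temperatureFor(modelName) })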

@@ -47,7 +47,7 @@ export const loadLMStudioChatModels = async () => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: 'lm-studio',
apiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
@@ -83,7 +83,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
embeddingsModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: 'lm-studio',
apiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},


@@ -1,16 +1,17 @@
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { getKeepAlive, getOllamaApiEndpoint, getOllamaApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'ollama',
displayName: 'Ollama',
};
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { ChatOllama } from '@langchain/ollama';
import { OllamaEmbeddings } from '@langchain/ollama';
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
const ollamaApiKey = getOllamaApiKey();
if (!ollamaApiEndpoint) return {};
@@ -33,6 +34,9 @@ export const loadOllamaChatModels = async () => {
model: model.model,
temperature: 0.7,
keepAlive: getKeepAlive(),
...(ollamaApiKey
? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
: {}),
}),
};
});
@@ -46,6 +50,7 @@ export const loadOllamaChatModels = async () => {
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
const ollamaApiKey = getOllamaApiKey();
if (!ollamaApiEndpoint) return {};
@@ -66,6 +71,9 @@ export const loadOllamaEmbeddingModels = async () => {
model: new OllamaEmbeddings({
baseUrl: ollamaApiEndpoint,
model: model.model,
...(ollamaApiKey
? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
: {}),
}),
};
});


@@ -26,6 +26,10 @@ const openaiChatModels: Record<string, string>[] = [
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4o (2024-05-13)',
key: 'gpt-4o-2024-05-13',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
@@ -42,6 +46,34 @@ const openaiChatModels: Record<string, string>[] = [
displayName: 'GPT 4.1',
key: 'gpt-4.1',
},
{
displayName: 'GPT 5 nano',
key: 'gpt-5-nano',
},
{
displayName: 'GPT 5',
key: 'gpt-5',
},
{
displayName: 'GPT 5 Mini',
key: 'gpt-5-mini',
},
{
displayName: 'o1',
key: 'o1',
},
{
displayName: 'o3',
key: 'o3',
},
{
displayName: 'o3 Mini',
key: 'o3-mini',
},
{
displayName: 'o4 Mini',
key: 'o4-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
@@ -64,13 +96,23 @@ export const loadOpenAIChatModels = async () => {
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
// Models that only support temperature = 1
const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];
const isTemperatureRestricted = temperatureRestrictedModels.some(
  (restrictedModel) => model.key.includes(restrictedModel),
);
const modelConfig: any = {
apiKey: openaiApiKey,
modelName: model.key,
};
// Only add temperature if the model supports it
if (!isTemperatureRestricted) {
modelConfig.temperature = 0.7;
}
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: openaiApiKey,
modelName: model.key,
temperature: 0.7,
}) as unknown as BaseChatModel,
model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel,
};
});
@@ -93,7 +135,7 @@ export const loadOpenAIEmbeddingModels = async () => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey: openaiApiKey,
apiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};


@@ -1,8 +1,11 @@
import { BaseMessage } from '@langchain/core/messages';
import { BaseMessage, isAIMessage } from '@langchain/core/messages';
const formatChatHistoryAsString = (history: BaseMessage[]) => {
return history
.map((message) => `${message._getType()}: ${message.content}`)
.map(
(message) =>
`${isAIMessage(message) ? 'AI' : 'User'}: ${message.content}`,
)
.join('\n');
};

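A quick usage sketch of the updated formatter (the import path follows this repo's src/lib layout and is assumed here):

import { AIMessage, HumanMessage } from '@langchain/core/messages';
import formatChatHistoryAsString from '@/lib/utils/formatHistory';

// Turns are now labeled by message class instead of the internal
// _getType() tag ('ai'/'human'):
formatChatHistoryAsString([
  new HumanMessage('What is a cat?'),
  new AIMessage('A small domesticated feline.'),
]);
// => 'User: What is a cat?\nAI: A small domesticated feline.'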
yarn.lock (701 lines changed): diff suppressed because it is too large