Compare commits

...

121 Commits

Author SHA1 Message Date
ItzCrazyKns
cf3cc4e638 feat(migrator): use DATA_DIR env var 2025-10-06 10:13:34 +05:30
ItzCrazyKns
46b9e41100 feat(db-table): add default values for createdAt 2025-10-06 08:59:01 +05:30
sjiampojamarn
02adafbd4b Handling double stringify JSON parsing 2025-10-05 10:46:01 -07:00
ItzCrazyKns
f141d4719c feat(assets): update preview image 2025-10-05 22:32:17 +05:30
ItzCrazyKns
f18e132d1b feat(news-widget): fix loading animation 2025-10-02 17:55:56 +05:30
ItzCrazyKns
37317992b4 feat(app): lint & beautify 2025-10-02 17:17:52 +05:30
ItzCrazyKns
8b588824f2 feat(css): add line-clamp-2 class 2025-10-02 17:17:44 +05:30
ItzCrazyKns
a19cf00873 feat(lineOutputParser): return undefined on invalid tags 2025-10-02 17:17:31 +05:30
ItzCrazyKns
5cc11ac0bf feat(message-handling): fix repeated first token, think tag handling
closes #889
2025-10-02 17:15:54 +05:30
Kushagra Srivastava
d611ddaab9 Merge pull request #880 from ruturaj-rathod/fix/body-validation-579
validate the request body of api/chat to prevent malformed request body
2025-09-27 19:52:14 +05:30
ItzCrazyKns
3b41905abb feat(app): lint & beautify 2025-09-27 19:51:36 +05:30
ItzCrazyKns
b9071ceed7 feat(navbar): fix margins on large screens 2025-09-26 14:39:00 +05:30
ItzCrazyKns
bb5002de85 feat(navbar): fix margins on mobiles 2025-09-26 14:37:59 +05:30
ItzCrazyKns
fc99653441 feat(contributing): update steps 2025-09-26 13:56:23 +05:30
ItzCrazyKns
23dde9fa59 feat(app): lint & beautify 2025-09-26 13:54:08 +05:30
ItzCrazyKns
dde6c8d719 feat(app): enhance ui/ux 2025-09-26 13:53:48 +05:30
ItzCrazyKns
650c69e04f feat(db): fix migration errors, explicitly migrate based on index 2025-09-26 13:44:39 +05:30
ItzCrazyKns
984163bbbc feat(app): lint & beautify 2025-09-25 18:57:21 +05:30
ItzCrazyKns
5f18fc1d22 feat(inputs): update theme 2025-09-25 18:57:06 +05:30
ItzCrazyKns
b1426e8638 feat(widgets): update theme 2025-09-25 18:56:51 +05:30
ItzCrazyKns
7337f3423d feat(sidebar): make sidebar floating 2025-09-25 18:56:35 +05:30
ItzCrazyKns
8f728a2518 feat(discover): update discover UI, add new font 2025-09-25 18:56:04 +05:30
ItzCrazyKns
bd8e3dfa2e feat(tailwind-config): update theme 2025-09-25 18:53:34 +05:30
ItzCrazyKns
9c6e42f7d8 Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-09-25 18:48:27 +05:30
ruturaj
fabb48cc2f move schema validation into route file and remove validation file 2025-09-23 14:57:55 +05:30
ruturaj
c46b421219 validate the request body of api/chat to prevent malformed request body 2025-09-19 16:06:46 +05:30
ItzCrazyKns
8dc24c2d1a Merge pull request #877 from ramkrishna2910/lemonade
Lemonade integration to run local LLMs with NPU and GPU acceleration
2025-09-18 21:15:20 +05:30
Ramakrishnan Sivakumar
8afcdd044c Update readme 2025-09-17 12:54:59 -07:00
Ramakrishnan Sivakumar
5b12e99335 update docs 2025-09-17 12:53:11 -07:00
Ramakrishnan Sivakumar
5b5e83a3a0 Add lemonade integration 2025-09-17 12:28:02 -07:00
ItzCrazyKns
6dd33aa33c Merge pull request #876 from fizikiukas/patch-1
Add deployment badge for Hostinger
2025-09-17 23:44:00 +05:30
Valentinas Čirba
e705952503 Add deployment badge for Hostinger 2025-09-17 18:16:35 +03:00
ItzCrazyKns
b8e4152e77 Merge pull request #857 from skoved/fix-light-mode
make file icon in attachment modal in chat page fit light theme better
2025-09-12 21:16:46 +05:30
ItzCrazyKns
c8ac9279bd Merge pull request #866 from agungbesti/feat/add-openai-models
feat: add new OpenAI models with proper temperature parameter handling
2025-09-12 21:15:09 +05:30
akubesti
6f367c34a8 feat: add gpt-5, gpt-5-mini, o3 models and remove gpt-5-chat-latest
- Add new OpenAI models: gpt-5, gpt-5-mini, and o3 series

- Fix temperature parameter handling for o3 models

- Update models list to ensure compatibility
2025-09-12 22:22:16 +07:00
akubesti
328b12ffbe feat: add new OpenAI models with proper temperature parameter handling
- Add GPT 4.1 series and o1/o3/o4 models with temperature compatibility fixes

- Remove gpt-5/gpt-5-mini models due to organization verification restrictions

- Fix 400 errors for models that only support default temperature values
2025-09-11 16:38:01 +07:00
ItzCrazyKns
536ec24093 feat(search): use single prompt for all focus modes 2025-09-03 14:37:11 +05:30
ItzCrazyKns
bb9eab7aa7 feat(db): add migrations 2025-09-03 14:36:47 +05:30
ItzCrazyKns
b0e8a33f1d feat(chat-hook): get suggestions when sources is > 0 2025-09-02 10:42:28 +05:30
ItzCrazyKns
a268ce345c feat(message-rendering): create citation component
Create new citation component to fix errors with tailwind not compiling CSS classes for the anchor tag
2025-08-30 20:48:26 +05:30
ItzCrazyKns
7b46b815c1 feat(components): handle new sections 2025-08-29 12:45:27 +05:30
ItzCrazyKns
d6b02db37a feat(chat-hook): handle messages as separate entities 2025-08-29 12:45:13 +05:30
ItzCrazyKns
34fa52ad12 feat(chat-api): handle messages as separate entities 2025-08-29 12:44:40 +05:30
ItzCrazyKns
266c333b29 feat(chat): only add eventListener on mount 2025-08-29 12:42:38 +05:30
ItzCrazyKns
a6f3d98aef feat(get-suggestion): correct parameter name 2025-08-29 12:42:00 +05:30
ItzCrazyKns
705ae464ad feat(chat-hook): load chat based on id after reload 2025-08-29 10:23:15 +05:30
skoved
d8486e90bb make file icon in attachment modal in chat page fit light theme better
give the file icon in the attachment modal on the chat page an off-white background so that it matches the light theme better and looks the same as the attachment modal on the home page
2025-08-27 09:43:09 -04:00
ItzCrazyKns
238bcaff2b Delete .github/FUNDING.yml 2025-08-27 16:23:39 +05:30
ItzCrazyKns
6f7c55b783 Update FUNDING.yml with Patreon username
Added Patreon username for funding support.
2025-08-27 16:19:53 +05:30
ItzCrazyKns
83a0cffe1b Merge pull request #828 from justinmayer/patch-1
docs: Add instructions for local OpenAI-API-compatible LLMs
2025-08-27 14:38:55 +05:30
ItzCrazyKns
829ae59944 Merge pull request #855 from skoved/fix-light-mode
fix text color for topic names on discover page in light mode
2025-08-27 14:38:13 +05:30
skoved
a546eb18a1 make file icon in attachment modal fit light theme better
make the file icon in the attachment modal black on an off-white background so that it matches the light theme better and looks stylistically equivalent to its look in dark mode
2025-08-26 21:53:51 -04:00
skoved
ff1ca56157 fix text color for topic names on discover page in light mode
make light mode color for topic text black so it is readable
2025-08-26 21:16:21 -04:00
ItzCrazyKns
30725b5d6d feat(attach): remove unused import 2025-08-25 21:48:08 +05:30
ItzCrazyKns
8dc54efbdd feat(chat-route): lint & beautify 2025-08-21 17:48:55 +05:30
ItzCrazyKns
72f26b4370 feat(upload): save files uploaded after chat created 2025-08-21 17:47:49 +05:30
ItzCrazyKns
f680188905 feat(ollama): add ability to provide api key 2025-08-20 20:32:13 +05:30
ItzCrazyKns
0b15bfbe32 feat(app): switch to useChat hook 2025-08-20 20:21:06 +05:30
ItzCrazyKns
8fc7808654 feat(hooks): implement useChat hook 2025-08-20 20:20:50 +05:30
ItzCrazyKns
0dc17286b9 Merge branch 'pr/843' 2025-08-12 21:39:25 +05:30
ItzCrazyKns
3edd7d44dd feat(openai): conditionally set temperature 2025-08-12 21:39:14 +05:30
Samuel Dockery
1132997108 feat: Add support for latest AI models from Anthropic, Google, & OpenAI 2025-08-10 07:50:31 -07:00
ItzCrazyKns
eadbedb713 feat(groq): switch to @langchain/groq for better handling 2025-08-02 17:14:34 +05:30
ItzCrazyKns
37cd6d3ab5 feat(discover): prevent duplicate articles 2025-08-01 20:41:07 +05:30
ItzCrazyKns
88be3a045b feat(discover): randomly sort results 2025-07-29 13:18:36 +05:30
ItzCrazyKns
45b51ab156 feat(discover-api): handle topics 2025-07-29 13:17:07 +05:30
ItzCrazyKns
3bee01cfa7 feat(discover): add topic selection 2025-07-28 20:39:50 +05:30
ItzCrazyKns
567c6a8758 Merge branch 'pr/831' 2025-07-24 22:36:19 +05:30
ItzCrazyKns
81a91da743 feat(gemini): use model instead of modelName 2025-07-23 12:22:26 +05:30
ItzCrazyKns
70a61ee1eb feat(think-box): handle thinkingEnded 2025-07-23 12:21:07 +05:30
ItzCrazyKns
9d89a4413b feat(format-history): remove extra : 2025-07-23 12:20:49 +05:30
ItzCrazyKns
6ea17d54c6 feat(chat-window): fix wrong history while rewriting 2025-07-22 21:21:49 +05:30
ItzCrazyKns
11a828b073 feat(message-box): close think box after thinking process ends 2025-07-22 21:21:09 +05:30
ItzCrazyKns
37022fb11e feat(format-history): update role determination 2025-07-22 21:20:16 +05:30
ItzCrazyKns
dd50d4927b Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-07-22 12:27:11 +05:30
ItzCrazyKns
fdaf3af3af Merge pull request #832 from tuxthepenguin84/patch-2
Fix name of provider in embeddings models error message
2025-07-21 20:56:24 +05:30
Samuel Dockery
3f2a8f862c Fix name of provider in embeddings models error message 2025-07-20 09:20:39 -07:00
Samuel Dockery
58c7be6e95 Update Gemini 2.5 Models 2025-07-20 09:17:20 -07:00
ItzCrazyKns
829b4e7134 feat(custom-openai): use apiKey instead of openAIApiKey 2025-07-19 21:37:34 +05:30
ItzCrazyKns
77870b39cc feat(ollama): use @langchain/ollama library 2025-07-19 21:37:34 +05:30
ItzCrazyKns
8e0ae9b867 feat(providers): switch to apiKey key 2025-07-19 21:37:34 +05:30
ItzCrazyKns
543f1df5ce feat(modules): update langchain packages 2025-07-19 21:37:34 +05:30
ItzCrazyKns
341aae4587 Merge branch 'pr/830' 2025-07-19 21:36:23 +05:30
Willie Zutz
7f62907385 feat(weather): update measurement units to Imperial/Metric 2025-07-19 08:53:11 -06:00
ItzCrazyKns
7c4aa683a2 feat(chains): remove unused imports 2025-07-19 17:57:32 +05:30
ItzCrazyKns
b48b0eeb0e feat(imageSearch): use XML parsing, implement few shot prompting 2025-07-19 17:52:30 +05:30
ItzCrazyKns
cddc793915 feat(videoSearch): use XML parsing, use few shot prompting 2025-07-19 17:52:14 +05:30
ItzCrazyKns
94e6db10bb feat(weather): add other measurement units, closes #821 #790 2025-07-18 21:09:32 +05:30
Justin Mayer
65fc881356 docs: Add instructions for local OpenAI-API-compatible LLMs
Perplexica supports local OpenAI-API-compatible LLMs via the `[MODELS.CUSTOM_OPENAI]` header in `config.toml` but was missing documentation for configuring the necessary settings. This makes that support more explicit and visible, as well as helping end users set the required configuration values appropriately.
2025-07-17 19:10:12 +02:00
ItzCrazyKns
26e1d5fec3 feat(routes): lint & beautify 2025-07-17 22:23:11 +05:30
ItzCrazyKns
66be87b688 Merge branch 'pr/827' 2025-07-17 22:22:50 +05:30
amoshydra
f7b4e32218 fix(discover): provide language when fetching
some engines provide empty response when no language is provided.

fix #618
2025-07-17 02:14:49 +08:00
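A minimal sketch of the shape of this fix, assuming the searchSearxng helper from '@/lib/searxng' accepts the options object shown in the discover route diff further down (link and query are illustrative placeholders):

// Always pass an explicit language so engines that return empty
// results without one (see #618) behave consistently.
const { results } = await searchSearxng(`site:${link} ${query}`, {
  engines: ['bing news'],
  pageno: 1,
  language: 'en',
});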
ItzCrazyKns
57407112fb feat(package): bump version 2025-07-16 10:39:50 +05:30
ItzCrazyKns
b280cc2e01 Merge pull request #787 from chriswritescode-dev/IOS
Fix: IOS Input Zoom / Support PWA Home Screen App, closes #458
2025-07-15 22:10:01 +05:30
ItzCrazyKns
e6ebf892c5 feat(styles): update globals.css 2025-07-15 21:47:20 +05:30
ItzCrazyKns
b754641058 feat(gitignore): add certificates 2025-07-15 21:45:44 +05:30
ItzCrazyKns
722f4f760e feat(manifest): update icons & screenshots 2025-07-15 21:45:37 +05:30
ItzCrazyKns
01e04a209f feat(public): add screenshots & update icons 2025-07-15 21:45:24 +05:30
ItzCrazyKns
0299fd1ea0 Merge pull request #817 from kittrydge/patch-1
Update Linux ollama instructions in README.md
2025-07-15 20:23:02 +05:30
ItzCrazyKns
cf8dec53ca feat(chat-window): select provider if model's present, closes #803 2025-07-07 16:09:36 +05:30
ItzCrazyKns
d5c012d748 Revert "Update ChatWindow.tsx"
This reverts commit 2ccbd9a44c.
2025-07-07 15:52:39 +05:30
ItzCrazyKns
2ccbd9a44c Update ChatWindow.tsx 2025-07-05 22:00:06 +05:30
kittrydge
ccd89d48d9 Update Linux ollama instructions in README.md
When setting the OLLAMA_HOST environment variable, the port number must be specified (see https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux).

Also, 'systemctl daemon-reload' needs to be called after changing a systemd unit file, and before the relevant systemd service is reloaded.
2025-07-01 18:00:26 -06:00
ItzCrazyKns
87d788ddef Update README.md 2025-06-30 19:55:23 +05:30
ItzCrazyKns
809b625a34 feat(widgets): fix size on smaller screens, closes #791 2025-06-30 15:42:41 +05:30
ItzCrazyKns
95c753a549 Merge branch 'pr/815' 2025-06-30 15:38:31 +05:30
ItzCrazyKns
0bb8b7ec5c feat(weather-widget): enable geolocation for weather data
Replaces the previous commented-out geolocation logic with an implementation that uses the browser's geolocation API and reverse geocoding to determine the user's city. Falls back to approximate location if permission is denied or unavailable.
2025-06-28 13:49:17 +05:30
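A sketch of the flow this commit describes, with illustrative names (not the actual widget code): try the browser geolocation API first, then fall back to an approximate location if permission is denied or unavailable.

// Hypothetical IP-based fallback (e.g. a geocoding service).
const getApproximateLocation = async () => ({ lat: 0, lng: 0 });

const getLocation = (): Promise<{ lat: number; lng: number }> =>
  new Promise((resolve) => {
    if (!navigator.geolocation) return resolve(getApproximateLocation());
    navigator.geolocation.getCurrentPosition(
      (pos) =>
        resolve({ lat: pos.coords.latitude, lng: pos.coords.longitude }),
      () => resolve(getApproximateLocation()), // denied or unavailable
    );
  });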
D1m7asis
c6d084f5dc feat: add AIML API provider
Introduces support for the AI/ML API provider, including configuration options, chat and embedding model loading, and UI integration. Updates documentation and sample config to reflect the new provider.
2025-06-27 13:43:54 +02:00
ItzCrazyKns
0024ce36c8 Merge pull request #784 from Davixk/fix/docs-typo
docs: correct typo in npm start command
2025-06-21 20:27:34 +05:30
ItzCrazyKns
c44e746807 Merge pull request #785 from koyasi777/patch-1
feat(gemini): add Gemini 2.5 Flash & Pro preview models (May 2025)
2025-06-21 20:26:37 +05:30
ItzCrazyKns
b1826066f4 Merge pull request #801 from glitchySid/patch-1
Update README.md
2025-06-21 20:25:41 +05:30
ItzCrazyKns
b0b8acc45b Merge pull request #781 from alckasoc/master
feat(models): Update Gemini 2.5 pro key
2025-06-21 20:25:06 +05:30
Siddhesh Mhatre
e2b9ffc072 Update README.md
Mentioned that Gemini api key can be used in perplexica.
2025-06-11 22:52:13 +05:30
Chris Scott
68c43ea372 Fix: IOS Input Zoom
config for theme consistency and iOS standalone mode
- Modified manifest.ts to ensure proper metadata

- Added display: standalone for iOS PWA behavior
2025-06-02 21:52:41 -04:00
Dave
3b46baca4f docs(readme): fix typo in npm start command 2025-06-02 05:52:31 +02:00
こやし
772e461c08 feat(gemini): add Gemini 2.5 Flash & Pro preview models (May 2025) 2025-06-02 00:30:18 +09:00
Dave
5c6018a0f9 docs: correct typo in npm start command 2025-06-01 06:35:16 +02:00
ItzCrazyKns
0b7989c3d3 feat(empty-chat): remove unused imports 2025-05-30 09:55:06 +05:30
ItzCrazyKns
8cfcc3e39c feat(chat): update margins and spacing 2025-05-30 09:52:36 +05:30
alckasoc
9eba4b7373 Merge branch 'master' of https://github.com/alckasoc/Perplexica 2025-05-29 18:27:00 -07:00
alckasoc
91306dc0c7 update gemini 2.5 pro key 2025-05-29 18:26:36 -07:00
82 changed files with 3437 additions and 2098 deletions

0
.assets/manifest.json Normal file
View File

Binary file not shown.

Before

Width:  |  Height:  |  Size: 641 KiB

After

Width:  |  Height:  |  Size: 633 KiB

2
.gitignore vendored
View File

@@ -37,3 +37,5 @@ Thumbs.db
# Db
db.sqlite
/searxng
certificates

View File

@@ -36,7 +36,7 @@ Before diving into coding, setting up your local environment is key. Here's what
1. In the root directory, locate the `sample.config.toml` file.
2. Rename it to `config.toml` and fill in the necessary configuration fields.
3. Run `npm install` to install all dependencies.
4. Run `npm run db:push` to set up the local sqlite database.
4. Run `npm run db:migrate` to set up the local sqlite database.
5. Use `npm run dev` to start the application in development mode.
**Please note**: Docker configurations are present for setting up production environments, whereas `npm run dev` is used for development purposes.

View File

@@ -16,7 +16,7 @@
<hr/>
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
[![Discord](https://dcbadge.limes.pink/api/server/26aArMy8tT?style=flat)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)
@@ -29,6 +29,7 @@
- [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
- [Non-Docker Installation](#non-docker-installation)
- [Ollama Connection Errors](#ollama-connection-errors)
- [Lemonade Connection Errors](#lemonade-connection-errors)
- [Using as a Search Engine](#using-as-a-search-engine)
- [Using Perplexica's API](#using-perplexicas-api)
- [Expose Perplexica to a network](#expose-perplexica-to-network)
@@ -53,7 +54,7 @@ Want to know more about its architecture and how it works? You can read it [here
## Features
- **Local LLMs**: You can make use local LLMs such as Llama3 and Mixtral using Ollama.
- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral.
- **Two Main Modes:**
- **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Like normal search instead of just using the context by SearxNG, it visits the top matches and tries to find relevant sources to the user's query directly from the page.
- **Normal Mode:** Processes your query and performs a web search.
@@ -87,9 +88,14 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
- `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
- `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"` **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**.
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
- `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
- `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**
**Note**: You can change these after starting Perplexica from the settings dialog.
@@ -111,13 +117,23 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
3. After populating the configuration run `npm i`.
4. Install the dependencies and then execute `npm run build`.
5. Finally, start the app by running `npm rum start`
5. Finally, start the app by running `npm run start`
**Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
### Ollama Connection Errors
### Troubleshooting
#### Local OpenAI-API-Compliant Servers
If Perplexica tells you that you haven't configured any chat model providers, ensure that:
1. Your server is running on `0.0.0.0` (not `127.0.0.1`) and on the same port you put in the API URL.
2. You have specified the correct model name loaded by your local LLM server.
3. You have specified the correct API key, or if one is not defined, you have put _something_ in the API key field and not left it empty.
#### Ollama Connection Errors
If you're encountering an Ollama connection error, it is likely due to the backend being unable to connect to Ollama's API. To fix this issue you can:
@@ -132,10 +148,29 @@ If you're encountering an Ollama connection error, it is likely due to the backe
3. **Linux Users - Expose Ollama to Network:**
- Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
- Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0:11434"`. (Change the port number if you are using a different one.) Then reload the systemd manager configuration with `systemctl daemon-reload`, and restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
- Ensure that the port (default is 11434) is not blocked by your firewall.
#### Lemonade Connection Errors
If you're encountering a Lemonade connection error, it is likely due to the backend being unable to connect to Lemonade's API. To fix this issue you can:
1. **Check your Lemonade API URL:** Ensure that the API URL is correctly set in the settings menu.
2. **Update API URL Based on OS:**
- **Windows:** Use `http://host.docker.internal:8000`
- **Mac:** Use `http://host.docker.internal:8000`
- **Linux:** Use `http://<private_ip_of_host>:8000`
Adjust the port number if you're using a different one.
3. **Ensure Lemonade Server is Running:**
- Make sure your Lemonade server is running and accessible on the configured port (default is 8000).
- Verify that Lemonade is configured to accept connections from all interfaces (`0.0.0.0`), not just localhost (`127.0.0.1`).
- Ensure that the port (default is 8000) is not blocked by your firewall.
## Using as a Search Engine
If you wish to use Perplexica as an alternative to traditional search engines like Google or Bing, or if you want to add a shortcut for quick access from your browser's search bar, follow these steps:
@@ -160,6 +195,7 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
[![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
[![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
[![Deploy on Hostinger](https://assets.hostinger.com/vps/deploy.svg)](https://www.hostinger.com/vps/docker-hosting?compose_url=https://raw.githubusercontent.com/ItzCrazyKns/Perplexica/refs/heads/master/docker-compose.yaml)
## Upcoming Features

View File

@@ -1,4 +1,6 @@
FROM node:20.18.0-slim AS builder
FROM node:24.5.0-slim AS builder
RUN apt-get update && apt-get install -y python3 python3-pip sqlite3 && rm -rf /var/lib/apt/lists/*
WORKDIR /home/perplexica
@@ -8,6 +10,7 @@ RUN yarn install --frozen-lockfile --network-timeout 600000
COPY tsconfig.json next.config.mjs next-env.d.ts postcss.config.js drizzle.config.ts tailwind.config.ts ./
COPY src ./src
COPY public ./public
COPY drizzle ./drizzle
RUN mkdir -p /home/perplexica/data
RUN yarn build
@@ -15,7 +18,9 @@ RUN yarn build
RUN yarn add --dev @vercel/ncc
RUN yarn ncc build ./src/lib/db/migrate.ts -o migrator
FROM node:20.18.0-slim
FROM node:24.5.0-slim
RUN apt-get update && apt-get install -y python3 python3-pip sqlite3 && rm -rf /var/lib/apt/lists/*
WORKDIR /home/perplexica
@@ -32,4 +37,6 @@ RUN mkdir /home/perplexica/uploads
COPY entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh
CMD ["./entrypoint.sh"]
RUN sed -i 's/\r$//' ./entrypoint.sh || true
CMD ["/home/perplexica/entrypoint.sh"]

View File

@@ -41,6 +41,6 @@ To update Perplexica to the latest version, follow these steps:
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. After populating the configuration run `npm i`.
5. Install the dependencies and then execute `npm run build`.
6. Finally, start the app by running `npm rum start`
6. Finally, start the app by running `npm run start`
---

View File

@@ -0,0 +1 @@
/* Do nothing */

View File

@@ -0,0 +1,125 @@
{
"version": "6",
"dialect": "sqlite",
"id": "6dedf55f-0e44-478f-82cf-14a21ac686f8",
"prevId": "ef3a044b-0f34-40b5-babb-2bb3a909ba27",
"tables": {
"chats": {
"name": "chats",
"columns": {
"id": {
"name": "id",
"type": "text",
"primaryKey": true,
"notNull": true,
"autoincrement": false
},
"title": {
"name": "title",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"createdAt": {
"name": "createdAt",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"focusMode": {
"name": "focusMode",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"files": {
"name": "files",
"type": "text",
"primaryKey": false,
"notNull": false,
"autoincrement": false,
"default": "'[]'"
}
},
"indexes": {},
"foreignKeys": {},
"compositePrimaryKeys": {},
"uniqueConstraints": {},
"checkConstraints": {}
},
"messages": {
"name": "messages",
"columns": {
"id": {
"name": "id",
"type": "integer",
"primaryKey": true,
"notNull": true,
"autoincrement": false
},
"type": {
"name": "type",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"chatId": {
"name": "chatId",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"createdAt": {
"name": "createdAt",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false,
"default": "CURRENT_TIMESTAMP"
},
"messageId": {
"name": "messageId",
"type": "text",
"primaryKey": false,
"notNull": true,
"autoincrement": false
},
"content": {
"name": "content",
"type": "text",
"primaryKey": false,
"notNull": false,
"autoincrement": false
},
"sources": {
"name": "sources",
"type": "text",
"primaryKey": false,
"notNull": false,
"autoincrement": false,
"default": "'[]'"
}
},
"indexes": {},
"foreignKeys": {},
"compositePrimaryKeys": {},
"uniqueConstraints": {},
"checkConstraints": {}
}
},
"views": {},
"enums": {},
"_meta": {
"schemas": {},
"tables": {},
"columns": {}
},
"internal": {
"indexes": {}
}
}
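For readers mapping this snapshot back to source, a sketch of the drizzle-orm schema it corresponds to (an approximation, not the repo's actual schema file):

import { sql } from 'drizzle-orm';
import { integer, sqliteTable, text } from 'drizzle-orm/sqlite-core';

export const chats = sqliteTable('chats', {
  id: text('id').primaryKey(),
  title: text('title').notNull(),
  createdAt: text('createdAt').notNull(),
  focusMode: text('focusMode').notNull(),
  files: text('files').default('[]'),
});

export const messages = sqliteTable('messages', {
  id: integer('id').primaryKey(),
  type: text('type').notNull(),
  chatId: text('chatId').notNull(),
  createdAt: text('createdAt').notNull().default(sql`CURRENT_TIMESTAMP`),
  messageId: text('messageId').notNull(),
  content: text('content'),
  sources: text('sources').default('[]'),
});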

View File

@@ -8,6 +8,13 @@
"when": 1748405503809,
"tag": "0000_fuzzy_randall",
"breakpoints": true
},
{
"idx": 1,
"version": "6",
"when": 1758863991284,
"tag": "0001_wise_rockslide",
"breakpoints": true
}
]
}

View File

@@ -1,25 +1,27 @@
{
"name": "perplexica-frontend",
"version": "1.11.0-rc1",
"version": "1.11.0-rc2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
"dev": "next dev",
"build": "npm run db:push && next build",
"build": "npm run db:migrate && next build",
"start": "next start",
"lint": "next lint",
"format:write": "prettier . --write",
"db:push": "drizzle-kit push"
"db:migrate": "node ./src/lib/db/migrate.ts"
},
"dependencies": {
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.15",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/google-genai": "^0.1.12",
"@langchain/openai": "^0.0.25",
"@langchain/anthropic": "^0.3.24",
"@langchain/community": "^0.3.49",
"@langchain/core": "^0.3.66",
"@langchain/google-genai": "^0.2.15",
"@langchain/groq": "^0.2.3",
"@langchain/ollama": "^0.2.3",
"@langchain/openai": "^0.6.2",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
@@ -31,7 +33,7 @@
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"jspdf": "^3.0.1",
"langchain": "^0.1.30",
"langchain": "^0.3.30",
"lucide-react": "^0.363.0",
"mammoth": "^1.9.1",
"markdown-to-jsx": "^7.7.2",

BIN
public/fonts/pp-ed-ul.otf Normal file

Binary file not shown.

BIN
public/icon-100.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 916 B

BIN
public/icon-50.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 515 B

BIN
public/icon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

BIN
public/screenshots/p1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 183 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 130 KiB

BIN
public/screenshots/p2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 627 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

View File

@@ -25,8 +25,15 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
[MODELS.DEEPSEEK]
API_KEY = ""
[MODELS.AIMLAPI]
API_KEY = "" # Required to use AI/ML API chat and embedding models
[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
[MODELS.LEMONADE]
API_URL = "" # Lemonade API URL - http://host.docker.internal:8000
API_KEY = "" # Optional API key for Lemonade
[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768
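The new [MODELS.LEMONADE] block pairs with getter functions imported by the settings route below. A sketch of how those getters might read these fields, assuming the @iarna/toml dependency from package.json (the real src/lib/config.ts is not part of this compare):

import toml from '@iarna/toml';
import fs from 'fs';

const config = toml.parse(fs.readFileSync('config.toml', 'utf-8')) as any;

export const getLemonadeApiEndpoint = (): string =>
  config.MODELS?.LEMONADE?.API_URL ?? '';
export const getLemonadeApiKey = (): string =>
  config.MODELS?.LEMONADE?.API_KEY ?? '';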

View File

@@ -1,11 +1,7 @@
import prompts from '@/lib/prompts';
import MetaSearchAgent from '@/lib/search/metaSearchAgent';
import crypto from 'crypto';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { EventEmitter } from 'stream';
import {
chatModelProviders,
embeddingModelProviders,
getAvailableChatModelProviders,
getAvailableEmbeddingModelProviders,
} from '@/lib/providers';
@@ -21,46 +17,81 @@ import {
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
import { z } from 'zod';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
type Message = {
messageId: string;
chatId: string;
content: string;
};
const messageSchema = z.object({
messageId: z.string().min(1, 'Message ID is required'),
chatId: z.string().min(1, 'Chat ID is required'),
content: z.string().min(1, 'Message content is required'),
});
type ChatModel = {
provider: string;
name: string;
};
const chatModelSchema = z.object({
provider: z.string().optional(),
name: z.string().optional(),
});
type EmbeddingModel = {
provider: string;
name: string;
};
const embeddingModelSchema = z.object({
provider: z.string().optional(),
name: z.string().optional(),
});
type Body = {
message: Message;
optimizationMode: 'speed' | 'balanced' | 'quality';
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
systemInstructions: string;
const bodySchema = z.object({
message: messageSchema,
optimizationMode: z.enum(['speed', 'balanced', 'quality'], {
errorMap: () => ({
message: 'Optimization mode must be one of: speed, balanced, quality',
}),
}),
focusMode: z.string().min(1, 'Focus mode is required'),
history: z
.array(
z.tuple([z.string(), z.string()], {
errorMap: () => ({
message: 'History items must be tuples of two strings',
}),
}),
)
.optional()
.default([]),
files: z.array(z.string()).optional().default([]),
chatModel: chatModelSchema.optional().default({}),
embeddingModel: embeddingModelSchema.optional().default({}),
systemInstructions: z.string().nullable().optional().default(''),
});
type Message = z.infer<typeof messageSchema>;
type Body = z.infer<typeof bodySchema>;
const safeValidateBody = (data: unknown) => {
const result = bodySchema.safeParse(data);
if (!result.success) {
return {
success: false,
error: result.error.errors.map((e) => ({
path: e.path.join('.'),
message: e.message,
})),
};
}
return {
success: true,
data: result.data,
};
};
const handleEmitterEvents = async (
stream: EventEmitter,
writer: WritableStreamDefaultWriter,
encoder: TextEncoder,
aiMessageId: string,
chatId: string,
) => {
let receivedMessage = '';
let sources: any[] = [];
const aiMessageId = crypto.randomBytes(7).toString('hex');
stream.on('data', (data) => {
const parsedData = JSON.parse(data);
@@ -87,7 +118,17 @@ const handleEmitterEvents = async (
),
);
sources = parsedData.data;
const sourceMessageId = crypto.randomBytes(7).toString('hex');
db.insert(messagesSchema)
.values({
chatId: chatId,
messageId: sourceMessageId,
role: 'source',
sources: parsedData.data,
createdAt: new Date().toString(),
})
.execute();
}
});
stream.on('end', () => {
@@ -95,7 +136,6 @@ const handleEmitterEvents = async (
encoder.encode(
JSON.stringify({
type: 'messageEnd',
messageId: aiMessageId,
}) + '\n',
),
);
@@ -107,10 +147,7 @@ const handleEmitterEvents = async (
chatId: chatId,
messageId: aiMessageId,
role: 'assistant',
metadata: JSON.stringify({
createdAt: new Date(),
...(sources && sources.length > 0 && { sources }),
}),
createdAt: new Date().toString(),
})
.execute();
});
@@ -138,6 +175,8 @@ const handleHistorySave = async (
where: eq(chats.id, message.chatId),
});
const fileData = files.map(getFileDetails);
if (!chat) {
await db
.insert(chats)
@@ -146,9 +185,15 @@ const handleHistorySave = async (
title: message.content,
createdAt: new Date().toString(),
focusMode: focusMode,
files: files.map(getFileDetails),
files: fileData,
})
.execute();
} else if (JSON.stringify(chat.files ?? []) != JSON.stringify(fileData)) {
db.update(chats)
.set({
files: files.map(getFileDetails),
})
.where(eq(chats.id, message.chatId));
}
const messageExists = await db.query.messages.findFirst({
@@ -163,9 +208,7 @@ const handleHistorySave = async (
chatId: message.chatId,
messageId: humanMessageId,
role: 'user',
metadata: JSON.stringify({
createdAt: new Date(),
}),
createdAt: new Date().toString(),
})
.execute();
} else {
@@ -183,7 +226,17 @@ const handleHistorySave = async (
export const POST = async (req: Request) => {
try {
const body = (await req.json()) as Body;
const reqBody = (await req.json()) as Body;
const parseBody = safeValidateBody(reqBody);
if (!parseBody.success) {
return Response.json(
{ message: 'Invalid request body', error: parseBody.error },
{ status: 400 },
);
}
const body = parseBody.data as Body;
const { message } = body;
if (message.content === '') {
@@ -223,7 +276,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
@@ -247,7 +300,6 @@ export const POST = async (req: Request) => {
const humanMessageId =
message.messageId ?? crypto.randomBytes(7).toString('hex');
const aiMessageId = crypto.randomBytes(7).toString('hex');
const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') {
@@ -279,14 +331,14 @@ export const POST = async (req: Request) => {
embedding,
body.optimizationMode,
body.files,
body.systemInstructions,
body.systemInstructions as string,
);
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
handleEmitterEvents(stream, writer, encoder, aiMessageId, message.chatId);
handleEmitterEvents(stream, writer, encoder, message.chatId);
handleHistorySave(message, humanMessageId, body.focusMode, body.files);
return new Response(responseStream.readable, {
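A usage example for the validation added above: a malformed body now produces a structured 400 instead of failing deeper in the handler. (The expected values follow from the zod schemas shown in this diff.)

const bad = safeValidateBody({
  message: { messageId: 'abc', chatId: 'chat1', content: '' },
  optimizationMode: 'balanced',
  focusMode: 'webSearch',
});
// bad.success === false
// bad.error -> [{ path: 'message.content',
//                 message: 'Message content is required' }]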

View File

@@ -8,8 +8,12 @@ import {
getOllamaApiEndpoint,
getOpenaiApiKey,
getDeepseekApiKey,
getAimlApiKey,
getLMStudioApiEndpoint,
getLemonadeApiEndpoint,
getLemonadeApiKey,
updateConfig,
getOllamaApiKey,
} from '@/lib/config';
import {
getAvailableChatModelProviders,
@@ -52,11 +56,15 @@ export const GET = async (req: Request) => {
config['openaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['ollamaApiKey'] = getOllamaApiKey();
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['lemonadeApiUrl'] = getLemonadeApiEndpoint();
config['lemonadeApiKey'] = getLemonadeApiKey();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
config['deepseekApiKey'] = getDeepseekApiKey();
config['aimlApiKey'] = getAimlApiKey();
config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -91,13 +99,21 @@ export const POST = async (req: Request) => {
},
OLLAMA: {
API_URL: config.ollamaApiUrl,
API_KEY: config.ollamaApiKey,
},
DEEPSEEK: {
API_KEY: config.deepseekApiKey,
},
AIMLAPI: {
API_KEY: config.aimlApiKey,
},
LM_STUDIO: {
API_URL: config.lmStudioApiUrl,
},
LEMONADE: {
API_URL: config.lemonadeApiUrl,
API_KEY: config.lemonadeApiKey,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,

View File

@@ -1,55 +1,77 @@
import { searchSearxng } from '@/lib/searxng';
const articleWebsites = [
'yahoo.com',
'www.exchangewire.com',
'businessinsider.com',
/* 'wired.com',
'mashable.com',
'theverge.com',
'gizmodo.com',
'cnet.com',
'venturebeat.com', */
];
const websitesForTopic = {
tech: {
query: ['technology news', 'latest tech', 'AI', 'science and innovation'],
links: ['techcrunch.com', 'wired.com', 'theverge.com'],
},
finance: {
query: ['finance news', 'economy', 'stock market', 'investing'],
links: ['bloomberg.com', 'cnbc.com', 'marketwatch.com'],
},
art: {
query: ['art news', 'culture', 'modern art', 'cultural events'],
links: ['artnews.com', 'hyperallergic.com', 'theartnewspaper.com'],
},
sports: {
query: ['sports news', 'latest sports', 'cricket football tennis'],
links: ['espn.com', 'bbc.com/sport', 'skysports.com'],
},
entertainment: {
query: ['entertainment news', 'movies', 'TV shows', 'celebrities'],
links: ['hollywoodreporter.com', 'variety.com', 'deadline.com'],
},
};
const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
type Topic = keyof typeof websitesForTopic;
export const GET = async (req: Request) => {
try {
const params = new URL(req.url).searchParams;
const mode: 'normal' | 'preview' =
(params.get('mode') as 'normal' | 'preview') || 'normal';
const topic: Topic = (params.get('topic') as Topic) || 'tech';
const selectedTopic = websitesForTopic[topic];
let data = [];
if (mode === 'normal') {
const seenUrls = new Set();
data = (
await Promise.all([
...new Array(articleWebsites.length * topics.length)
.fill(0)
.map(async (_, i) => {
await Promise.all(
selectedTopic.links.flatMap((link) =>
selectedTopic.query.map(async (query) => {
return (
await searchSearxng(
`site:${articleWebsites[i % articleWebsites.length]} ${
topics[i % topics.length]
}`,
{
engines: ['bing news'],
pageno: 1,
},
)
await searchSearxng(`site:${link} ${query}`, {
engines: ['bing news'],
pageno: 1,
language: 'en',
})
).results;
}),
])
),
)
)
.map((result) => result)
.flat()
.filter((item) => {
const url = item.url?.toLowerCase().trim();
if (seenUrls.has(url)) return false;
seenUrls.add(url);
return true;
})
.sort(() => Math.random() - 0.5);
} else {
data = (
await searchSearxng(
`site:${articleWebsites[Math.floor(Math.random() * articleWebsites.length)]} ${topics[Math.floor(Math.random() * topics.length)]}`,
{ engines: ['bing news'], pageno: 1 },
`site:${selectedTopic.links[Math.floor(Math.random() * selectedTopic.links.length)]} ${selectedTopic.query[Math.floor(Math.random() * selectedTopic.query.length)]}`,
{
engines: ['bing news'],
pageno: 1,
language: 'en',
},
)
).results;
}
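The dedup-and-shuffle step above, isolated as a standalone sketch; the random-comparator sort is a cheap (if statistically biased) shuffle, matching what the route does:

type Article = { url?: string; title: string };

const dedupeAndShuffle = (items: Article[]): Article[] => {
  const seen = new Set<string>();
  return items
    .filter((item) => {
      const url = item.url?.toLowerCase().trim() ?? '';
      if (seen.has(url)) return false;
      seen.add(url);
      return true;
    })
    .sort(() => Math.random() - 0.5);
};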

View File

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

View File

@@ -81,8 +81,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.name || getCustomOpenaiModelName(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
apiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL:

View File

@@ -48,7 +48,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

View File

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
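These hunks all make the same mechanical change: newer @langchain/openai releases accept apiKey directly rather than openAIApiKey. Consolidated as a sketch (getter names taken from the config imports earlier in this compare):

import { ChatOpenAI } from '@langchain/openai';

const llm = new ChatOpenAI({
  apiKey: getCustomOpenaiApiKey(),
  modelName: getCustomOpenaiModelName(),
  temperature: 0.7,
  configuration: {
    baseURL: getCustomOpenaiApiUrl(),
  },
});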

View File

@@ -1,6 +1,10 @@
export const POST = async (req: Request) => {
try {
const body: { lat: number; lng: number } = await req.json();
const body: {
lat: number;
lng: number;
measureUnit: 'Imperial' | 'Metric';
} = await req.json();
if (!body.lat || !body.lng) {
return Response.json(
@@ -12,7 +16,9 @@ export const POST = async (req: Request) => {
}
const res = await fetch(
`https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto`,
`https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${
body.measureUnit === 'Metric' ? '' : '&temperature_unit=fahrenheit'
}${body.measureUnit === 'Metric' ? '' : '&wind_speed_unit=mph'}`,
);
const data = await res.json();
@@ -33,12 +39,16 @@ export const POST = async (req: Request) => {
humidity: number;
windSpeed: number;
icon: string;
temperatureUnit: 'C' | 'F';
windSpeedUnit: 'm/s' | 'mph';
} = {
temperature: data.current.temperature_2m,
condition: '',
humidity: data.current.relative_humidity_2m,
windSpeed: data.current.wind_speed_10m,
icon: '',
temperatureUnit: body.measureUnit === 'Metric' ? 'C' : 'F',
windSpeedUnit: body.measureUnit === 'Metric' ? 'm/s' : 'mph',
};
const code = data.current.weather_code;
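A sketch of the unit handling above as a helper: Metric is open-meteo's default, so extra query parameters are only appended for Imperial:

const buildForecastUrl = (
  lat: number,
  lng: number,
  unit: 'Imperial' | 'Metric',
) => {
  const base =
    `https://api.open-meteo.com/v1/forecast?latitude=${lat}&longitude=${lng}` +
    `&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto`;
  return unit === 'Metric'
    ? base
    : `${base}&temperature_unit=fahrenheit&wind_speed_unit=mph`;
};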

View File

@@ -1,9 +1,17 @@
import ChatWindow from '@/components/ChatWindow';
import React from 'react';
'use client';
const Page = ({ params }: { params: Promise<{ chatId: string }> }) => {
const { chatId } = React.use(params);
return <ChatWindow id={chatId} />;
import ChatWindow from '@/components/ChatWindow';
import { useParams } from 'next/navigation';
import React from 'react';
import { ChatProvider } from '@/lib/hooks/useChat';
const Page = () => {
const { chatId }: { chatId: string } = useParams();
return (
<ChatProvider id={chatId}>
<ChatWindow />
</ChatProvider>
);
};
export default Page;

View File

@@ -1,110 +1,268 @@
'use client';
import { Search } from 'lucide-react';
import { Globe2Icon } from 'lucide-react';
import { useEffect, useState } from 'react';
import Link from 'next/link';
import { toast } from 'sonner';
import { cn } from '@/lib/utils';
import SmallNewsCard from '@/components/Discover/SmallNewsCard';
import MajorNewsCard from '@/components/Discover/MajorNewsCard';
interface Discover {
export interface Discover {
title: string;
content: string;
url: string;
thumbnail: string;
}
const topics: { key: string; display: string }[] = [
{
display: 'Tech & Science',
key: 'tech',
},
{
display: 'Finance',
key: 'finance',
},
{
display: 'Art & Culture',
key: 'art',
},
{
display: 'Sports',
key: 'sports',
},
{
display: 'Entertainment',
key: 'entertainment',
},
];
const Page = () => {
const [discover, setDiscover] = useState<Discover[] | null>(null);
const [loading, setLoading] = useState(true);
const [activeTopic, setActiveTopic] = useState<string>(topics[0].key);
const fetchArticles = async (topic: string) => {
setLoading(true);
try {
const res = await fetch(`/api/discover?topic=${topic}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
const data = await res.json();
if (!res.ok) {
throw new Error(data.message);
}
data.blogs = data.blogs.filter((blog: Discover) => blog.thumbnail);
setDiscover(data.blogs);
} catch (err: any) {
console.error('Error fetching data:', err.message);
toast.error('Error fetching data');
} finally {
setLoading(false);
}
};
useEffect(() => {
const fetchData = async () => {
try {
const res = await fetch(`/api/discover`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
fetchArticles(activeTopic);
}, [activeTopic]);
const data = await res.json();
if (!res.ok) {
throw new Error(data.message);
}
data.blogs = data.blogs.filter((blog: Discover) => blog.thumbnail);
setDiscover(data.blogs);
} catch (err: any) {
console.error('Error fetching data:', err.message);
toast.error('Error fetching data');
} finally {
setLoading(false);
}
};
fetchData();
}, []);
return loading ? (
<div className="flex flex-row items-center justify-center min-h-screen">
<svg
aria-hidden="true"
className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
) : (
return (
<>
<div>
<div className="flex flex-col pt-4">
<div className="flex items-center">
<Search />
<h1 className="text-3xl font-medium p-2">Discover</h1>
<div className="flex flex-col pt-10 border-b border-light-200/20 dark:border-dark-200/20 pb-6 px-2">
<div className="flex flex-col lg:flex-row lg:items-center lg:justify-between gap-4">
<div className="flex items-center justify-center">
<Globe2Icon size={45} className="mb-2.5" />
<h1
className="text-5xl font-normal p-2"
style={{ fontFamily: 'PP Editorial, serif' }}
>
Discover
</h1>
</div>
<div className="flex flex-row items-center space-x-2 overflow-x-auto">
{topics.map((t, i) => (
<div
key={i}
className={cn(
'border-[0.1px] rounded-full text-sm px-3 py-1 text-nowrap transition duration-200 cursor-pointer',
activeTopic === t.key
? 'text-cyan-700 dark:text-cyan-300 bg-cyan-300/20 border-cyan-700/60 dark:bg-cyan-300/30 dark:border-cyan-300/40'
: 'border-black/30 dark:border-white/30 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white hover:border-black/40 dark:hover:border-white/40 hover:bg-black/5 dark:hover:bg-white/5',
)}
onClick={() => setActiveTopic(t.key)}
>
<span>{t.display}</span>
</div>
))}
</div>
</div>
<hr className="border-t border-[#2B2C2C] my-4 w-full" />
</div>
<div className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4 pb-28 lg:pb-8 w-full justify-items-center lg:justify-items-start">
{discover &&
discover?.map((item, i) => (
<Link
href={`/?q=Summary: ${item.url}`}
key={i}
className="max-w-sm rounded-lg overflow-hidden bg-light-secondary dark:bg-dark-secondary hover:-translate-y-[1px] transition duration-200"
target="_blank"
>
<img
className="object-cover w-full aspect-video"
src={
new URL(item.thumbnail).origin +
new URL(item.thumbnail).pathname +
`?id=${new URL(item.thumbnail).searchParams.get('id')}`
{loading ? (
<div className="flex flex-row items-center justify-center min-h-screen">
<svg
aria-hidden="true"
className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
viewBox="0 0 100 101"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
fill="currentColor"
/>
<path
d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
fill="currentFill"
/>
</svg>
</div>
) : (
<div className="flex flex-col gap-4 pb-28 pt-5 lg:pb-8 w-full">
<div className="block lg:hidden">
<div className="grid grid-cols-1 sm:grid-cols-2 gap-4">
{discover?.map((item, i) => (
<SmallNewsCard key={`mobile-${i}`} item={item} />
))}
</div>
</div>
<div className="hidden lg:block">
{discover &&
discover.length > 0 &&
(() => {
const sections = [];
let index = 0;
while (index < discover.length) {
if (sections.length > 0) {
sections.push(
<hr
key={`sep-${index}`}
className="border-t border-light-200/20 dark:border-dark-200/20 my-3 w-full"
/>,
);
}
if (index < discover.length) {
sections.push(
<MajorNewsCard
key={`major-${index}`}
item={discover[index]}
isLeft={false}
/>,
);
index++;
}
if (index < discover.length) {
sections.push(
<hr
key={`sep-${index}-after`}
className="border-t border-light-200/20 dark:border-dark-200/20 my-3 w-full"
/>,
);
}
if (index < discover.length) {
const smallCards = discover.slice(index, index + 3);
sections.push(
<div
key={`small-group-${index}`}
className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4"
>
{smallCards.map((item, i) => (
<SmallNewsCard
key={`small-${index + i}`}
item={item}
/>
))}
</div>,
);
index += 3;
}
if (index < discover.length) {
sections.push(
<hr
key={`sep-${index}-after-small`}
className="border-t border-light-200/20 dark:border-dark-200/20 my-3 w-full"
/>,
);
}
if (index < discover.length - 1) {
const twoMajorCards = discover.slice(index, index + 2);
twoMajorCards.forEach((item, i) => {
sections.push(
<MajorNewsCard
key={`double-${index + i}`}
item={item}
isLeft={i === 0}
/>,
);
if (i === 0) {
sections.push(
<hr
key={`sep-double-${index + i}`}
className="border-t border-light-200/20 dark:border-dark-200/20 my-3 w-full"
/>,
);
}
});
index += 2;
} else if (index < discover.length) {
sections.push(
<MajorNewsCard
key={`final-major-${index}`}
item={discover[index]}
isLeft={true}
/>,
);
index++;
}
if (index < discover.length) {
sections.push(
<hr
key={`sep-${index}-after-major`}
className="border-t border-light-200/20 dark:border-dark-200/20 my-3 w-full"
/>,
);
}
if (index < discover.length) {
const smallCards = discover.slice(index, index + 3);
sections.push(
<div
key={`small-group-2-${index}`}
className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4"
>
{smallCards.map((item, i) => (
<SmallNewsCard
key={`small-2-${index + i}`}
item={item}
/>
))}
</div>,
);
index += 3;
}
}
alt={item.title}
/>
<div className="px-6 py-4">
<div className="font-bold text-lg mb-2">
{item.title.slice(0, 100)}...
</div>
<p className="text-black-70 dark:text-white/70 text-sm">
{item.content.slice(0, 100)}...
</p>
</div>
</Link>
))}
</div>
return sections;
})()}
</div>
</div>
)}
</div>
</>
);
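The desktop branch above builds its sections in a repeating rhythm: one full-width major card, three small cards, two side-by-side major cards, three more small cards, with an hr between groups. Reduced to a sketch of just the grouping (illustrative, not the component code):

const groupArticles = <T,>(items: T[]): T[][] => {
  const groups: T[][] = [];
  let i = 0;
  while (i < items.length) {
    groups.push(items.slice(i, i + 1)); // one major card
    i += 1;
    groups.push(items.slice(i, i + 3)); // three small cards
    i += 3;
    groups.push(items.slice(i, i + 2)); // two major cards
    i += 2;
    groups.push(items.slice(i, i + 3)); // three small cards
    i += 3;
  }
  return groups.filter((g) => g.length > 0);
};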

View File

@@ -2,6 +2,14 @@
@tailwind components;
@tailwind utilities;
@font-face {
font-family: 'PP Editorial';
src: url('/fonts/pp-ed-ul.otf') format('opentype');
font-weight: 200;
font-style: normal;
font-display: swap;
}
@layer base {
.overflow-hidden-scrollable {
-ms-overflow-style: none;
@@ -11,3 +19,20 @@
display: none;
}
}
@layer utilities {
.line-clamp-2 {
display: -webkit-box;
-webkit-box-orient: vertical;
-webkit-line-clamp: 2;
overflow: hidden;
}
}
@media screen and (-webkit-min-device-pixel-ratio: 0) {
select,
textarea,
input {
font-size: 16px !important;
}
}
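Two notes on this hunk: the line-clamp-2 utility backs the feat(css) commit above and truncates text to two lines, and the 16px rule prevents iOS Safari from zooming in when a form control with a smaller font receives focus (the IOS Input Zoom fix). A hypothetical usage sketch for the utility:

const NewsTitle = ({ title }: { title: string }) => (
  <div className="font-bold text-lg line-clamp-2">{title}</div>
);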

54
src/app/manifest.ts Normal file
View File

@@ -0,0 +1,54 @@
import type { MetadataRoute } from 'next';
export default function manifest(): MetadataRoute.Manifest {
return {
name: 'Perplexica - Chat with the internet',
short_name: 'Perplexica',
description:
'Perplexica is an AI powered chatbot that is connected to the internet.',
start_url: '/',
display: 'standalone',
background_color: '#0a0a0a',
theme_color: '#0a0a0a',
screenshots: [
{
src: '/screenshots/p1.png',
form_factor: 'wide',
sizes: '2560x1600',
},
{
src: '/screenshots/p2.png',
form_factor: 'wide',
sizes: '2560x1600',
},
{
src: '/screenshots/p1_small.png',
form_factor: 'narrow',
sizes: '828x1792',
},
{
src: '/screenshots/p2_small.png',
form_factor: 'narrow',
sizes: '828x1792',
},
],
icons: [
{
src: '/icon-50.png',
sizes: '50x50',
type: 'image/png' as const,
},
{
src: '/icon-100.png',
sizes: '100x100',
type: 'image/png',
},
{
src: '/icon.png',
sizes: '440x440',
type: 'image/png',
purpose: 'any',
},
],
};
}

View File

@@ -1,4 +1,5 @@
import ChatWindow from '@/components/ChatWindow';
import { ChatProvider } from '@/lib/hooks/useChat';
import { Metadata } from 'next';
import { Suspense } from 'react';
@@ -11,7 +12,9 @@ const Home = () => {
return (
<div>
<Suspense>
<ChatWindow />
<ChatProvider>
<ChatWindow />
</ChatProvider>
</Suspense>
</div>
);

View File

@@ -21,8 +21,12 @@ interface SettingsType {
anthropicApiKey: string;
geminiApiKey: string;
ollamaApiUrl: string;
ollamaApiKey: string;
lmStudioApiUrl: string;
lemonadeApiUrl: string;
lemonadeApiKey: string;
deepseekApiKey: string;
aimlApiKey: string;
customOpenaiApiKey: string;
customOpenaiApiUrl: string;
customOpenaiModelName: string;
@@ -147,6 +151,9 @@ const Page = () => {
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [systemInstructions, setSystemInstructions] = useState<string>('');
const [measureUnit, setMeasureUnit] = useState<'Imperial' | 'Metric'>(
'Metric',
);
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
useEffect(() => {
@@ -209,6 +216,10 @@ const Page = () => {
setSystemInstructions(localStorage.getItem('systemInstructions')!);
setMeasureUnit(
localStorage.getItem('measureUnit')! as 'Imperial' | 'Metric',
);
setIsLoading(false);
};
@@ -367,6 +378,8 @@ const Page = () => {
localStorage.setItem('embeddingModel', value);
} else if (key === 'systemInstructions') {
localStorage.setItem('systemInstructions', value);
} else if (key === 'measureUnit') {
localStorage.setItem('measureUnit', value.toString());
}
} catch (err) {
console.error('Failed to save:', err);
@@ -415,13 +428,35 @@ const Page = () => {
) : (
config && (
<div className="flex flex-col space-y-6 pb-28 lg:pb-8">
<SettingsSection title="Appearance">
<SettingsSection title="Preferences">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Theme
</p>
<ThemeSwitcher />
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Measurement Units
</p>
<Select
value={measureUnit ?? undefined}
onChange={(e) => {
setMeasureUnit(e.target.value as 'Imperial' | 'Metric');
saveConfig('measureUnit', e.target.value);
}}
options={[
{
label: 'Metric',
value: 'Metric',
},
{
label: 'Imperial',
value: 'Imperial',
},
]}
/>
</div>
</SettingsSection>
<SettingsSection title="Automatic Search">
@@ -515,7 +550,7 @@ const Page = () => {
<SettingsSection title="System Instructions">
<div className="flex flex-col space-y-4">
<Textarea
value={systemInstructions}
value={systemInstructions ?? undefined}
isSaving={savingStates['systemInstructions']}
onChange={(e) => {
setSystemInstructions(e.target.value);
@@ -786,6 +821,25 @@ const Page = () => {
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Ollama API Key (Can be left blank)
</p>
<Input
type="text"
placeholder="Ollama API Key"
value={config.ollamaApiKey}
isSaving={savingStates['ollamaApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
ollamaApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('ollamaApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
GROQ API Key
@@ -862,6 +916,25 @@ const Page = () => {
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
AI/ML API Key
</p>
<Input
type="text"
placeholder="AI/ML API Key"
value={config.aimlApiKey}
isSaving={savingStates['aimlApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
aimlApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('aimlApiKey', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
LM Studio API URL
@@ -882,6 +955,48 @@ const Page = () => {
</div>
</div>
</SettingsSection>
<SettingsSection title="Lemonade">
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Lemonade API URL
</p>
<Input
type="text"
placeholder="Lemonade API URL"
value={config.lemonadeApiUrl}
isSaving={savingStates['lemonadeApiUrl']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
lemonadeApiUrl: e.target.value,
}));
}}
onSave={(value) => saveConfig('lemonadeApiUrl', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Lemonade API Key (Optional)
</p>
<Input
type="password"
placeholder="Lemonade API Key"
value={config.lemonadeApiKey}
isSaving={savingStates['lemonadeApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
lemonadeApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('lemonadeApiKey', value)}
/>
</div>
</div>
</SettingsSection>
</div>
)
)}
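
One caveat in the settings diff: localStorage.getItem('measureUnit')! asserts non-null, but the key is absent on a first visit, so the cast silently yields null (which is why the Select above guards with measureUnit ?? undefined). A defensive read, sketched here rather than taken from the diff, would fall back to the default instead:

type MeasureUnit = 'Imperial' | 'Metric';

// Defensive localStorage read; 'Metric' mirrors the useState default above.
function readMeasureUnit(): MeasureUnit {
  const stored = localStorage.getItem('measureUnit');
  return stored === 'Imperial' ? 'Imperial' : 'Metric';
}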

View File

@@ -2,31 +2,13 @@
import { Fragment, useEffect, useRef, useState } from 'react';
import MessageInput from './MessageInput';
import { File, Message } from './ChatWindow';
import MessageBox from './MessageBox';
import MessageBoxLoading from './MessageBoxLoading';
import { useChat } from '@/lib/hooks/useChat';
const Chat = () => {
const { sections, chatTurns, loading, messageAppeared } = useChat();
const Chat = ({
loading,
messages,
sendMessage,
messageAppeared,
rewrite,
fileIds,
setFileIds,
files,
setFiles,
}: {
messages: Message[];
sendMessage: (message: string) => void;
loading: boolean;
messageAppeared: boolean;
rewrite: (messageId: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [dividerWidth, setDividerWidth] = useState(0);
const dividerRef = useRef<HTMLDivElement | null>(null);
const messageEnd = useRef<HTMLDivElement | null>(null);
@@ -45,41 +27,36 @@ const Chat = ({
return () => {
window.removeEventListener('resize', updateDividerWidth);
};
});
}, []);
useEffect(() => {
const scroll = () => {
messageEnd.current?.scrollIntoView({ behavior: 'smooth' });
};
if (messages.length === 1) {
document.title = `${messages[0].content.substring(0, 30)} - Perplexica`;
if (chatTurns.length === 1) {
document.title = `${chatTurns[0].content.substring(0, 30)} - Perplexica`;
}
if (messages[messages.length - 1]?.role == 'user') {
if (chatTurns[chatTurns.length - 1]?.role === 'user') {
scroll();
}
}, [messages]);
}, [chatTurns]);
return (
<div className="flex flex-col space-y-6 pt-8 pb-44 lg:pb-32 sm:mx-4 md:mx-8">
{messages.map((msg, i) => {
const isLast = i === messages.length - 1;
{sections.map((section, i) => {
const isLast = i === sections.length - 1;
return (
<Fragment key={msg.messageId}>
<Fragment key={section.userMessage.messageId}>
<MessageBox
key={i}
message={msg}
messageIndex={i}
history={messages}
loading={loading}
section={section}
sectionIndex={i}
dividerRef={isLast ? dividerRef : undefined}
isLast={isLast}
rewrite={rewrite}
sendMessage={sendMessage}
/>
{!isLast && msg.role === 'assistant' && (
{!isLast && (
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
)}
</Fragment>
@@ -92,14 +69,7 @@ const Chat = ({
className="bottom-24 lg:bottom-10 fixed z-40"
style={{ width: dividerWidth }}
>
<MessageInput
loading={loading}
sendMessage={sendMessage}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<MessageInput />
</div>
)}
</div>
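
Chat now renders sections but still keys its title and scroll effects off chatTurns, the user/assistant subset of the message stream. Under the message union introduced in the ChatWindow diff below, that projection is a one-line type-guard filter (a sketch of what the hook presumably does):

import { ChatTurn, Message } from '@/components/ChatWindow';

// Sketch: narrow the full message stream to user/assistant turns.
const toChatTurns = (messages: Message[]): ChatTurn[] =>
  messages.filter(
    (m): m is ChatTurn => m.role === 'user' || m.role === 'assistant',
  );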

View File

@@ -1,27 +1,47 @@
'use client';
import { useEffect, useRef, useState } from 'react';
import { Document } from '@langchain/core/documents';
import Navbar from './Navbar';
import Chat from './Chat';
import EmptyChat from './EmptyChat';
import crypto from 'crypto';
import { toast } from 'sonner';
import { useSearchParams } from 'next/navigation';
import { getSuggestions } from '@/lib/actions';
import { Settings } from 'lucide-react';
import Link from 'next/link';
import NextError from 'next/error';
import { useChat } from '@/lib/hooks/useChat';
export type Message = {
messageId: string;
export interface BaseMessage {
chatId: string;
messageId: string;
createdAt: Date;
}
export interface AssistantMessage extends BaseMessage {
role: 'assistant';
content: string;
role: 'user' | 'assistant';
suggestions?: string[];
sources?: Document[];
};
}
export interface UserMessage extends BaseMessage {
role: 'user';
content: string;
}
export interface SourceMessage extends BaseMessage {
role: 'source';
sources: Document[];
}
export interface SuggestionMessage extends BaseMessage {
role: 'suggestion';
suggestions: string[];
}
export type Message =
| AssistantMessage
| UserMessage
| SourceMessage
| SuggestionMessage;
export type ChatTurn = UserMessage | AssistantMessage;
export interface File {
fileName: string;
@@ -29,512 +49,8 @@ export interface File {
fileId: string;
}
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
chatModelProvider =
chatModelProvider || Object.keys(chatModelProviders)[0];
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
return toast.error('No chat models available');
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
!chatModelProviders[chatModelProvider]
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
const loadMessages = async (
chatId: string,
setMessages: (messages: Message[]) => void,
setIsMessagesLoaded: (loaded: boolean) => void,
setChatHistory: (history: [string, string][]) => void,
setFocusMode: (mode: string) => void,
setNotFound: (notFound: boolean) => void,
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
if (res.status === 404) {
setNotFound(true);
setIsMessagesLoaded(true);
return;
}
const data = await res.json();
const messages = data.messages.map((msg: any) => {
return {
...msg,
...JSON.parse(msg.metadata),
};
}) as Message[];
setMessages(messages);
const history = messages.map((msg) => {
return [msg.role, msg.content];
}) as [string, string][];
console.debug(new Date(), 'app:messages_loaded');
document.title = messages[0].content;
const files = data.chat.files.map((file: any) => {
return {
fileName: file.name,
fileExtension: file.name.split('.').pop(),
fileId: file.fileId,
};
});
setFiles(files);
setFileIds(files.map((file: File) => file.fileId));
setChatHistory(history);
setFocusMode(data.chat.focusMode);
setIsMessagesLoaded(true);
};
const ChatWindow = ({ id }: { id?: string }) => {
const searchParams = useSearchParams();
const initialMessage = searchParams.get('q');
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
const [messages, setMessages] = useState<Message[]>([]);
const [files, setFiles] = useState<File[]>([]);
const [fileIds, setFileIds] = useState<string[]>([]);
const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');
const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
const [notFound, setNotFound] = useState(false);
useEffect(() => {
if (
chatId &&
!newChatCreated &&
!isMessagesLoaded &&
messages.length === 0
) {
loadMessages(
chatId,
setMessages,
setIsMessagesLoaded,
setChatHistory,
setFocusMode,
setNotFound,
setFiles,
setFileIds,
);
} else if (!chatId) {
setNewChatCreated(true);
setIsMessagesLoaded(true);
setChatId(crypto.randomBytes(20).toString('hex'));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const messagesRef = useRef<Message[]>([]);
useEffect(() => {
messagesRef.current = messages;
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isConfigReady]);
const sendMessage = async (message: string, messageId?: string) => {
if (loading) return;
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
setLoading(true);
setMessageAppeared(false);
let sources: Document[] | undefined = undefined;
let receivedMessage = '';
let added = false;
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
setMessages((prevMessages) => [
...prevMessages,
{
content: message,
messageId: messageId,
chatId: chatId!,
role: 'user',
createdAt: new Date(),
},
]);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
return;
}
if (data.type === 'sources') {
sources = data.data;
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: '',
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
},
]);
added = true;
}
setMessageAppeared(true);
}
if (data.type === 'message') {
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: data.data,
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
sources: sources,
createdAt: new Date(),
},
]);
added = true;
}
setMessages((prev) =>
prev.map((message) => {
if (message.messageId === data.messageId) {
return { ...message, content: message.content + data.data };
}
return message;
}),
);
receivedMessage += data.data;
setMessageAppeared(true);
}
if (data.type === 'messageEnd') {
setChatHistory((prevHistory) => [
...prevHistory,
['human', message],
['assistant', receivedMessage],
]);
setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
if (
lastMsg.role === 'assistant' &&
lastMsg.sources &&
lastMsg.sources.length > 0 &&
!lastMsg.suggestions
) {
const suggestions = await getSuggestions(messagesRef.current);
setMessages((prev) =>
prev.map((msg) => {
if (msg.messageId === lastMsg.messageId) {
return { ...msg, suggestions: suggestions };
}
return msg;
}),
);
}
}
};
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
};
const rewrite = (messageId: string) => {
const index = messages.findIndex((msg) => msg.messageId === messageId);
if (index === -1) return;
const message = messages[index - 1];
setMessages((prev) => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});
setChatHistory((prev) => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});
sendMessage(message.content, message.messageId);
};
useEffect(() => {
if (isReady && initialMessage && isConfigReady) {
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isConfigReady, isReady, initialMessage]);
const ChatWindow = () => {
const { hasError, isReady, notFound, messages } = useChat();
if (hasError) {
return (
<div className="relative">
@@ -559,31 +75,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
<div>
{messages.length > 0 ? (
<>
<Navbar chatId={chatId!} messages={messages} />
<Chat
loading={loading}
messages={messages}
sendMessage={sendMessage}
messageAppeared={messageAppeared}
rewrite={rewrite}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<Navbar />
<Chat />
</>
) : (
<EmptyChat
sendMessage={sendMessage}
focusMode={focusMode}
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<EmptyChat />
)}
</div>
)
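
The removed sendMessage loop splits the streamed body on newlines and re-parses every line in the batch whenever a chunk ends mid-line, since partialChunk is only cleared when the whole batch parses. A line-buffered reader, sketched below under the same NDJSON wire format, parses each complete line exactly once:

// Sketch: parse newline-delimited JSON from a streamed response body,
// buffering only the trailing partial line between reads.
async function readNdjson(
  body: ReadableStream<Uint8Array>,
  onMessage: (msg: unknown) => void,
): Promise<void> {
  const reader = body.getReader();
  const decoder = new TextDecoder('utf-8');
  let buffer = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep the incomplete tail for the next read
    for (const line of lines) {
      if (line.trim()) onMessage(JSON.parse(line));
    }
  }
  if (buffer.trim()) onMessage(JSON.parse(buffer)); // flush the final line
}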

View File

@@ -0,0 +1,19 @@
const Citation = ({
href,
children,
}: {
href: string;
children: React.ReactNode;
}) => {
return (
<a
href={href}
target="_blank"
className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative"
>
{children}
</a>
);
};
export default Citation;
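
Citation pairs with the markdown override registered further down in the MessageBox diff (citation: { component: Citation }). A sketch of the transform that could rewrite inline [n] references into that tag before rendering; the real version presumably lives in the useChat parsing step, and the urls parameter is an assumption about how sources are passed in:

// Sketch: replace [n] references with <citation> tags the markdown
// override can render.
function linkCitations(text: string, urls: string[]): string {
  return text.replace(/\[(\d+)\]/g, (match, num: string) => {
    const url = urls[parseInt(num, 10) - 1];
    return url ? `<citation href="${url}">${num}</citation>` : match;
  });
}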

View File

@@ -0,0 +1,70 @@
import { Discover } from '@/app/discover/page';
import Link from 'next/link';
const MajorNewsCard = ({
item,
isLeft = true,
}: {
item: Discover;
isLeft?: boolean;
}) => (
<Link
href={`/?q=Summary: ${item.url}`}
className="w-full group flex flex-row items-stretch gap-6 h-60 py-3"
target="_blank"
>
{isLeft ? (
<>
<div className="relative w-80 h-full overflow-hidden rounded-2xl flex-shrink-0">
<img
className="object-cover w-full h-full group-hover:scale-105 transition-transform duration-500"
src={
new URL(item.thumbnail).origin +
new URL(item.thumbnail).pathname +
`?id=${new URL(item.thumbnail).searchParams.get('id')}`
}
alt={item.title}
/>
</div>
<div className="flex flex-col justify-center flex-1 py-4">
<h2
className="text-3xl font-light mb-3 leading-tight line-clamp-3 group-hover:text-cyan-500 dark:group-hover:text-cyan-300 transition duration-200"
style={{ fontFamily: 'PP Editorial, serif' }}
>
{item.title}
</h2>
<p className="text-black/60 dark:text-white/60 text-base leading-relaxed line-clamp-4">
{item.content}
</p>
</div>
</>
) : (
<>
<div className="flex flex-col justify-center flex-1 py-4">
<h2
className="text-3xl font-light mb-3 leading-tight line-clamp-3 group-hover:text-cyan-500 dark:group-hover:text-cyan-300 transition duration-200"
style={{ fontFamily: 'PP Editorial, serif' }}
>
{item.title}
</h2>
<p className="text-black/60 dark:text-white/60 text-base leading-relaxed line-clamp-4">
{item.content}
</p>
</div>
<div className="relative w-80 h-full overflow-hidden rounded-2xl flex-shrink-0">
<img
className="object-cover w-full h-full group-hover:scale-105 transition-transform duration-500"
src={
new URL(item.thumbnail).origin +
new URL(item.thumbnail).pathname +
`?id=${new URL(item.thumbnail).searchParams.get('id')}`
}
alt={item.title}
/>
</div>
</>
)}
</Link>
);
export default MajorNewsCard;
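
The origin-plus-pathname-plus-?id rebuild of item.thumbnail recurs here, in SmallNewsCard below, and in NewsArticleWidget. A shared helper would keep the three call sites in sync; this is a sketch, as nothing in the diff extracts one:

// Sketch: strip a thumbnail URL down to origin + path, keeping only
// the `id` query parameter the image proxy needs.
function cleanThumbnailUrl(raw: string): string {
  const u = new URL(raw);
  const id = u.searchParams.get('id');
  return `${u.origin}${u.pathname}${id ? `?id=${id}` : ''}`;
}

Each <img> src above would then reduce to src={cleanThumbnailUrl(item.thumbnail)}.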

View File

@@ -0,0 +1,32 @@
import { Discover } from '@/app/discover/page';
import Link from 'next/link';
const SmallNewsCard = ({ item }: { item: Discover }) => (
<Link
href={`/?q=Summary: ${item.url}`}
className="rounded-3xl overflow-hidden bg-light-secondary dark:bg-dark-secondary shadow-sm shadow-light-200/10 dark:shadow-black/25 group flex flex-col"
target="_blank"
>
<div className="relative aspect-video overflow-hidden">
<img
className="object-cover w-full h-full group-hover:scale-105 transition-transform duration-300"
src={
new URL(item.thumbnail).origin +
new URL(item.thumbnail).pathname +
`?id=${new URL(item.thumbnail).searchParams.get('id')}`
}
alt={item.title}
/>
</div>
<div className="p-4">
<h3 className="font-semibold text-sm mb-2 leading-tight line-clamp-2 group-hover:text-cyan-500 dark:group-hover:text-cyan-300 transition duration-200">
{item.title}
</h3>
<p className="text-black/60 dark:text-white/60 text-xs leading-relaxed line-clamp-2">
{item.content}
</p>
</div>
</Link>
);
export default SmallNewsCard;

View File

@@ -1,32 +1,11 @@
import { Settings } from 'lucide-react';
import EmptyChatMessageInput from './EmptyChatMessageInput';
import { useEffect, useState } from 'react';
import { File } from './ChatWindow';
import Link from 'next/link';
import WeatherWidget from './WeatherWidget';
import NewsArticleWidget from './NewsArticleWidget';
const EmptyChat = ({
sendMessage,
focusMode,
setFocusMode,
optimizationMode,
setOptimizationMode,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const EmptyChat = () => {
return (
<div className="relative">
<div className="absolute w-full flex flex-row items-center justify-end mr-5 mt-5">
@@ -34,26 +13,18 @@ const EmptyChat = ({
<Settings className="cursor-pointer lg:hidden" />
</Link>
</div>
<div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-8">
<h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
Research begins here.
</h2>
<EmptyChatMessageInput
sendMessage={sendMessage}
focusMode={focusMode}
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<div className="flex flex-col items-center justify-center min-h-screen max-w-screen-sm mx-auto p-2 space-y-4">
<div className="flex flex-col items-center justify-center w-full space-y-8">
<h2 className="text-black/70 dark:text-white/70 text-3xl font-medium -mt-8">
Research begins here.
</h2>
<EmptyChatMessageInput />
</div>
<div className="flex flex-col w-full gap-4 mt-2 sm:flex-row sm:justify-center">
<div className="flex-1 max-w-xs">
<div className="flex-1 w-full">
<WeatherWidget />
</div>
<div className="flex-1 max-w-xs">
<div className="flex-1 w-full">
<NewsArticleWidget />
</div>
</div>

View File

@@ -1,34 +1,15 @@
import { ArrowRight } from 'lucide-react';
import { useEffect, useRef, useState } from 'react';
import TextareaAutosize from 'react-textarea-autosize';
import CopilotToggle from './MessageInputActions/Copilot';
import Focus from './MessageInputActions/Focus';
import Optimization from './MessageInputActions/Optimization';
import Attach from './MessageInputActions/Attach';
import { File } from './ChatWindow';
import { useChat } from '@/lib/hooks/useChat';
const EmptyChatMessageInput = ({
sendMessage,
focusMode,
setFocusMode,
optimizationMode,
setOptimizationMode,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const EmptyChatMessageInput = () => {
const { sendMessage } = useChat();
/* const [copilotEnabled, setCopilotEnabled] = useState(false); */
const [message, setMessage] = useState('');
const inputRef = useRef<HTMLTextAreaElement | null>(null);
@@ -73,7 +54,7 @@ const EmptyChatMessageInput = ({
}}
className="w-full"
>
<div className="flex flex-col bg-light-secondary dark:bg-dark-secondary px-5 pt-5 pb-2 rounded-lg w-full border border-light-200 dark:border-dark-200">
<div className="flex flex-col bg-light-secondary dark:bg-dark-secondary px-5 pt-5 pb-2 rounded-2xl w-full border border-light-200 dark:border-dark-200 shadow-sm shadow-light-200/10 dark:shadow-black/20 transition-all duration-200 focus-within:border-light-300 dark:focus-within:border-dark-300">
<TextareaAutosize
ref={inputRef}
value={message}
@@ -84,20 +65,11 @@ const EmptyChatMessageInput = ({
/>
<div className="flex flex-row items-center justify-between mt-4">
<div className="flex flex-row items-center space-x-2 lg:space-x-4">
<Focus focusMode={focusMode} setFocusMode={setFocusMode} />
<Attach
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
showText
/>
<Focus />
<Attach showText />
</div>
<div className="flex flex-row items-center space-x-1 sm:space-x-4">
<Optimization
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
/>
<Optimization />
<button
disabled={message.trim().length === 0}
className="bg-[#24A0ED] text-white disabled:text-black/50 dark:disabled:text-white/50 disabled:bg-[#e0e0dc] dark:disabled:bg-[#ececec21] hover:bg-opacity-85 transition duration-100 rounded-full p-2"

View File

@@ -1,12 +1,13 @@
import { Check, ClipboardList } from 'lucide-react';
import { Message } from '../ChatWindow';
import { useState } from 'react';
import { Section } from '@/lib/hooks/useChat';
const Copy = ({
message,
section,
initialMessage,
}: {
message: Message;
section: Section;
initialMessage: string;
}) => {
const [copied, setCopied] = useState(false);
@@ -14,7 +15,7 @@ const Copy = ({
return (
<button
onClick={() => {
const contentToCopy = `${initialMessage}${message.sources && message.sources.length > 0 && `\n\nCitations:\n${message.sources?.map((source: any, i: any) => `[${i + 1}] ${source.metadata.url}`).join(`\n`)}`}`;
const contentToCopy = `${initialMessage}${section?.sourceMessage?.sources && section.sourceMessage.sources.length > 0 ? `\n\nCitations:\n${section.sourceMessage.sources.map((source: any, i: any) => `[${i + 1}] ${source.metadata.url}`).join(`\n`)}` : ''}`;
navigator.clipboard.writeText(contentToCopy);
setCopied(true);
setTimeout(() => setCopied(false), 1000);

View File

@@ -1,8 +1,7 @@
'use client';
/* eslint-disable @next/next/no-img-element */
import React, { MutableRefObject, useEffect, useState } from 'react';
import { Message } from './ChatWindow';
import React, { MutableRefObject } from 'react';
import { cn } from '@/lib/utils';
import {
BookCopy,
@@ -20,90 +19,37 @@ import SearchImages from './SearchImages';
import SearchVideos from './SearchVideos';
import { useSpeech } from 'react-text-to-speech';
import ThinkBox from './ThinkBox';
import { useChat, Section } from '@/lib/hooks/useChat';
import Citation from './Citation';
const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
return <ThinkBox content={children as string} />;
const ThinkTagProcessor = ({
children,
thinkingEnded,
}: {
children: React.ReactNode;
thinkingEnded: boolean;
}) => {
return (
<ThinkBox content={children as string} thinkingEnded={thinkingEnded} />
);
};
const MessageBox = ({
message,
messageIndex,
history,
loading,
section,
sectionIndex,
dividerRef,
isLast,
rewrite,
sendMessage,
}: {
message: Message;
messageIndex: number;
history: Message[];
loading: boolean;
section: Section;
sectionIndex: number;
dividerRef?: MutableRefObject<HTMLDivElement | null>;
isLast: boolean;
rewrite: (messageId: string) => void;
sendMessage: (message: string) => void;
}) => {
const [parsedMessage, setParsedMessage] = useState(message.content);
const [speechMessage, setSpeechMessage] = useState(message.content);
const { loading, chatTurns, sendMessage, rewrite } = useChat();
useEffect(() => {
const citationRegex = /\[([^\]]+)\]/g;
const regex = /\[(\d+)\]/g;
let processedMessage = message.content;
if (message.role === 'assistant' && message.content.includes('<think>')) {
const openThinkTag = processedMessage.match(/<think>/g)?.length || 0;
const closeThinkTag = processedMessage.match(/<\/think>/g)?.length || 0;
if (openThinkTag > closeThinkTag) {
processedMessage += '</think> <a> </a>'; // The extra <a> </a> is to prevent the think component from looking bad
}
}
if (
message.role === 'assistant' &&
message?.sources &&
message.sources.length > 0
) {
setParsedMessage(
processedMessage.replace(
citationRegex,
(_, capturedContent: string) => {
const numbers = capturedContent
.split(',')
.map((numStr) => numStr.trim());
const linksHtml = numbers
.map((numStr) => {
const number = parseInt(numStr);
if (isNaN(number) || number <= 0) {
return `[${numStr}]`;
}
const source = message.sources?.[number - 1];
const url = source?.metadata?.url;
if (url) {
return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
} else {
return `[${numStr}]`;
}
})
.join('');
return linksHtml;
},
),
);
setSpeechMessage(message.content.replace(regex, ''));
return;
}
setSpeechMessage(message.content.replace(regex, ''));
setParsedMessage(processedMessage);
}, [message.content, message.sources, message.role]);
const parsedMessage = section.parsedAssistantMessage || '';
const speechMessage = section.speechMessage || '';
const thinkingEnded = section.thinkingEnded;
const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
@@ -111,33 +57,31 @@ const MessageBox = ({
overrides: {
think: {
component: ThinkTagProcessor,
props: {
thinkingEnded: thinkingEnded,
},
},
citation: {
component: Citation,
},
},
};
return (
<div>
{message.role === 'user' && (
<div
className={cn(
'w-full',
messageIndex === 0 ? 'pt-16' : 'pt-8',
'break-words',
)}
>
<h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
{message.content}
</h2>
</div>
)}
<div className="space-y-6">
<div className={'w-full pt-8 break-words'}>
<h2 className="text-black dark:text-white font-medium text-3xl lg:w-9/12">
{section.userMessage.content}
</h2>
</div>
{message.role === 'assistant' && (
<div className="flex flex-col space-y-9 lg:space-y-0 lg:flex-row lg:justify-between lg:space-x-9">
<div
ref={dividerRef}
className="flex flex-col space-y-6 w-full lg:w-9/12"
>
{message.sources && message.sources.length > 0 && (
<div className="flex flex-col space-y-9 lg:space-y-0 lg:flex-row lg:justify-between lg:space-x-9">
<div
ref={dividerRef}
className="flex flex-col space-y-6 w-full lg:w-9/12"
>
{section.sourceMessage &&
section.sourceMessage.sources.length > 0 && (
<div className="flex flex-col space-y-2">
<div className="flex flex-row items-center space-x-2">
<BookCopy className="text-black dark:text-white" size={20} />
@@ -145,10 +89,12 @@ const MessageBox = ({
Sources
</h3>
</div>
<MessageSources sources={message.sources} />
<MessageSources sources={section.sourceMessage.sources} />
</div>
)}
<div className="flex flex-col space-y-2">
<div className="flex flex-col space-y-2">
{section.sourceMessage && (
<div className="flex flex-row items-center space-x-2">
<Disc3
className={cn(
@@ -161,100 +107,115 @@ const MessageBox = ({
Answer
</h3>
</div>
)}
<Markdown
className={cn(
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'max-w-none break-words text-black dark:text-white',
{section.assistantMessage && (
<>
<Markdown
className={cn(
'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
'max-w-none break-words text-black dark:text-white',
)}
options={markdownOverrides}
>
{parsedMessage}
</Markdown>
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
<div className="flex flex-row items-center space-x-1">
<Rewrite
rewrite={rewrite}
messageId={section.assistantMessage.messageId}
/>
</div>
<div className="flex flex-row items-center space-x-1">
<Copy
initialMessage={section.assistantMessage.content}
section={section}
/>
<button
onClick={() => {
if (speechStatus === 'started') {
stop();
} else {
start();
}
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{speechStatus === 'started' ? (
<StopCircle size={18} />
) : (
<Volume2 size={18} />
)}
</button>
</div>
</div>
)}
options={markdownOverrides}
>
{parsedMessage}
</Markdown>
{loading && isLast ? null : (
<div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
<div className="flex flex-row items-center space-x-1">
{/* <button className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black text-black dark:hover:text-white">
<Share size={18} />
</button> */}
<Rewrite rewrite={rewrite} messageId={message.messageId} />
</div>
<div className="flex flex-row items-center space-x-1">
<Copy initialMessage={message.content} message={message} />
<button
onClick={() => {
if (speechStatus === 'started') {
stop();
} else {
start();
}
}}
className="p-2 text-black/70 dark:text-white/70 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
>
{speechStatus === 'started' ? (
<StopCircle size={18} />
) : (
<Volume2 size={18} />
)}
</button>
</div>
</div>
)}
{isLast &&
message.suggestions &&
message.suggestions.length > 0 &&
message.role === 'assistant' &&
!loading && (
<>
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
<div className="flex flex-col space-y-3 text-black dark:text-white">
<div className="flex flex-row items-center space-x-2 mt-4">
<Layers3 />
<h3 className="text-xl font-medium">Related</h3>
{isLast &&
section.suggestions &&
section.suggestions.length > 0 &&
section.assistantMessage &&
!loading && (
<div className="mt-8 pt-6 border-t border-light-200/50 dark:border-dark-200/50">
<div className="flex flex-row items-center space-x-2 mb-4">
<Layers3
className="text-black dark:text-white"
size={20}
/>
<h3 className="text-black dark:text-white font-medium text-xl">
Related
</h3>
</div>
<div className="flex flex-col space-y-3">
{message.suggestions.map((suggestion, i) => (
<div
className="flex flex-col space-y-3 text-sm"
key={i}
>
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
<div
onClick={() => {
sendMessage(suggestion);
}}
className="cursor-pointer flex flex-row justify-between font-medium space-x-2 items-center"
>
<p className="transition duration-200 hover:text-[#24A0ED]">
{suggestion}
</p>
<Plus
size={20}
className="text-[#24A0ED] flex-shrink-0"
/>
<div className="space-y-0">
{section.suggestions.map(
(suggestion: string, i: number) => (
<div key={i}>
{i > 0 && (
<div className="h-px bg-light-200/40 dark:bg-dark-200/40 mx-3" />
)}
<button
onClick={() => sendMessage(suggestion)}
className="group w-full px-3 py-4 text-left transition-colors duration-200"
>
<div className="flex items-center justify-between gap-3">
<p className="text-sm text-black/70 dark:text-white/70 group-hover:text-[#24A0ED] transition-colors duration-200 leading-relaxed">
{suggestion}
</p>
<Plus
size={16}
className="text-black/40 dark:text-white/40 group-hover:text-[#24A0ED] transition-colors duration-200 flex-shrink-0"
/>
</div>
</button>
</div>
</div>
))}
),
)}
</div>
</div>
</>
)}
</div>
</div>
<div className="lg:sticky lg:top-20 flex flex-col items-center space-y-3 w-full lg:w-3/12 z-30 h-full pb-4">
<SearchImages
query={history[messageIndex - 1].content}
chatHistory={history.slice(0, messageIndex - 1)}
messageId={message.messageId}
/>
<SearchVideos
chatHistory={history.slice(0, messageIndex - 1)}
query={history[messageIndex - 1].content}
messageId={message.messageId}
/>
)}
</>
)}
</div>
</div>
)}
{section.assistantMessage && (
<div className="lg:sticky lg:top-20 flex flex-col items-center space-y-3 w-full lg:w-3/12 z-30 h-full pb-4">
<SearchImages
query={section.userMessage.content}
chatHistory={chatTurns.slice(0, sectionIndex * 2)}
messageId={section.assistantMessage.messageId}
/>
<SearchVideos
chatHistory={chatTurns.slice(0, sectionIndex * 2)}
query={section.userMessage.content}
messageId={section.assistantMessage.messageId}
/>
</div>
)}
</div>
</div>
);
};
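
MessageBox now reads section.parsedAssistantMessage and section.thinkingEnded instead of parsing in an effect. The removed effect shows the shape of that work; the hook presumably balances dangling <think> tags and flags when thinking has finished, along these lines (a sketch, not the hook's code):

// Sketch: balance <think> tags in a streamed answer and report whether
// the model has finished thinking (every opened tag was closed).
function processThinkTags(content: string): {
  text: string;
  thinkingEnded: boolean;
} {
  const opens = (content.match(/<think>/g) ?? []).length;
  const closes = (content.match(/<\/think>/g) ?? []).length;
  const thinkingEnded = opens > 0 && closes >= opens;
  // Close a dangling tag so the markdown override still renders cleanly.
  const text = opens > closes ? `${content}</think>` : content;
  return { text, thinkingEnded };
}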

View File

@@ -6,22 +6,11 @@ import Attach from './MessageInputActions/Attach';
import CopilotToggle from './MessageInputActions/Copilot';
import { File } from './ChatWindow';
import AttachSmall from './MessageInputActions/AttachSmall';
import { useChat } from '@/lib/hooks/useChat';
const MessageInput = () => {
const { loading, sendMessage } = useChat();
const MessageInput = ({
sendMessage,
loading,
fileIds,
setFileIds,
files,
setFiles,
}: {
sendMessage: (message: string) => void;
loading: boolean;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const [message, setMessage] = useState('');
const [textareaRows, setTextareaRows] = useState(1);
@@ -75,18 +64,11 @@ const MessageInput = ({
}
}}
className={cn(
'bg-light-secondary dark:bg-dark-secondary p-4 flex items-center overflow-hidden border border-light-200 dark:border-dark-200',
mode === 'multi' ? 'flex-col rounded-lg' : 'flex-row rounded-full',
'bg-light-secondary dark:bg-dark-secondary p-4 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 shadow-sm shadow-light-200/10 dark:shadow-black/20 transition-all duration-200 focus-within:border-light-300 dark:focus-within:border-dark-300',
mode === 'multi' ? 'flex-col rounded-2xl' : 'flex-row rounded-full',
)}
>
{mode === 'single' && (
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
)}
{mode === 'single' && <AttachSmall />}
<TextareaAutosize
ref={inputRef}
value={message}
@@ -113,12 +95,7 @@ const MessageInput = ({
)}
{mode === 'multi' && (
<div className="flex flex-row items-center justify-between w-full pt-2">
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<AttachSmall />
<div className="flex flex-row items-center space-x-4">
<CopilotToggle
copilotEnabled={copilotEnabled}
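
The single/multi split referenced in this diff switches the input between a rounded-full pill and a rounded-2xl card as the textarea grows. The flip is driven by the tracked textareaRows state, roughly like this sketch; the exact thresholds are an assumption:

// Sketch: promote the input to multi-line mode once the textarea wraps,
// and drop back to single-line when it empties.
useEffect(() => {
  if (textareaRows >= 2 && message && mode === 'single') {
    setMode('multi');
  } else if (!message && mode === 'multi') {
    setMode('single');
  }
}, [textareaRows, mode, message]);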

View File

@@ -7,21 +7,11 @@ import {
} from '@headlessui/react';
import { CopyPlus, File, LoaderCircle, Plus, Trash } from 'lucide-react';
import { Fragment, useRef, useState } from 'react';
import { File as FileType } from '../ChatWindow';
import { useChat } from '@/lib/hooks/useChat';
const Attach = ({ showText }: { showText?: boolean }) => {
const { files, setFiles, setFileIds, fileIds } = useChat();
const Attach = ({
fileIds,
setFileIds,
showText,
files,
setFiles,
}: {
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
showText?: boolean;
files: FileType[];
setFiles: (files: FileType[]) => void;
}) => {
const [loading, setLoading] = useState(false);
const fileInputRef = useRef<any>();
@@ -142,8 +132,11 @@ const Attach = ({
key={i}
className="flex flex-row items-center justify-start w-full space-x-3 p-3"
>
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
<div className="bg-light-100 dark:bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File
size={16}
className="text-black/70 dark:text-white/70"
/>
</div>
<p className="text-black/70 dark:text-white/70 text-sm">
{file.fileName.length > 25

View File

@@ -8,18 +8,11 @@ import {
import { CopyPlus, File, LoaderCircle, Plus, Trash } from 'lucide-react';
import { Fragment, useRef, useState } from 'react';
import { File as FileType } from '../ChatWindow';
import { useChat } from '@/lib/hooks/useChat';
const AttachSmall = () => {
const { files, setFiles, setFileIds, fileIds } = useChat();
const AttachSmall = ({
fileIds,
setFileIds,
files,
setFiles,
}: {
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: FileType[];
setFiles: (files: FileType[]) => void;
}) => {
const [loading, setLoading] = useState(false);
const fileInputRef = useRef<any>();
@@ -114,8 +107,11 @@ const AttachSmall = ({
key={i}
className="flex flex-row items-center justify-start w-full space-x-3 p-3"
>
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
<div className="bg-light-100 dark:bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File
size={16}
className="text-black/70 dark:text-white/70"
/>
</div>
<p className="text-black/70 dark:text-white/70 text-sm">
{file.fileName.length > 25
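
Attach and AttachSmall both upload the chosen files before adding them to the chat. The round-trip, sketched below, posts multipart form data and records the returned file ids; the /api/uploads endpoint name and the response shape are assumptions:

// Sketch of the upload round-trip the attach components perform.
async function uploadFiles(
  fileList: FileList,
): Promise<{ fileName: string; fileExtension: string; fileId: string }[]> {
  const data = new FormData();
  Array.from(fileList).forEach((file) => data.append('files', file));
  const res = await fetch('/api/uploads', { method: 'POST', body: data });
  if (!res.ok) throw new Error(`Upload failed: ${res.status}`);
  const { files } = await res.json();
  return files; // fed into setFiles / setFileIds from useChat
}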

View File

@@ -15,6 +15,7 @@ import {
} from '@headlessui/react';
import { SiReddit, SiYoutube } from '@icons-pack/react-simple-icons';
import { Fragment } from 'react';
import { useChat } from '@/lib/hooks/useChat';
const focusModes = [
{
@@ -55,13 +56,9 @@ const focusModes = [
},
];
const Focus = ({
focusMode,
setFocusMode,
}: {
focusMode: string;
setFocusMode: (mode: string) => void;
}) => {
const Focus = () => {
const { focusMode, setFocusMode } = useChat();
return (
<Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg mt-[6.5px]">
<PopoverButton

View File

@@ -7,6 +7,7 @@ import {
Transition,
} from '@headlessui/react';
import { Fragment } from 'react';
import { useChat } from '@/lib/hooks/useChat';
const OptimizationModes = [
{
@@ -34,13 +35,9 @@ const OptimizationModes = [
},
];
const Optimization = ({
optimizationMode,
setOptimizationMode,
}: {
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
}) => {
const Optimization = () => {
const { optimizationMode, setOptimizationMode } = useChat();
return (
<Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg">
<PopoverButton

View File

@@ -10,6 +10,7 @@ import {
Transition,
} from '@headlessui/react';
import jsPDF from 'jspdf';
import { useChat, Section } from '@/lib/hooks/useChat';
const downloadFile = (filename: string, content: string, type: string) => {
const blob = new Blob([content], { type });
@@ -25,19 +26,37 @@ const downloadFile = (filename: string, content: string, type: string) => {
}, 0);
};
const exportAsMarkdown = (messages: Message[], title: string) => {
const date = new Date(messages[0]?.createdAt || Date.now()).toLocaleString();
const exportAsMarkdown = (sections: Section[], title: string) => {
const date = new Date(
sections[0]?.userMessage?.createdAt || Date.now(),
).toLocaleString();
let md = `# 💬 Chat Export: ${title}\n\n`;
md += `*Exported on: ${date}*\n\n---\n`;
messages.forEach((msg, idx) => {
md += `\n---\n`;
md += `**${msg.role === 'user' ? '🧑 User' : '🤖 Assistant'}**
sections.forEach((section, idx) => {
if (section.userMessage) {
md += `\n---\n`;
md += `**🧑 User**
`;
md += `*${new Date(msg.createdAt).toLocaleString()}*\n\n`;
md += `> ${msg.content.replace(/\n/g, '\n> ')}\n`;
if (msg.sources && msg.sources.length > 0) {
md += `*${new Date(section.userMessage.createdAt).toLocaleString()}*\n\n`;
md += `> ${section.userMessage.content.replace(/\n/g, '\n> ')}\n`;
}
if (section.assistantMessage) {
md += `\n---\n`;
md += `**🤖 Assistant**
`;
md += `*${new Date(section.assistantMessage.createdAt).toLocaleString()}*\n\n`;
md += `> ${section.assistantMessage.content.replace(/\n/g, '\n> ')}\n`;
}
if (
section.sourceMessage &&
section.sourceMessage.sources &&
section.sourceMessage.sources.length > 0
) {
md += `\n**Citations:**\n`;
msg.sources.forEach((src: any, i: number) => {
section.sourceMessage.sources.forEach((src: any, i: number) => {
const url = src.metadata?.url || '';
md += `- [${i + 1}] [${url}](${url})\n`;
});
@@ -47,9 +66,11 @@ const exportAsMarkdown = (messages: Message[], title: string) => {
downloadFile(`${title || 'chat'}.md`, md, 'text/markdown');
};
const exportAsPDF = (messages: Message[], title: string) => {
const exportAsPDF = (sections: Section[], title: string) => {
const doc = new jsPDF();
const date = new Date(messages[0]?.createdAt || Date.now()).toLocaleString();
const date = new Date(
sections[0]?.userMessage?.createdAt || Date.now(),
).toLocaleString();
let y = 15;
const pageHeight = doc.internal.pageSize.height;
doc.setFontSize(18);
@@ -63,92 +84,140 @@ const exportAsPDF = (messages: Message[], title: string) => {
doc.line(10, y, 200, y);
y += 6;
doc.setTextColor(30);
messages.forEach((msg, idx) => {
if (y > pageHeight - 30) {
doc.addPage();
y = 15;
}
doc.setFont('helvetica', 'bold');
doc.text(`${msg.role === 'user' ? 'User' : 'Assistant'}`, 10, y);
doc.setFont('helvetica', 'normal');
doc.setFontSize(10);
doc.setTextColor(120);
doc.text(`${new Date(msg.createdAt).toLocaleString()}`, 40, y);
y += 6;
doc.setTextColor(30);
doc.setFontSize(12);
const lines = doc.splitTextToSize(msg.content, 180);
for (let i = 0; i < lines.length; i++) {
if (y > pageHeight - 20) {
sections.forEach((section, idx) => {
if (section.userMessage) {
if (y > pageHeight - 30) {
doc.addPage();
y = 15;
}
doc.text(lines[i], 12, y);
doc.setFont('helvetica', 'bold');
doc.text('User', 10, y);
doc.setFont('helvetica', 'normal');
doc.setFontSize(10);
doc.setTextColor(120);
doc.text(
`${new Date(section.userMessage.createdAt).toLocaleString()}`,
40,
y,
);
y += 6;
}
if (msg.sources && msg.sources.length > 0) {
doc.setFontSize(11);
doc.setTextColor(80);
if (y > pageHeight - 20) {
doc.addPage();
y = 15;
}
doc.text('Citations:', 12, y);
y += 5;
msg.sources.forEach((src: any, i: number) => {
const url = src.metadata?.url || '';
if (y > pageHeight - 15) {
doc.setTextColor(30);
doc.setFontSize(12);
const userLines = doc.splitTextToSize(section.userMessage.content, 180);
for (let i = 0; i < userLines.length; i++) {
if (y > pageHeight - 20) {
doc.addPage();
y = 15;
}
doc.text(`- [${i + 1}] ${url}`, 15, y);
y += 5;
});
doc.text(userLines[i], 12, y);
y += 6;
}
y += 6;
doc.setDrawColor(230);
if (y > pageHeight - 10) {
doc.addPage();
y = 15;
}
doc.line(10, y, 200, y);
y += 4;
}
if (section.assistantMessage) {
if (y > pageHeight - 30) {
doc.addPage();
y = 15;
}
doc.setFont('helvetica', 'bold');
doc.text('Assistant', 10, y);
doc.setFont('helvetica', 'normal');
doc.setFontSize(10);
doc.setTextColor(120);
doc.text(
`${new Date(section.assistantMessage.createdAt).toLocaleString()}`,
40,
y,
);
y += 6;
doc.setTextColor(30);
doc.setFontSize(12);
const assistantLines = doc.splitTextToSize(
section.assistantMessage.content,
180,
);
for (let i = 0; i < assistantLines.length; i++) {
if (y > pageHeight - 20) {
doc.addPage();
y = 15;
}
doc.text(assistantLines[i], 12, y);
y += 6;
}
if (
section.sourceMessage &&
section.sourceMessage.sources &&
section.sourceMessage.sources.length > 0
) {
doc.setFontSize(11);
doc.setTextColor(80);
if (y > pageHeight - 20) {
doc.addPage();
y = 15;
}
doc.text('Citations:', 12, y);
y += 5;
section.sourceMessage.sources.forEach((src: any, i: number) => {
const url = src.metadata?.url || '';
if (y > pageHeight - 15) {
doc.addPage();
y = 15;
}
doc.text(`- [${i + 1}] ${url}`, 15, y);
y += 5;
});
doc.setTextColor(30);
}
y += 6;
doc.setDrawColor(230);
if (y > pageHeight - 10) {
doc.addPage();
y = 15;
}
doc.line(10, y, 200, y);
y += 4;
}
y += 6;
doc.setDrawColor(230);
if (y > pageHeight - 10) {
doc.addPage();
y = 15;
}
doc.line(10, y, 200, y);
y += 4;
});
doc.save(`${title || 'chat'}.pdf`);
};
const Navbar = ({
chatId,
messages,
}: {
messages: Message[];
chatId: string;
}) => {
const Navbar = () => {
const [title, setTitle] = useState<string>('');
const [timeAgo, setTimeAgo] = useState<string>('');
const { sections, chatId } = useChat();
useEffect(() => {
if (messages.length > 0) {
if (sections.length > 0 && sections[0].userMessage) {
const newTitle =
messages[0].content.length > 20
? `${messages[0].content.substring(0, 20).trim()}...`
: messages[0].content;
sections[0].userMessage.content.length > 20
? `${sections[0].userMessage.content.substring(0, 20).trim()}...`
: sections[0].userMessage.content;
setTitle(newTitle);
const newTimeAgo = formatTimeDifference(
new Date(),
messages[0].createdAt,
sections[0].userMessage.createdAt,
);
setTimeAgo(newTimeAgo);
}
}, [messages]);
}, [sections]);
useEffect(() => {
const intervalId = setInterval(() => {
if (messages.length > 0) {
if (sections.length > 0 && sections[0].userMessage) {
const newTimeAgo = formatTimeDifference(
new Date(),
messages[0].createdAt,
sections[0].userMessage.createdAt,
);
setTimeAgo(newTimeAgo);
}
@@ -159,54 +228,91 @@ const Navbar = ({
}, []);
return (
<div className="fixed z-40 top-0 left-0 right-0 px-4 lg:pl-[104px] lg:pr-6 lg:px-8 flex flex-row items-center justify-between w-full py-4 text-sm text-black dark:text-white/70 border-b bg-light-primary dark:bg-dark-primary border-light-100 dark:border-dark-200">
<a
href="/"
className="active:scale-95 transition duration-100 cursor-pointer lg:hidden"
>
<Edit size={17} />
</a>
<div className="hidden lg:flex flex-row items-center justify-center space-x-2">
<Clock size={17} />
<p className="text-xs">{timeAgo} ago</p>
</div>
<p className="hidden lg:flex">{title}</p>
<div className="sticky -mx-4 lg:mx-0 top-0 z-40 bg-light-primary/95 dark:bg-dark-primary/95 backdrop-blur-sm border-b border-light-200/50 dark:border-dark-200/30">
<div className="px-4 lg:px-6 py-4">
<div className="flex items-center justify-between">
<div className="flex items-center min-w-0">
<a
href="/"
className="lg:hidden mr-3 p-2 -ml-2 rounded-lg hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors duration-200"
>
<Edit size={18} className="text-black/70 dark:text-white/70" />
</a>
<div className="hidden lg:flex items-center gap-2 text-black/50 dark:text-white/50 min-w-0">
<Clock size={14} />
<span className="text-xs whitespace-nowrap">{timeAgo} ago</span>
</div>
</div>
<div className="flex flex-row items-center space-x-4">
<Popover className="relative">
<PopoverButton className="active:scale-95 transition duration-100 cursor-pointer p-2 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary">
<Share size={17} />
</PopoverButton>
<Transition
as={Fragment}
enter="transition ease-out duration-100"
enterFrom="opacity-0 translate-y-1"
enterTo="opacity-100 translate-y-0"
leave="transition ease-in duration-75"
leaveFrom="opacity-100 translate-y-0"
leaveTo="opacity-0 translate-y-1"
>
<PopoverPanel className="absolute right-0 mt-2 w-64 rounded-xl shadow-xl bg-light-primary dark:bg-dark-primary border border-light-200 dark:border-dark-200 z-50">
<div className="flex flex-col py-3 px-3 gap-2">
<button
className="flex items-center gap-2 px-4 py-2 text-left hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors text-black dark:text-white rounded-lg font-medium"
onClick={() => exportAsMarkdown(messages, title || '')}
>
<FileText size={17} className="text-[#24A0ED]" />
Export as Markdown
</button>
<button
className="flex items-center gap-2 px-4 py-2 text-left hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors text-black dark:text-white rounded-lg font-medium"
onClick={() => exportAsPDF(messages, title || '')}
>
<FileDown size={17} className="text-[#24A0ED]" />
Export as PDF
</button>
</div>
</PopoverPanel>
</Transition>
</Popover>
<DeleteChat redirect chatId={chatId} chats={[]} setChats={() => {}} />
<div className="flex-1 mx-4 min-w-0">
<h1 className="text-center text-sm font-medium text-black/80 dark:text-white/90 truncate">
{title || 'New Conversation'}
</h1>
</div>
<div className="flex items-center gap-1 min-w-0">
<Popover className="relative">
<PopoverButton className="p-2 rounded-lg hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors duration-200">
<Share size={16} className="text-black/60 dark:text-white/60" />
</PopoverButton>
<Transition
as={Fragment}
enter="transition ease-out duration-200"
enterFrom="opacity-0 translate-y-1"
enterTo="opacity-100 translate-y-0"
leave="transition ease-in duration-150"
leaveFrom="opacity-100 translate-y-0"
leaveTo="opacity-0 translate-y-1"
>
<PopoverPanel className="absolute right-0 mt-2 w-64 origin-top-right rounded-2xl bg-light-primary dark:bg-dark-primary border border-light-200 dark:border-dark-200 shadow-xl shadow-black/10 dark:shadow-black/30 z-50">
<div className="p-3">
<div className="mb-2">
<p className="text-xs font-medium text-black/40 dark:text-white/40 uppercase tracking-wide">
Export Chat
</p>
</div>
<div className="space-y-1">
<button
className="w-full flex items-center gap-3 px-3 py-2 text-left rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors duration-200"
onClick={() => exportAsMarkdown(sections, title || '')}
>
<FileText size={16} className="text-[#24A0ED]" />
<div>
<p className="text-sm font-medium text-black dark:text-white">
Markdown
</p>
<p className="text-xs text-black/50 dark:text-white/50">
.md format
</p>
</div>
</button>
<button
className="w-full flex items-center gap-3 px-3 py-2 text-left rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors duration-200"
onClick={() => exportAsPDF(sections, title || '')}
>
<FileDown size={16} className="text-[#24A0ED]" />
<div>
<p className="text-sm font-medium text-black dark:text-white">
PDF
</p>
<p className="text-xs text-black/50 dark:text-white/50">
Document format
</p>
</div>
</button>
</div>
</div>
</PopoverPanel>
</Transition>
</Popover>
<DeleteChat
redirect
chatId={chatId!}
chats={[]}
setChats={() => {}}
/>
</div>
</div>
</div>
</div>
);
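
exportAsPDF repeats the same page-break guard (if (y > pageHeight - N) { doc.addPage(); y = 15; }) a dozen times. A small helper would shrink each call site to one line; this is a sketch, as the diff keeps the checks inline:

import jsPDF from 'jspdf';

// Sketch: start a new page when fewer than `margin` units remain,
// returning the cursor position to write at next.
function ensureRoom(doc: jsPDF, y: number, margin = 20): number {
  if (y > doc.internal.pageSize.height - margin) {
    doc.addPage();
    return 15; // top margin used throughout exportAsPDF
  }
  return y;
}

// Usage inside the export loop:
//   y = ensureRoom(doc, y);
//   doc.text(line, 12, y);
//   y += 6;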

View File

@@ -27,38 +27,38 @@ const NewsArticleWidget = () => {
}, []);
return (
<div className="bg-light-secondary dark:bg-dark-secondary rounded-xl border border-light-200 dark:border-dark-200 shadow-sm flex flex-row items-center w-full h-24 min-h-[96px] max-h-[96px] px-3 py-2 gap-3 overflow-hidden">
<div className="bg-light-secondary dark:bg-dark-secondary rounded-2xl border border-light-200 dark:border-dark-200 shadow-sm shadow-light-200/10 dark:shadow-black/25 flex flex-row items-stretch w-full h-24 min-h-[96px] max-h-[96px] p-0 overflow-hidden">
{loading ? (
<>
<div className="animate-pulse flex flex-row items-center w-full h-full">
<div className="rounded-lg w-16 min-w-16 max-w-16 h-16 min-h-16 max-h-16 bg-light-200 dark:bg-dark-200 mr-3" />
<div className="flex flex-col justify-center flex-1 h-full w-0 gap-2">
<div className="h-4 w-3/4 rounded bg-light-200 dark:bg-dark-200" />
<div className="h-3 w-1/2 rounded bg-light-200 dark:bg-dark-200" />
</div>
<div className="animate-pulse flex flex-row items-stretch w-full h-full">
<div className="w-24 min-w-24 max-w-24 h-full bg-light-200 dark:bg-dark-200" />
<div className="flex flex-col justify-center flex-1 px-3 py-2 gap-2">
<div className="h-4 w-3/4 rounded bg-light-200 dark:bg-dark-200" />
<div className="h-3 w-1/2 rounded bg-light-200 dark:bg-dark-200" />
</div>
</>
</div>
) : error ? (
<div className="w-full text-xs text-red-400">Could not load news.</div>
) : article ? (
<a
href={`/?q=Summary: ${article.url}`}
className="flex flex-row items-center w-full h-full group"
className="flex flex-row items-stretch w-full h-full relative overflow-hidden group"
>
<img
className="object-cover rounded-lg w-16 min-w-16 max-w-16 h-16 min-h-16 max-h-16 border border-light-200 dark:border-dark-200 bg-light-200 dark:bg-dark-200 group-hover:opacity-90 transition"
src={
new URL(article.thumbnail).origin +
new URL(article.thumbnail).pathname +
`?id=${new URL(article.thumbnail).searchParams.get('id')}`
}
alt={article.title}
/>
<div className="flex flex-col justify-center flex-1 h-full pl-3 w-0">
<div className="font-bold text-xs text-black dark:text-white leading-tight truncate overflow-hidden whitespace-nowrap">
<div className="relative w-24 min-w-24 max-w-24 h-full overflow-hidden">
<img
className="object-cover w-full h-full bg-light-200 dark:bg-dark-200 group-hover:scale-110 transition-transform duration-300"
src={
new URL(article.thumbnail).origin +
new URL(article.thumbnail).pathname +
`?id=${new URL(article.thumbnail).searchParams.get('id')}`
}
alt={article.title}
/>
</div>
<div className="flex flex-col justify-center flex-1 px-3 py-2">
<div className="font-semibold text-xs text-black dark:text-white leading-tight line-clamp-2 mb-1">
{article.title}
</div>
<p className="text-black/70 dark:text-white/70 text-xs leading-snug truncate overflow-hidden whitespace-nowrap">
<p className="text-black/60 dark:text-white/60 text-[10px] leading-relaxed line-clamp-2">
{article.content}
</p>
</div>
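
The thumbnail URL above is deliberately rebuilt from its parts so that only the id query parameter survives and any other parameters are dropped. A standalone illustration (the sample URL is made up):

const raw = 'https://news.example.com/thumb.jpg?id=42&utm_source=feed&h=96';
const u = new URL(raw);
// Keep origin + path, re-attach only the `id` parameter.
const clean = `${u.origin}${u.pathname}?id=${u.searchParams.get('id')}`;
// clean === 'https://news.example.com/thumb.jpg?id=42'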


@@ -40,7 +40,7 @@ const Sidebar = ({ children }: { children: React.ReactNode }) => {
return (
<div>
<div className="hidden lg:fixed lg:inset-y-0 lg:z-50 lg:flex lg:w-20 lg:flex-col">
<div className="flex grow flex-col items-center justify-between gap-y-5 overflow-y-auto bg-light-secondary dark:bg-dark-secondary px-2 py-8">
<div className="flex grow flex-col items-center justify-between gap-y-5 overflow-y-auto bg-light-secondary dark:bg-dark-secondary px-2 py-8 mx-2 my-2 rounded-2xl shadow-sm shadow-light-200/10 dark:shadow-black/25">
<a href="/">
<SquarePen className="cursor-pointer" />
</a>


@@ -1,15 +1,23 @@
'use client';
import { useState } from 'react';
import { cn } from '@/lib/utils';
import { useEffect, useState } from 'react';
import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';
interface ThinkBoxProps {
content: string;
thinkingEnded: boolean;
}
const ThinkBox = ({ content }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(false);
const ThinkBox = ({ content, thinkingEnded }: ThinkBoxProps) => {
const [isExpanded, setIsExpanded] = useState(true);
useEffect(() => {
if (thinkingEnded) {
setIsExpanded(false);
} else {
setIsExpanded(true);
}
}, [thinkingEnded]);
return (
<div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">
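
The new thinkingEnded prop lets the box stay expanded while reasoning streams in and collapse once it finishes. A minimal sketch of how a parent might derive that flag; ThinkBox comes from the diff above, while the wrapper component, its import path, and the content prop are illustrative assumptions:

import ThinkBox from '@/components/ThinkBox'; // assumed import path

const StreamedThought = ({ content }: { content: string }) => {
  // Expanded while the model is still thinking; collapses once the
  // closing tag arrives (mirrors the useEffect in the diff above).
  const thinkingEnded = content.includes('</think>');
  const thought = content.replace(/<\/?think>/g, '');
  return <ThinkBox content={thought} thinkingEnded={thinkingEnded} />;
};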


@@ -9,60 +9,74 @@ const WeatherWidget = () => {
humidity: 0,
windSpeed: 0,
icon: '',
temperatureUnit: 'C',
windSpeedUnit: 'm/s',
});
const [loading, setLoading] = useState(true);
useEffect(() => {
const getApproxLocation = async () => {
const res = await fetch('https://ipwhois.app/json/');
const data = await res.json();
const getApproxLocation = async () => {
const res = await fetch('https://ipwhois.app/json/');
const data = await res.json();
return {
latitude: data.latitude,
longitude: data.longitude,
city: data.city,
};
return {
latitude: data.latitude,
longitude: data.longitude,
city: data.city,
};
};
const getLocation = async (
callback: (location: {
latitude: number;
longitude: number;
city: string;
}) => void,
) => {
/*
// Geolocation doesn't give the city, so we'll use ipapi for now
if (navigator.geolocation) {
const result = await navigator.permissions.query({
name: 'geolocation',
})
if (result.state === 'granted') {
navigator.geolocation.getCurrentPosition(position => {
callback({
latitude: position.coords.latitude,
longitude: position.coords.longitude,
})
})
} else if (result.state === 'prompt') {
callback(await getApproxLocation())
navigator.geolocation.getCurrentPosition(position => {})
} else if (result.state === 'denied') {
callback(await getApproxLocation())
}
} else {
callback(await getApproxLocation())
} */
const getLocation = async (
callback: (location: {
latitude: number;
longitude: number;
city: string;
}) => void,
) => {
if (navigator.geolocation) {
const result = await navigator.permissions.query({
name: 'geolocation',
});
if (result.state === 'granted') {
navigator.geolocation.getCurrentPosition(async (position) => {
const res = await fetch(
`https://api-bdc.io/data/reverse-geocode-client?latitude=${position.coords.latitude}&longitude=${position.coords.longitude}&localityLanguage=en`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
},
);
const data = await res.json();
callback({
latitude: position.coords.latitude,
longitude: position.coords.longitude,
city: data.locality,
});
});
} else if (result.state === 'prompt') {
callback(await getApproxLocation());
navigator.geolocation.getCurrentPosition((position) => {});
} else if (result.state === 'denied') {
callback(await getApproxLocation());
}
} else {
callback(await getApproxLocation());
};
}
};
const updateWeather = async () => {
getLocation(async (location) => {
const res = await fetch(`/api/weather`, {
method: 'POST',
body: JSON.stringify({
lat: location.latitude,
lng: location.longitude,
measureUnit: localStorage.getItem('measureUnit') ?? 'Metric',
}),
});
@@ -81,13 +95,21 @@ const WeatherWidget = () => {
humidity: data.humidity,
windSpeed: data.windSpeed,
icon: data.icon,
temperatureUnit: data.temperatureUnit,
windSpeedUnit: data.windSpeedUnit,
});
setLoading(false);
});
};
useEffect(() => {
updateWeather();
const intervalId = setInterval(updateWeather, 2 * 60 * 1000);
return () => clearInterval(intervalId);
}, []);
return (
<div className="bg-light-secondary dark:bg-dark-secondary rounded-xl border border-light-200 dark:border-dark-200 shadow-sm flex flex-row items-center w-full h-24 min-h-[96px] max-h-[96px] px-3 py-2 gap-3">
<div className="bg-light-secondary dark:bg-dark-secondary rounded-2xl border border-light-200 dark:border-dark-200 shadow-sm shadow-light-200/10 dark:shadow-black/25 flex flex-row items-center w-full h-24 min-h-[96px] max-h-[96px] px-3 py-2 gap-3">
{loading ? (
<>
<div className="flex flex-col items-center justify-center w-16 min-w-16 max-w-16 h-full animate-pulse">
@@ -115,25 +137,27 @@ const WeatherWidget = () => {
className="h-10 w-auto"
/>
<span className="text-base font-semibold text-black dark:text-white">
{data.temperature}°C
{data.temperature}°{data.temperatureUnit}
</span>
</div>
<div className="flex flex-col justify-between flex-1 h-full py-1">
<div className="flex flex-col justify-between flex-1 h-full py-2">
<div className="flex flex-row items-center justify-between">
<span className="text-xs font-medium text-black dark:text-white">
<span className="text-sm font-semibold text-black dark:text-white">
{data.location}
</span>
<span className="flex items-center text-xs text-black/60 dark:text-white/60">
<span className="flex items-center text-xs text-black/60 dark:text-white/60 font-medium">
<Wind className="w-3 h-3 mr-1" />
{data.windSpeed} km/h
{data.windSpeed} {data.windSpeedUnit}
</span>
</div>
<span className="text-xs text-black/60 dark:text-white/60 mt-1">
<span className="text-xs text-black/50 dark:text-white/50 italic">
{data.condition}
</span>
<div className="flex flex-row justify-between w-full mt-auto pt-1 border-t border-light-200 dark:border-dark-200 text-xs text-black/60 dark:text-white/60">
<span>Humidity: {data.humidity}%</span>
<span>Now</span>
<div className="flex flex-row justify-between w-full mt-auto pt-2 border-t border-light-200/50 dark:border-dark-200/50 text-xs text-black/50 dark:text-white/50 font-medium">
<span>Humidity {data.humidity}%</span>
<span className="font-semibold text-black/70 dark:text-white/70">
Now
</span>
</div>
</div>
</>
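
Taken together, the widget's request/response contract now looks roughly like this; the field names follow the setData call and JSX above, while the type alias and helper function are illustrative:

type WeatherData = {
  temperature: number;
  condition: string;
  humidity: number;
  windSpeed: number;
  icon: string;
  location: string;
  temperatureUnit: string; // e.g. 'C' or 'F'
  windSpeedUnit: string; // e.g. 'm/s' or 'mph'
};

const fetchWeather = async (lat: number, lng: number): Promise<WeatherData> => {
  const res = await fetch('/api/weather', {
    method: 'POST',
    body: JSON.stringify({
      lat,
      lng,
      // 'Metric' is the fallback used by the widget above.
      measureUnit: localStorage.getItem('measureUnit') ?? 'Metric',
    }),
  });
  return res.json();
};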


@@ -1,6 +1,6 @@
import { Message } from '@/components/ChatWindow';
export const getSuggestions = async (chatHisory: Message[]) => {
export const getSuggestions = async (chatHistory: Message[]) => {
const chatModel = localStorage.getItem('chatModel');
const chatModelProvider = localStorage.getItem('chatModelProvider');
@@ -13,7 +13,7 @@ export const getSuggestions = async (chatHisory: Message[]) => {
'Content-Type': 'application/json',
},
body: JSON.stringify({
chatHistory: chatHisory,
chatHistory: chatHistory,
chatModel: {
provider: chatModelProvider,
model: chatModel,


@@ -3,32 +3,18 @@ import {
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const imageSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: What is a cat?
Rephrased: A cat
2. Follow up question: What is a car? How does it works?
Rephrased: Car working
3. Follow up question: How does an AC work?
Rephrased: AC working
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type ImageSearchChainInput = {
@@ -54,12 +40,39 @@ const createImageSearchChain = (llm: BaseChatModel) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
ChatPromptTemplate.fromMessages([
['system', imageSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
],
['assistant', '<query>A cat</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
],
['assistant', '<query>Car working</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['bing images', 'google images'],
});


@@ -3,33 +3,19 @@ import {
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import LineOutputParser from '../outputParsers/lineOutputParser';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?
2. Follow up question: What is the theory of relativity?
Rephrased: What is theory of relativity
3. Follow up question: How does an AC work?
Rephrased: How does an AC work
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
const videoSearchChainPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
@@ -55,12 +41,37 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
ChatPromptTemplate.fromMessages([
['system', videoSearchChainPrompt],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
],
['assistant', '<query>How does a car work?</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
],
['assistant', '<query>Theory of relativity</query>'],
[
'user',
'<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
],
['assistant', '<query>AC working</query>'],
[
'user',
'<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
],
]),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
input = input.replace(/<think>.*?<\/think>/g, '');
const queryParser = new LineOutputParser({
key: 'query',
});
return await queryParser.parse(input);
}),
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['youtube'],
});
@@ -92,8 +103,8 @@ const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
const videoSearchChain = createVideoSearchChain(llm);
return videoSearchChain.invoke(input);
};
export default handleVideoSearch;
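
For reference, invoking the handler end to end might look like the sketch below; the chat model construction is illustrative (any BaseChatModel works) and the import path is an assumption:

import { ChatOpenAI } from '@langchain/openai';
import handleVideoSearch from '@/lib/chains/videoSearchAgent'; // assumed path

const run = async () => {
  const llm = new ChatOpenAI({ model: 'gpt-4o-mini', temperature: 0 }); // illustrative model
  const videos = await handleVideoSearch(
    { chat_history: [], query: 'How does a jet engine work?' },
    llm,
  );
  console.log(videos);
};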


@@ -31,13 +31,21 @@ interface Config {
};
OLLAMA: {
API_URL: string;
API_KEY: string;
};
DEEPSEEK: {
API_KEY: string;
};
AIMLAPI: {
API_KEY: string;
};
LM_STUDIO: {
API_URL: string;
};
LEMONADE: {
API_URL: string;
API_KEY: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
@@ -83,8 +91,12 @@ export const getSearxngApiEndpoint = () =>
export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
export const getOllamaApiKey = () => loadConfig().MODELS.OLLAMA.API_KEY;
export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
export const getCustomOpenaiApiKey = () =>
loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;
@@ -97,6 +109,11 @@ export const getCustomOpenaiModelName = () =>
export const getLMStudioApiEndpoint = () =>
loadConfig().MODELS.LM_STUDIO.API_URL;
export const getLemonadeApiEndpoint = () =>
loadConfig().MODELS.LEMONADE.API_URL;
export const getLemonadeApiKey = () => loadConfig().MODELS.LEMONADE.API_KEY;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;
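
The new accessors reduce to property reads over the parsed config. A hedged sketch of the shape loadConfig() is expected to return for the Lemonade provider (values are placeholders, not real endpoints or keys):

const exampleConfig = {
  MODELS: {
    LEMONADE: {
      API_URL: 'http://localhost:8000/api/v1', // placeholder
      API_KEY: 'sk-example-key', // placeholder
    },
  },
};

// Mirrors the exported getters above:
const lemonadeEndpoint = exampleConfig.MODELS.LEMONADE.API_URL;
const lemonadeKey = exampleConfig.MODELS.LEMONADE.API_KEY;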


@@ -1,5 +1,122 @@
import db from './';
import { migrate } from 'drizzle-orm/better-sqlite3/migrator';
import Database from 'better-sqlite3';
import path from 'path';
import fs from 'fs';
migrate(db, { migrationsFolder: path.join(process.cwd(), 'drizzle') });
const DATA_DIR = process.env.DATA_DIR || process.cwd();
const dbPath = path.join(DATA_DIR, './data/db.sqlite');
const db = new Database(dbPath);
const migrationsFolder = path.join(DATA_DIR, 'drizzle');
db.exec(`
CREATE TABLE IF NOT EXISTS ran_migrations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
run_on DATETIME DEFAULT CURRENT_TIMESTAMP
);
`);
function sanitizeSql(content: string) {
return content
.split(/\r?\n/)
.filter(
(l) => !l.trim().startsWith('-->') && !l.includes('statement-breakpoint'),
)
.join('\n');
}
fs.readdirSync(migrationsFolder)
.filter((f) => f.endsWith('.sql'))
.sort()
.forEach((file) => {
const filePath = path.join(migrationsFolder, file);
let content = fs.readFileSync(filePath, 'utf-8');
content = sanitizeSql(content);
const migrationName = file.split('_')[0] || file;
const already = db
.prepare('SELECT 1 FROM ran_migrations WHERE name = ?')
.get(migrationName);
if (already) {
console.log(`Skipping already-applied migration: ${file}`);
return;
}
try {
if (migrationName === '0001') {
const messages = db
.prepare(
'SELECT id, type, metadata, content, chatId, messageId FROM messages',
)
.all();
db.exec(`
CREATE TABLE IF NOT EXISTS messages_with_sources (
id INTEGER PRIMARY KEY,
type TEXT NOT NULL,
chatId TEXT NOT NULL,
createdAt TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP,
messageId TEXT NOT NULL,
content TEXT,
sources TEXT DEFAULT '[]'
);
`);
const insertMessage = db.prepare(`
INSERT INTO messages_with_sources (type, chatId, createdAt, messageId, content, sources)
VALUES (?, ?, ?, ?, ?, ?)
`);
messages.forEach((msg: any) => {
while (typeof msg.metadata === 'string') {
msg.metadata = JSON.parse(msg.metadata || '{}');
}
if (msg.type === 'user') {
insertMessage.run(
'user',
msg.chatId,
msg.metadata['createdAt'],
msg.messageId,
msg.content,
'[]',
);
} else if (msg.type === 'assistant') {
insertMessage.run(
'assistant',
msg.chatId,
msg.metadata['createdAt'],
msg.messageId,
msg.content,
'[]',
);
const sources = msg.metadata['sources'] || '[]';
if (sources && sources.length > 0) {
insertMessage.run(
'source',
msg.chatId,
msg.metadata['createdAt'],
`${msg.messageId}-source`,
'',
JSON.stringify(sources),
);
}
}
});
db.exec('DROP TABLE messages;');
db.exec('ALTER TABLE messages_with_sources RENAME TO messages;');
} else {
db.exec(content);
}
db.prepare('INSERT OR IGNORE INTO ran_migrations (name) VALUES (?)').run(
migrationName,
);
console.log(`Applied migration: ${file}`);
} catch (err) {
console.error(`Failed to apply migration ${file}:`, err);
throw err;
}
});
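
The while (typeof msg.metadata === 'string') loop above is what handles double-stringified JSON: if a legacy row was stringified twice, a single parse just yields another string. A standalone illustration (the sample value is made up):

// Simulate a legacy row whose metadata was stringified twice.
const doubleStringified = JSON.stringify(
  JSON.stringify({ createdAt: '2024-01-01T00:00:00Z' }),
);

let metadata: unknown = doubleStringified;
while (typeof metadata === 'string') {
  metadata = JSON.parse(metadata); // first pass yields a string, second an object
}

console.log((metadata as { createdAt: string }).createdAt); // 2024-01-01T00:00:00Z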


@@ -1,15 +1,23 @@
import { sql } from 'drizzle-orm';
import { text, integer, sqliteTable } from 'drizzle-orm/sqlite-core';
import { Document } from 'langchain/document';
export const messages = sqliteTable('messages', {
id: integer('id').primaryKey(),
content: text('content').notNull(),
role: text('type', { enum: ['assistant', 'user', 'source'] }).notNull(),
chatId: text('chatId').notNull(),
createdAt: text('createdAt')
.notNull()
.default(sql`CURRENT_TIMESTAMP`),
messageId: text('messageId').notNull(),
role: text('type', { enum: ['assistant', 'user'] }),
metadata: text('metadata', {
content: text('content'),
sources: text('sources', {
mode: 'json',
}),
})
.$type<Document[]>()
.default(sql`'[]'`),
});
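
Under the new schema, a source row carries its documents in the sources JSON column instead of metadata. A hedged sketch of an insert with drizzle-orm (import paths and values are assumptions; .run() is the synchronous better-sqlite3 execution style):

import db from '@/lib/db'; // assumed path
import { messages } from '@/lib/db/schema'; // assumed path

db.insert(messages)
  .values({
    role: 'source',
    chatId: 'abc123',
    messageId: 'msg1-source',
    content: '',
    sources: [
      {
        pageContent: 'Example snippet',
        metadata: { url: 'https://example.com' },
      },
    ],
  })
  .run();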
interface File {

src/lib/hooks/useChat.tsx (new file, 817 lines)

@@ -0,0 +1,817 @@
'use client';
import {
AssistantMessage,
ChatTurn,
Message,
SourceMessage,
SuggestionMessage,
UserMessage,
} from '@/components/ChatWindow';
import {
createContext,
useContext,
useEffect,
useMemo,
useRef,
useState,
} from 'react';
import crypto from 'crypto';
import { useSearchParams } from 'next/navigation';
import { toast } from 'sonner';
import { getSuggestions } from '../actions';
export type Section = {
userMessage: UserMessage;
assistantMessage: AssistantMessage | undefined;
parsedAssistantMessage: string | undefined;
speechMessage: string | undefined;
sourceMessage: SourceMessage | undefined;
thinkingEnded: boolean;
suggestions?: string[];
};
type ChatContext = {
messages: Message[];
chatTurns: ChatTurn[];
sections: Section[];
chatHistory: [string, string][];
files: File[];
fileIds: string[];
focusMode: string;
chatId: string | undefined;
optimizationMode: string;
isMessagesLoaded: boolean;
loading: boolean;
notFound: boolean;
messageAppeared: boolean;
isReady: boolean;
hasError: boolean;
setOptimizationMode: (mode: string) => void;
setFocusMode: (mode: string) => void;
setFiles: (files: File[]) => void;
setFileIds: (fileIds: string[]) => void;
sendMessage: (
message: string,
messageId?: string,
rewrite?: boolean,
) => Promise<void>;
rewrite: (messageId: string) => void;
};
export interface File {
fileName: string;
fileExtension: string;
fileId: string;
}
interface ChatModelProvider {
name: string;
provider: string;
}
interface EmbeddingModelProvider {
name: string;
provider: string;
}
const checkConfig = async (
setChatModelProvider: (provider: ChatModelProvider) => void,
setEmbeddingModelProvider: (provider: EmbeddingModelProvider) => void,
setIsConfigReady: (ready: boolean) => void,
setHasError: (hasError: boolean) => void,
) => {
try {
let chatModel = localStorage.getItem('chatModel');
let chatModelProvider = localStorage.getItem('chatModelProvider');
let embeddingModel = localStorage.getItem('embeddingModel');
let embeddingModelProvider = localStorage.getItem('embeddingModelProvider');
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (!autoImageSearch) {
localStorage.setItem('autoImageSearch', 'true');
}
if (!autoVideoSearch) {
localStorage.setItem('autoVideoSearch', 'false');
}
const providers = await fetch(`/api/models`, {
headers: {
'Content-Type': 'application/json',
},
}).then(async (res) => {
if (!res.ok)
throw new Error(
`Failed to fetch models: ${res.status} ${res.statusText}`,
);
return res.json();
});
if (
!chatModel ||
!chatModelProvider ||
!embeddingModel ||
!embeddingModelProvider
) {
if (!chatModel || !chatModelProvider) {
const chatModelProviders = providers.chatModelProviders;
const chatModelProvidersKeys = Object.keys(chatModelProviders);
if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
return toast.error('No chat models available');
} else {
chatModelProvider =
chatModelProvidersKeys.find(
(provider) =>
Object.keys(chatModelProviders[provider]).length > 0,
) || chatModelProvidersKeys[0];
}
if (
chatModelProvider === 'custom_openai' &&
Object.keys(chatModelProviders[chatModelProvider]).length === 0
) {
toast.error(
"Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
);
return setHasError(true);
}
chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
}
if (!embeddingModel || !embeddingModelProvider) {
const embeddingModelProviders = providers.embeddingModelProviders;
if (
!embeddingModelProviders ||
Object.keys(embeddingModelProviders).length === 0
)
return toast.error('No embedding models available');
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
}
localStorage.setItem('chatModel', chatModel!);
localStorage.setItem('chatModelProvider', chatModelProvider);
localStorage.setItem('embeddingModel', embeddingModel!);
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
} else {
const chatModelProviders = providers.chatModelProviders;
const embeddingModelProviders = providers.embeddingModelProviders;
if (
Object.keys(chatModelProviders).length > 0 &&
(!chatModelProviders[chatModelProvider] ||
Object.keys(chatModelProviders[chatModelProvider]).length === 0)
) {
const chatModelProvidersKeys = Object.keys(chatModelProviders);
chatModelProvider =
chatModelProvidersKeys.find(
(key) => Object.keys(chatModelProviders[key]).length > 0,
) || chatModelProvidersKeys[0];
localStorage.setItem('chatModelProvider', chatModelProvider);
}
if (
chatModelProvider &&
!chatModelProviders[chatModelProvider][chatModel]
) {
if (
chatModelProvider === 'custom_openai' &&
Object.keys(chatModelProviders[chatModelProvider]).length === 0
) {
toast.error(
"Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
);
return setHasError(true);
}
chatModel = Object.keys(
chatModelProviders[
Object.keys(chatModelProviders[chatModelProvider]).length > 0
? chatModelProvider
: Object.keys(chatModelProviders)[0]
],
)[0];
localStorage.setItem('chatModel', chatModel);
}
if (
Object.keys(embeddingModelProviders).length > 0 &&
!embeddingModelProviders[embeddingModelProvider]
) {
embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
localStorage.setItem('embeddingModelProvider', embeddingModelProvider);
}
if (
embeddingModelProvider &&
!embeddingModelProviders[embeddingModelProvider][embeddingModel]
) {
embeddingModel = Object.keys(
embeddingModelProviders[embeddingModelProvider],
)[0];
localStorage.setItem('embeddingModel', embeddingModel);
}
}
setChatModelProvider({
name: chatModel!,
provider: chatModelProvider,
});
setEmbeddingModelProvider({
name: embeddingModel!,
provider: embeddingModelProvider,
});
setIsConfigReady(true);
} catch (err) {
console.error('An error occurred while checking the configuration:', err);
setIsConfigReady(false);
setHasError(true);
}
};
const loadMessages = async (
chatId: string,
setMessages: (messages: Message[]) => void,
setIsMessagesLoaded: (loaded: boolean) => void,
setChatHistory: (history: [string, string][]) => void,
setFocusMode: (mode: string) => void,
setNotFound: (notFound: boolean) => void,
setFiles: (files: File[]) => void,
setFileIds: (fileIds: string[]) => void,
) => {
const res = await fetch(`/api/chats/${chatId}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
});
if (res.status === 404) {
setNotFound(true);
setIsMessagesLoaded(true);
return;
}
const data = await res.json();
const messages = data.messages as Message[];
setMessages(messages);
const chatTurns = messages.filter(
(msg): msg is ChatTurn => msg.role === 'user' || msg.role === 'assistant',
);
const history = chatTurns.map((msg) => {
return [msg.role, msg.content];
}) as [string, string][];
console.debug(new Date(), 'app:messages_loaded');
if (chatTurns.length > 0) {
document.title = chatTurns[0].content;
}
const files = data.chat.files.map((file: any) => {
return {
fileName: file.name,
fileExtension: file.name.split('.').pop(),
fileId: file.fileId,
};
});
setFiles(files);
setFileIds(files.map((file: File) => file.fileId));
setChatHistory(history);
setFocusMode(data.chat.focusMode);
setIsMessagesLoaded(true);
};
export const chatContext = createContext<ChatContext>({
chatHistory: [],
chatId: '',
fileIds: [],
files: [],
focusMode: '',
hasError: false,
isMessagesLoaded: false,
isReady: false,
loading: false,
messageAppeared: false,
messages: [],
chatTurns: [],
sections: [],
notFound: false,
optimizationMode: '',
rewrite: () => {},
sendMessage: async () => {},
setFileIds: () => {},
setFiles: () => {},
setFocusMode: () => {},
setOptimizationMode: () => {},
});
export const ChatProvider = ({
children,
id,
}: {
children: React.ReactNode;
id?: string;
}) => {
const searchParams = useSearchParams();
const initialMessage = searchParams.get('q');
const [chatId, setChatId] = useState<string | undefined>(id);
const [newChatCreated, setNewChatCreated] = useState(false);
const [loading, setLoading] = useState(false);
const [messageAppeared, setMessageAppeared] = useState(false);
const [chatHistory, setChatHistory] = useState<[string, string][]>([]);
const [messages, setMessages] = useState<Message[]>([]);
const [files, setFiles] = useState<File[]>([]);
const [fileIds, setFileIds] = useState<string[]>([]);
const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');
const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
const [notFound, setNotFound] = useState(false);
const [chatModelProvider, setChatModelProvider] = useState<ChatModelProvider>(
{
name: '',
provider: '',
},
);
const [embeddingModelProvider, setEmbeddingModelProvider] =
useState<EmbeddingModelProvider>({
name: '',
provider: '',
});
const [isConfigReady, setIsConfigReady] = useState(false);
const [hasError, setHasError] = useState(false);
const [isReady, setIsReady] = useState(false);
const messagesRef = useRef<Message[]>([]);
const chatTurns = useMemo((): ChatTurn[] => {
return messages.filter(
(msg): msg is ChatTurn => msg.role === 'user' || msg.role === 'assistant',
);
}, [messages]);
const sections = useMemo<Section[]>(() => {
const sections: Section[] = [];
messages.forEach((msg, i) => {
if (msg.role === 'user') {
const nextUserMessageIndex = messages.findIndex(
(m, j) => j > i && m.role === 'user',
);
const aiMessage = messages.find(
(m, j) =>
j > i &&
m.role === 'assistant' &&
(nextUserMessageIndex === -1 || j < nextUserMessageIndex),
) as AssistantMessage | undefined;
const sourceMessage = messages.find(
(m, j) =>
j > i &&
m.role === 'source' &&
m.sources &&
(nextUserMessageIndex === -1 || j < nextUserMessageIndex),
) as SourceMessage | undefined;
let thinkingEnded = false;
let processedMessage = aiMessage?.content ?? '';
let speechMessage = aiMessage?.content ?? '';
let suggestions: string[] = [];
if (aiMessage) {
const citationRegex = /\[([^\]]+)\]/g;
const regex = /\[(\d+)\]/g;
if (processedMessage.includes('<think>')) {
const openThinkTag =
processedMessage.match(/<think>/g)?.length || 0;
const closeThinkTag =
processedMessage.match(/<\/think>/g)?.length || 0;
if (openThinkTag && !closeThinkTag) {
processedMessage += '</think> <a> </a>';
}
}
if (aiMessage.content.includes('</think>')) {
thinkingEnded = true;
}
if (
sourceMessage &&
sourceMessage.sources &&
sourceMessage.sources.length > 0
) {
processedMessage = processedMessage.replace(
citationRegex,
(_, capturedContent: string) => {
const numbers = capturedContent
.split(',')
.map((numStr) => numStr.trim());
const linksHtml = numbers
.map((numStr) => {
const number = parseInt(numStr);
if (isNaN(number) || number <= 0) {
return `[${numStr}]`;
}
const source = sourceMessage.sources?.[number - 1];
const url = source?.metadata?.url;
if (url) {
return `<citation href="${url}">${numStr}</citation>`;
} else {
return ``;
}
})
.join('');
return linksHtml;
},
);
speechMessage = aiMessage.content.replace(regex, '');
} else {
processedMessage = processedMessage.replace(regex, '');
speechMessage = aiMessage.content.replace(regex, '');
}
const suggestionMessage = messages.find(
(m, j) =>
j > i &&
m.role === 'suggestion' &&
(nextUserMessageIndex === -1 || j < nextUserMessageIndex),
) as SuggestionMessage | undefined;
if (suggestionMessage && suggestionMessage.suggestions.length > 0) {
suggestions = suggestionMessage.suggestions;
}
}
sections.push({
userMessage: msg,
assistantMessage: aiMessage,
sourceMessage: sourceMessage,
parsedAssistantMessage: processedMessage,
speechMessage,
thinkingEnded,
suggestions: suggestions,
});
}
});
return sections;
}, [messages]);
useEffect(() => {
checkConfig(
setChatModelProvider,
setEmbeddingModelProvider,
setIsConfigReady,
setHasError,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
if (
chatId &&
!newChatCreated &&
!isMessagesLoaded &&
messages.length === 0
) {
loadMessages(
chatId,
setMessages,
setIsMessagesLoaded,
setChatHistory,
setFocusMode,
setNotFound,
setFiles,
setFileIds,
);
} else if (!chatId) {
setNewChatCreated(true);
setIsMessagesLoaded(true);
setChatId(crypto.randomBytes(20).toString('hex'));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
useEffect(() => {
messagesRef.current = messages;
}, [messages]);
useEffect(() => {
if (isMessagesLoaded && isConfigReady) {
setIsReady(true);
console.debug(new Date(), 'app:ready');
} else {
setIsReady(false);
}
}, [isMessagesLoaded, isConfigReady]);
const rewrite = (messageId: string) => {
const index = messages.findIndex((msg) => msg.messageId === messageId);
const chatTurnsIndex = chatTurns.findIndex(
(msg) => msg.messageId === messageId,
);
if (index === -1) return;
const message = chatTurns[chatTurnsIndex - 1];
setMessages((prev) => {
return [
...prev.slice(0, messages.length > 2 ? messages.indexOf(message) : 0),
];
});
setChatHistory((prev) => {
return [...prev.slice(0, chatTurns.length > 2 ? chatTurnsIndex - 1 : 0)];
});
sendMessage(message.content, message.messageId, true);
};
useEffect(() => {
if (isReady && initialMessage && isConfigReady) {
if (!isConfigReady) {
toast.error('Cannot send message before the configuration is ready');
return;
}
sendMessage(initialMessage);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isConfigReady, isReady, initialMessage]);
const sendMessage: ChatContext['sendMessage'] = async (
message,
messageId,
rewrite = false,
) => {
if (loading) return;
setLoading(true);
setMessageAppeared(false);
if (messages.length <= 1) {
window.history.replaceState(null, '', `/c/${chatId}`);
}
let recievedMessage = '';
let added = false;
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
setMessages((prevMessages) => [
...prevMessages,
{
content: message,
messageId: messageId,
chatId: chatId!,
role: 'user',
createdAt: new Date(),
},
]);
const messageHandler = async (data: any) => {
if (data.type === 'error') {
toast.error(data.data);
setLoading(false);
return;
}
if (data.type === 'sources') {
setMessages((prevMessages) => [
...prevMessages,
{
messageId: data.messageId,
chatId: chatId!,
role: 'source',
sources: data.data,
createdAt: new Date(),
},
]);
if (data.data.length > 0) {
setMessageAppeared(true);
}
}
if (data.type === 'message') {
if (!added) {
setMessages((prevMessages) => [
...prevMessages,
{
content: data.data,
messageId: data.messageId,
chatId: chatId!,
role: 'assistant',
createdAt: new Date(),
},
]);
added = true;
setMessageAppeared(true);
} else {
setMessages((prev) =>
prev.map((message) => {
if (
message.messageId === data.messageId &&
message.role === 'assistant'
) {
return { ...message, content: message.content + data.data };
}
return message;
}),
);
}
recievedMessage += data.data;
}
if (data.type === 'messageEnd') {
setChatHistory((prevHistory) => [
...prevHistory,
['human', message],
['assistant', recievedMessage],
]);
setLoading(false);
const lastMsg = messagesRef.current[messagesRef.current.length - 1];
const autoImageSearch = localStorage.getItem('autoImageSearch');
const autoVideoSearch = localStorage.getItem('autoVideoSearch');
if (autoImageSearch === 'true') {
document
.getElementById(`search-images-${lastMsg.messageId}`)
?.click();
}
if (autoVideoSearch === 'true') {
document
.getElementById(`search-videos-${lastMsg.messageId}`)
?.click();
}
/* Check if there are sources after message id's index and no suggestions */
const userMessageIndex = messagesRef.current.findIndex(
(msg) => msg.messageId === messageId && msg.role === 'user',
);
const sourceMessage = messagesRef.current.find(
(msg, i) => i > userMessageIndex && msg.role === 'source',
) as SourceMessage | undefined;
const suggestionMessageIndex = messagesRef.current.findIndex(
(msg, i) => i > userMessageIndex && msg.role === 'suggestion',
);
if (
sourceMessage &&
sourceMessage.sources.length > 0 &&
suggestionMessageIndex == -1
) {
const suggestions = await getSuggestions(messagesRef.current);
setMessages((prev) => {
return [
...prev,
{
role: 'suggestion',
suggestions: suggestions,
chatId: chatId!,
createdAt: new Date(),
messageId: crypto.randomBytes(7).toString('hex'),
},
];
});
}
}
};
const messageIndex = messages.findIndex((m) => m.messageId === messageId);
const res = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
content: message,
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
chatId: chatId!,
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: rewrite
? chatHistory.slice(0, messageIndex === -1 ? undefined : messageIndex)
: chatHistory,
chatModel: {
name: chatModelProvider.name,
provider: chatModelProvider.provider,
},
embeddingModel: {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
}),
});
if (!res.body) throw new Error('No response body');
const reader = res.body?.getReader();
const decoder = new TextDecoder('utf-8');
let partialChunk = '';
while (true) {
const { value, done } = await reader.read();
if (done) break;
partialChunk += decoder.decode(value, { stream: true });
try {
const messages = partialChunk.split('\n');
for (const msg of messages) {
if (!msg.trim()) continue;
const json = JSON.parse(msg);
messageHandler(json);
}
partialChunk = '';
} catch (error) {
console.warn('Incomplete JSON, waiting for next chunk...');
}
}
};
return (
<chatContext.Provider
value={{
messages,
chatTurns,
sections,
chatHistory,
files,
fileIds,
focusMode,
chatId,
hasError,
isMessagesLoaded,
isReady,
loading,
messageAppeared,
notFound,
optimizationMode,
setFileIds,
setFiles,
setFocusMode,
setOptimizationMode,
rewrite,
sendMessage,
}}
>
{children}
</chatContext.Provider>
);
};
export const useChat = () => {
const ctx = useContext(chatContext);
return ctx;
};
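
A hedged sketch of how the new hook is consumed: a page wraps its tree in ChatProvider and any descendant reads chat state through useChat(). The component names and props below are illustrative, not from the repo:

'use client';

import { ChatProvider, useChat } from '@/lib/hooks/useChat';

const SendButton = () => {
  const { loading, sections, sendMessage } = useChat();
  return (
    <button disabled={loading} onClick={() => sendMessage('Hello')}>
      {sections.length} turns so far
    </button>
  );
};

const ChatPage = ({ chatId }: { chatId?: string }) => (
  <ChatProvider id={chatId}>
    <SendButton />
  </ChatProvider>
);

export default ChatPage;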


@@ -4,7 +4,7 @@ interface LineOutputParserArgs {
key?: string;
}
class LineOutputParser extends BaseOutputParser<string> {
class LineOutputParser extends BaseOutputParser<string | undefined> {
private key = 'questions';
constructor(args?: LineOutputParserArgs) {
@@ -18,7 +18,7 @@ class LineOutputParser extends BaseOutputParser<string> {
lc_namespace = ['langchain', 'output_parsers', 'line_output_parser'];
async parse(text: string): Promise<string> {
async parse(text: string): Promise<string | undefined> {
text = text.trim() || '';
const regex = /^(\s*(-|\*|\d+\.\s|\d+\)\s|\u2022)\s*)+/;
@@ -26,7 +26,7 @@ class LineOutputParser extends BaseOutputParser<string> {
const endKeyIndex = text.indexOf(`</${this.key}>`);
if (startKeyIndex === -1 || endKeyIndex === -1) {
return '';
return undefined;
}
const questionsStartIndex =
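
Because parse() can now resolve to undefined, callers have to treat missing tags as "no result" rather than an empty string. A minimal sketch of a caller (the import path is an assumption):

import LineOutputParser from '@/lib/outputParsers/lineOutputParser'; // assumed path

const queryParser = new LineOutputParser({ key: 'query' });

const extractQuery = async (llmOutput: string): Promise<string | undefined> => {
  const query = await queryParser.parse(llmOutput);
  if (query === undefined) {
    // Invalid or absent <query> tags: skip the search instead of
    // searching for an empty string.
    return undefined;
  }
  return query;
};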


@@ -1,69 +0,0 @@
export const academicSearchRetrieverPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
Example:
1. Follow up question: How does stable diffusion work?
Rephrased: Stable diffusion working
2. Follow up question: What is linear algebra?
Rephrased: Linear algebra
3. Follow up question: What is the third law of thermodynamics?
Rephrased: Third law of thermodynamics
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
export const academicSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable.
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate.
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate.
### Citation Requirements
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
### Special Instructions
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity.
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.
- Provide explanations or historical context as needed to enhance understanding.
- End with a conclusion or overall perspective if relevant.
<context>
{context}
</context>
Current date & time in ISO format (UTC timezone) is: {date}.
`;


@@ -1,32 +1,13 @@
import {
academicSearchResponsePrompt,
academicSearchRetrieverPrompt,
} from './academicSearch';
import {
redditSearchResponsePrompt,
redditSearchRetrieverPrompt,
} from './redditSearch';
import { webSearchResponsePrompt, webSearchRetrieverPrompt } from './webSearch';
import {
wolframAlphaSearchResponsePrompt,
wolframAlphaSearchRetrieverPrompt,
} from './wolframAlpha';
webSearchResponsePrompt,
webSearchRetrieverFewShots,
webSearchRetrieverPrompt,
} from './webSearch';
import { writingAssistantPrompt } from './writingAssistant';
import {
youtubeSearchResponsePrompt,
youtubeSearchRetrieverPrompt,
} from './youtubeSearch';
export default {
webSearchResponsePrompt,
webSearchRetrieverPrompt,
academicSearchResponsePrompt,
academicSearchRetrieverPrompt,
redditSearchResponsePrompt,
redditSearchRetrieverPrompt,
wolframAlphaSearchResponsePrompt,
wolframAlphaSearchRetrieverPrompt,
webSearchRetrieverFewShots,
writingAssistantPrompt,
youtubeSearchResponsePrompt,
youtubeSearchRetrieverPrompt,
};


@@ -1,69 +0,0 @@
export const redditSearchRetrieverPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
Example:
1. Follow up question: Which company is most likely to create an AGI
Rephrased: Which company is most likely to create an AGI
2. Follow up question: Is Earth flat?
Rephrased: Is Earth flat?
3. Follow up question: Is there life on Mars?
Rephrased: Is there life on Mars?
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
export const redditSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable.
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate.
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate.
### Citation Requirements
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
### Special Instructions
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity.
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.
- Provide explanations or historical context as needed to enhance understanding.
- End with a conclusion or overall perspective if relevant.
<context>
{context}
</context>
Current date & time in ISO format (UTC timezone) is: {date}.
`;


@@ -1,65 +1,92 @@
import { BaseMessageLike } from '@langchain/core/messages';
export const webSearchRetrieverPrompt = `
You are an AI question rephraser. You will be given a conversation and a follow-up question; you will have to rephrase the follow-up question so it is a standalone question that can be used by another LLM to search the web for information to answer it.
If it is a simple writing task or a greeting like Hi, Hello, How are you, etc. (unless the greeting contains a question after it), you need to return \`not_needed\` as the response (this is because the LLM won't need to search the web for information on this topic).
If the user asks a question about some URL or wants you to summarize a PDF or a webpage (via URL), you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF, you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block; if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
There are several examples attached for your reference inside the below \`examples\` XML block.
**Note**: All user messages are individual entities and should be treated as such; do not mix conversations.
`;
<examples>
1. Follow up question: What is the capital of France
Rephrased question:\`
<question>
export const webSearchRetrieverFewShots: BaseMessageLike[] = [
[
'user',
`<conversation>
</conversation>
<query>
What is the capital of France
</query>`,
],
[
'assistant',
`<question>
Capital of france
</question>
\`
2. Hi, how are you?
Rephrased question\`
<question>
</question>`,
],
[
'user',
`<conversation>
</conversation>
<query>
Hi, how are you?
</query>`,
],
[
'assistant',
`<question>
not_needed
</question>
\`
3. Follow up question: What is Docker?
Rephrased question: \`
<question>
</question>`,
],
[
'user',
`<conversation>
</conversation>
<query>
What is Docker?
</query>`,
],
[
'assistant',
`<question>
What is Docker
</question>`,
],
[
'user',
`<conversation>
</conversation>
<query>
Can you tell me what is X from https://example.com
</query>`,
],
[
'assistant',
`<question>
What is X?
</question>
\`
4. Follow up question: Can you tell me what is X from https://example.com
Rephrased question: \`
<question>
Can you tell me what is X?
</question>
<links>
https://example.com
</links>
\`
5. Follow up question: Summarize the content from https://example.com
Rephrased question: \`
<question>
</links>`,
],
[
'user',
`<conversation>
</conversation>
<query>
Summarize the content from https://example.com
</query>`,
],
[
'assistant',
`<question>
summarize
</question>
<links>
https://example.com
</links>
\`
</examples>
Anything below is the part of the actual conversation and you need to use conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
<conversation>
{chat_history}
</conversation>
Follow up question: {query}
Rephrased question:
`;
</links>`,
],
];
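
A plausible assembly of the retriever prompt from these pieces; the exact wiring lives in the search agent and is not shown in this diff, so the final user message format below is an assumption modeled on the image/video chains:

import { ChatPromptTemplate } from '@langchain/core/prompts';

const retrieverPrompt = ChatPromptTemplate.fromMessages([
  ['system', webSearchRetrieverPrompt],
  ...webSearchRetrieverFewShots,
  [
    'user',
    '<conversation>\n{chat_history}\n</conversation>\n<query>\n{query}\n</query>',
  ],
]);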
export const webSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.


@@ -1,69 +0,0 @@
export const wolframAlphaSearchRetrieverPrompt = `
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
Example:
1. Follow up question: What is the atomic radius of S?
Rephrased: Atomic radius of S
2. Follow up question: What is linear algebra?
Rephrased: Linear algebra
3. Follow up question: What is the third law of thermodynamics?
Rephrased: Third law of thermodynamics
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
export const wolframAlphaSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable.
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate.
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate.
### Citation Requirements
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
### Special Instructions
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity.
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.
- Provide explanations or historical context as needed to enhance understanding.
- End with a conclusion or overall perspective if relevant.
<context>
{context}
</context>
Current date & time in ISO format (UTC timezone) is: {date}.
`;

View File

@@ -1,69 +0,0 @@
export const youtubeSearchRetrieverPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
If it is a writing task or a simple greeting (e.g. hi, hello) rather than a question, you need to return \`not_needed\` as the response.
Example:
1. Follow up question: How does an A.C work?
Rephrased: A.C working
2. Follow up question: Linear algebra explanation video
Rephrased: What is linear algebra?
3. Follow up question: What is theory of relativity?
Rephrased: What is theory of relativity?
Conversation:
{chat_history}
Follow up question: {query}
Rephrased question:
`;
export const youtubeSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable.
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate.
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate.
### Citation Requirements
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
### Special Instructions
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity.
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Youtube'; this means you will be searching for videos on the web using YouTube and providing information based on the video's transcript.
### User instructions
These instructions are shared with you by the user, not by the system. Follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.
- Provide explanations or historical context as needed to enhance understanding.
- End with a conclusion or overall perspective if relevant.
<context>
{context}
</context>
Current date & time in ISO format (UTC timezone) is: {date}.
`;

View File

@@ -0,0 +1,94 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getAimlApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
import axios from 'axios';
export const PROVIDER_INFO = {
key: 'aimlapi',
displayName: 'AI/ML API',
};
interface AimlApiModel {
id: string;
name?: string;
type?: string;
}
const API_URL = 'https://api.aimlapi.com';
export const loadAimlApiChatModels = async () => {
const apiKey = getAimlApiKey();
if (!apiKey) return {};
try {
const response = await axios.get(`${API_URL}/models`, {
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
},
});
const chatModels: Record<string, ChatModel> = {};
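// The /models endpoint returns every model type; keep only chat-completion entries here
// (embedding models are handled by the separate loader below).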
response.data.data.forEach((model: AimlApiModel) => {
if (model.type === 'chat-completion') {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
apiKey: apiKey,
modelName: model.id,
temperature: 0.7,
configuration: {
baseURL: API_URL,
},
}) as unknown as BaseChatModel,
};
}
});
return chatModels;
} catch (err) {
console.error(`Error loading AI/ML API models: ${err}`);
return {};
}
};
export const loadAimlApiEmbeddingModels = async () => {
const apiKey = getAimlApiKey();
if (!apiKey) return {};
try {
const response = await axios.get(`${API_URL}/models`, {
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
},
});
const embeddingModels: Record<string, EmbeddingModel> = {};
response.data.data.forEach((model: AimlApiModel) => {
if (model.type === 'embedding') {
embeddingModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
apiKey: apiKey,
modelName: model.id,
configuration: {
baseURL: API_URL,
},
}) as unknown as Embeddings,
};
}
});
return embeddingModels;
} catch (err) {
console.error(`Error loading AI/ML API embeddings models: ${err}`);
return {};
}
};
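A minimal usage sketch for the loader above (illustrative only; it assumes getAimlApiKey finds a configured key, otherwise the loader returns an empty object):

// Illustrative: enumerate the chat models the AI/ML API loader discovered.
import { loadAimlApiChatModels } from './aimlapi';

const aimlChatModels = await loadAimlApiChatModels();
console.log(Object.keys(aimlChatModels)); // model ids; {} when no API key is configured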

View File

@@ -9,6 +9,18 @@ export const PROVIDER_INFO = {
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
const anthropicChatModels: Record<string, string>[] = [
{
displayName: 'Claude 4.1 Opus',
key: 'claude-opus-4-1-20250805',
},
{
displayName: 'Claude 4 Opus',
key: 'claude-opus-4-20250514',
},
{
displayName: 'Claude 4 Sonnet',
key: 'claude-sonnet-4-20250514',
},
{
displayName: 'Claude 3.7 Sonnet',
key: 'claude-3-7-sonnet-20250219',

View File

@@ -31,7 +31,7 @@ export const loadDeepseekChatModels = async () => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: deepseekApiKey,
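// `apiKey` replaces the deprecated `openAIApiKey` field in current @langchain/openai;
// the same rename recurs in the other OpenAI-based providers below.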
apiKey: deepseekApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {

View File

@@ -14,8 +14,16 @@ import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.5 Pro Experimental',
key: 'gemini-2.5-pro-exp-03-25',
displayName: 'Gemini 2.5 Flash',
key: 'gemini-2.5-flash',
},
{
displayName: 'Gemini 2.5 Flash-Lite',
key: 'gemini-2.5-flash-lite',
},
{
displayName: 'Gemini 2.5 Pro',
key: 'gemini-2.5-pro',
},
{
displayName: 'Gemini 2.0 Flash',
@@ -67,7 +75,7 @@ export const loadGeminiChatModels = async () => {
displayName: model.displayName,
model: new ChatGoogleGenerativeAI({
apiKey: geminiApiKey,
modelName: model.key,
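// Newer @langchain/google-genai versions take `model`; `modelName` is the older alias.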
model: model.key,
temperature: 0.7,
}) as unknown as BaseChatModel,
};
@@ -100,7 +108,7 @@ export const loadGeminiEmbeddingModels = async () => {
return embeddingModels;
} catch (err) {
console.error(`Error loading OpenAI embeddings models: ${err}`);
console.error(`Error loading Gemini embeddings models: ${err}`);
return {};
}
};

View File

@@ -1,4 +1,4 @@
import { ChatOpenAI } from '@langchain/openai';
import { ChatGroq } from '@langchain/groq';
import { getGroqApiKey } from '../config';
import { ChatModel } from '.';
@@ -28,13 +28,10 @@ export const loadGroqChatModels = async () => {
groqChatModels.forEach((model: any) => {
chatModels[model.id] = {
displayName: model.id,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
modelName: model.id,
model: new ChatGroq({
apiKey: groqApiKey,
model: model.id,
temperature: 0.7,
configuration: {
baseURL: 'https://api.groq.com/openai/v1',
},
}) as unknown as BaseChatModel,
};
});

View File

@@ -35,11 +35,21 @@ import {
loadDeepseekChatModels,
PROVIDER_INFO as DeepseekInfo,
} from './deepseek';
import {
loadAimlApiChatModels,
loadAimlApiEmbeddingModels,
PROVIDER_INFO as AimlApiInfo,
} from './aimlapi';
import {
loadLMStudioChatModels,
loadLMStudioEmbeddingsModels,
PROVIDER_INFO as LMStudioInfo,
} from './lmstudio';
import {
loadLemonadeChatModels,
loadLemonadeEmbeddingModels,
PROVIDER_INFO as LemonadeInfo,
} from './lemonade';
export const PROVIDER_METADATA = {
openai: OpenAIInfo,
@@ -49,7 +59,9 @@ export const PROVIDER_METADATA = {
gemini: GeminiInfo,
transformers: TransformersInfo,
deepseek: DeepseekInfo,
aimlapi: AimlApiInfo,
lmstudio: LMStudioInfo,
lemonade: LemonadeInfo,
custom_openai: {
key: 'custom_openai',
displayName: 'Custom OpenAI',
@@ -76,7 +88,9 @@ export const chatModelProviders: Record<
anthropic: loadAnthropicChatModels,
gemini: loadGeminiChatModels,
deepseek: loadDeepseekChatModels,
aimlapi: loadAimlApiChatModels,
lmstudio: loadLMStudioChatModels,
lemonade: loadLemonadeChatModels,
};
export const embeddingModelProviders: Record<
@@ -87,7 +101,9 @@ export const embeddingModelProviders: Record<
ollama: loadOllamaEmbeddingModels,
gemini: loadGeminiEmbeddingModels,
transformers: loadTransformersEmbeddingsModels,
aimlapi: loadAimlApiEmbeddingModels,
lmstudio: loadLMStudioEmbeddingsModels,
lemonade: loadLemonadeEmbeddingModels,
};
export const getAvailableChatModelProviders = async () => {
@@ -110,9 +126,24 @@ export const getAvailableChatModelProviders = async () => {
[customOpenAiModelName]: {
displayName: customOpenAiModelName,
model: new ChatOpenAI({
openAIApiKey: customOpenAiApiKey,
apiKey: customOpenAiApiKey,
modelName: customOpenAiModelName,
temperature: 0.7,
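// Some models (the gpt-5 and o-series families) only accept the default temperature,
// so the option is added conditionally below.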
...(() => {
const temperatureRestrictedModels = [
'gpt-5-nano',
'gpt-5',
'gpt-5-mini',
'o1',
'o3',
'o3-mini',
'o4-mini',
];
const isTemperatureRestricted =
temperatureRestrictedModels.some((restrictedModel) =>
customOpenAiModelName.includes(restrictedModel),
);
return isTemperatureRestricted ? {} : { temperature: 0.7 };
})(),
configuration: {
baseURL: customOpenAiApiUrl,
},

View File

@@ -0,0 +1,94 @@
import axios from 'axios';
import { getLemonadeApiEndpoint, getLemonadeApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'lemonade',
displayName: 'Lemonade',
};
import { ChatOpenAI } from '@langchain/openai';
import { OpenAIEmbeddings } from '@langchain/openai';
export const loadLemonadeChatModels = async () => {
const lemonadeApiEndpoint = getLemonadeApiEndpoint();
const lemonadeApiKey = getLemonadeApiKey();
if (!lemonadeApiEndpoint) return {};
try {
const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
headers: {
'Content-Type': 'application/json',
...(lemonadeApiKey
? { Authorization: `Bearer ${lemonadeApiKey}` }
: {}),
},
});
const { data: models } = res.data;
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.id] = {
displayName: model.id,
model: new ChatOpenAI({
apiKey: lemonadeApiKey || 'lemonade-key',
modelName: model.id,
temperature: 0.7,
configuration: {
baseURL: `${lemonadeApiEndpoint}/api/v1`,
},
}),
};
});
return chatModels;
} catch (err) {
console.error(`Error loading Lemonade models: ${err}`);
return {};
}
};
export const loadLemonadeEmbeddingModels = async () => {
const lemonadeApiEndpoint = getLemonadeApiEndpoint();
const lemonadeApiKey = getLemonadeApiKey();
if (!lemonadeApiEndpoint) return {};
try {
const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
headers: {
'Content-Type': 'application/json',
...(lemonadeApiKey
? { Authorization: `Bearer ${lemonadeApiKey}` }
: {}),
},
});
const { data: models } = res.data;
const embeddingModels: Record<string, EmbeddingModel> = {};
// Filter models that support embeddings (if Lemonade provides this info)
// For now, we'll assume all models can be used for embeddings
models.forEach((model: any) => {
embeddingModels[model.id] = {
displayName: model.id,
model: new OpenAIEmbeddings({
apiKey: lemonadeApiKey || 'lemonade-key',
modelName: model.id,
configuration: {
baseURL: `${lemonadeApiEndpoint}/api/v1`,
},
}),
};
});
return embeddingModels;
} catch (err) {
console.error(`Error loading Lemonade embedding models: ${err}`);
return {};
}
};
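A quick smoke test of the endpoint shape this loader assumes (a sketch only; the host and port are placeholders, not confirmed Lemonade defaults — match whatever getLemonadeApiEndpoint returns):

// Illustrative: verify an OpenAI-compatible Lemonade server is reachable.
const endpoint = 'http://localhost:8000'; // hypothetical endpoint
const res = await fetch(`${endpoint}/api/v1/models`);
const { data } = await res.json();
console.log(data.map((m: { id: string }) => m.id));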

View File

@@ -47,7 +47,7 @@ export const loadLMStudioChatModels = async () => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: 'lm-studio',
apiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
@@ -83,7 +83,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
embeddingsModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: 'lm-studio',
apiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},

View File

@@ -1,16 +1,17 @@
import axios from 'axios';
import { getKeepAlive, getOllamaApiEndpoint } from '../config';
import { getKeepAlive, getOllamaApiEndpoint, getOllamaApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
export const PROVIDER_INFO = {
key: 'ollama',
displayName: 'Ollama',
};
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { ChatOllama } from '@langchain/ollama';
import { OllamaEmbeddings } from '@langchain/ollama';
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
const ollamaApiKey = getOllamaApiKey();
if (!ollamaApiEndpoint) return {};
@@ -33,6 +34,9 @@ export const loadOllamaChatModels = async () => {
model: model.model,
temperature: 0.7,
keepAlive: getKeepAlive(),
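// Forward a bearer token only when an Ollama API key is configured
// (e.g. when the server sits behind an authenticating proxy).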
...(ollamaApiKey
? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
: {}),
}),
};
});
@@ -46,6 +50,7 @@ export const loadOllamaChatModels = async () => {
export const loadOllamaEmbeddingModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
const ollamaApiKey = getOllamaApiKey();
if (!ollamaApiEndpoint) return {};
@@ -66,6 +71,9 @@ export const loadOllamaEmbeddingModels = async () => {
model: new OllamaEmbeddings({
baseUrl: ollamaApiEndpoint,
model: model.model,
...(ollamaApiKey
? { headers: { Authorization: `Bearer ${ollamaApiKey}` } }
: {}),
}),
};
});

View File

@@ -26,6 +26,10 @@ const openaiChatModels: Record<string, string>[] = [
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4o (2024-05-13)',
key: 'gpt-4o-2024-05-13',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
@@ -42,6 +46,34 @@ const openaiChatModels: Record<string, string>[] = [
displayName: 'GPT 4.1',
key: 'gpt-4.1',
},
{
displayName: 'GPT 5 nano',
key: 'gpt-5-nano',
},
{
displayName: 'GPT 5',
key: 'gpt-5',
},
{
displayName: 'GPT 5 Mini',
key: 'gpt-5-mini',
},
{
displayName: 'o1',
key: 'o1',
},
{
displayName: 'o3',
key: 'o3',
},
{
displayName: 'o3 Mini',
key: 'o3-mini',
},
{
displayName: 'o4 Mini',
key: 'o4-mini',
},
];
const openaiEmbeddingModels: Record<string, string>[] = [
@@ -64,13 +96,33 @@ export const loadOpenAIChatModels = async () => {
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
// Models that only support temperature = 1
const temperatureRestrictedModels = [
'gpt-5-nano',
'gpt-5',
'gpt-5-mini',
'o1',
'o3',
'o3-mini',
'o4-mini',
];
const isTemperatureRestricted = temperatureRestrictedModels.some(
(restrictedModel) => model.key.includes(restrictedModel),
);
const modelConfig: any = {
apiKey: openaiApiKey,
modelName: model.key,
};
// Only add temperature if the model supports it
if (!isTemperatureRestricted) {
modelConfig.temperature = 0.7;
}
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: openaiApiKey,
modelName: model.key,
temperature: 0.7,
}) as unknown as BaseChatModel,
model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel,
};
});
@@ -93,7 +145,7 @@ export const loadOpenAIEmbeddingModels = async () => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey: openaiApiKey,
apiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};
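How the restricted-model check above resolves for a couple of keys (a sketch; substring matching via includes is exactly what the loader uses):

// Illustrative: the substring check used by loadOpenAIChatModels.
const restricted = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];
console.log(restricted.some((r) => 'gpt-5-mini'.includes(r))); // true  -> temperature omitted
console.log(restricted.some((r) => 'gpt-4o'.includes(r)));     // false -> temperature: 0.7 sent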

View File

@@ -6,54 +6,54 @@ export const searchHandlers: Record<string, MetaSearchAgent> = {
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: true,
}),
academicSearch: new MetaSearchAgent({
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
writingAssistant: new MetaSearchAgent({
activeEngines: [],
queryGeneratorPrompt: '',
queryGeneratorFewShots: [],
responsePrompt: prompts.writingAssistantPrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
summarizer: false,
}),
wolframAlphaSearch: new MetaSearchAgent({
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
summarizer: false,
}),
youtubeSearch: new MetaSearchAgent({
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
redditSearch: new MetaSearchAgent({
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
queryGeneratorFewShots: prompts.webSearchRetrieverFewShots,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
summarizer: false,
}),
};

View File

@@ -11,7 +11,7 @@ import {
RunnableMap,
RunnableSequence,
} from '@langchain/core/runnables';
import { BaseMessage } from '@langchain/core/messages';
import { BaseMessage, BaseMessageLike } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import LineListOutputParser from '../outputParsers/listLineOutputParser';
import LineOutputParser from '../outputParsers/lineOutputParser';
@@ -40,9 +40,9 @@ export interface MetaSearchAgentType {
interface Config {
searchWeb: boolean;
rerank: boolean;
summarizer: boolean;
rerankThreshold: number;
queryGeneratorPrompt: string;
queryGeneratorFewShots: BaseMessageLike[];
responsePrompt: string;
activeEngines: string[];
}
@@ -64,7 +64,22 @@ class MetaSearchAgent implements MetaSearchAgentType {
(llm as unknown as ChatOpenAI).temperature = 0;
return RunnableSequence.from([
PromptTemplate.fromTemplate(this.config.queryGeneratorPrompt),
ChatPromptTemplate.fromMessages([
['system', this.config.queryGeneratorPrompt],
...this.config.queryGeneratorFewShots,
[
'user',
`
<conversation>
{chat_history}
</conversation>
<query>
{query}
</query>
`,
],
]),
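// Few-shot turns are BaseMessageLike tuples spliced between the system prompt and the final
// user turn. Illustrative shape only (the real few-shots live in prompts.webSearchRetrieverFewShots):
//   ['user', '<conversation>\n</conversation>\n<query>How does an A.C work?</query>'],
//   ['assistant', '<question>AC working</question>'],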
llm,
this.strParser,
RunnableLambda.from(async (input: string) => {
@@ -77,9 +92,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
});
const links = await linksOutputParser.parse(input);
let question = this.config.summarizer
? await questionOutputParser.parse(input)
: input;
let question = (await questionOutputParser.parse(input)) ?? input;
if (question === 'not_needed') {
return { query: '', docs: [] };
@@ -440,7 +453,6 @@ class MetaSearchAgent implements MetaSearchAgentType {
event.event === 'on_chain_end' &&
event.name === 'FinalSourceRetriever'
) {
``;
emitter.emit(
'data',
JSON.stringify({ type: 'sources', data: event.data.output }),

View File

@@ -1,8 +1,11 @@
import { BaseMessage } from '@langchain/core/messages';
import { BaseMessage, isAIMessage } from '@langchain/core/messages';
const formatChatHistoryAsString = (history: BaseMessage[]) => {
return history
.map((message) => `${message._getType()}: ${message.content}`)
.map(
(message) =>
`${isAIMessage(message) ? 'AI' : 'User'}: ${message.content}`,
)
.join('\n');
};
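A usage sketch for the updated helper (assuming it is exported from this module; HumanMessage and AIMessage are the standard @langchain/core message classes):

// Illustrative: turns are now labeled by role instead of the raw _getType() value.
import { AIMessage, HumanMessage } from '@langchain/core/messages';

const history = [new HumanMessage('hi'), new AIMessage('hello')];
console.log(formatChatHistoryAsString(history)); // "User: hi\nAI: hello"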

View File

@@ -2,15 +2,17 @@ import type { Config } from 'tailwindcss';
import type { DefaultColors } from 'tailwindcss/types/generated/colors';
const themeDark = (colors: DefaultColors) => ({
50: '#0a0a0a',
100: '#111111',
200: '#1c1c1c',
50: '#111116',
100: '#1f202b',
200: '#2d2f3f',
300: '#3a3c4c',
});
const themeLight = (colors: DefaultColors) => ({
50: '#fcfcf9',
100: '#f3f3ee',
200: '#e8e8e3',
50: '#ffffff',
100: '#f1f5f9',
200: '#c4c7c5',
300: '#9ca3af',
});
const config: Config = {

746
yarn.lock

File diff suppressed because it is too large