Compare commits


16 Commits

Author SHA1 Message Date
ItzCrazyKns
57407112fb feat(package): bump version 2025-07-16 10:39:50 +05:30
ItzCrazyKns
b280cc2e01 Merge pull request #787 from chriswritescode-dev/IOS
Fix: IOS Input Zoom / Support PWA Home Screen App, closes #458
2025-07-15 22:10:01 +05:30
ItzCrazyKns
e6ebf892c5 feat(styles): update globals.css 2025-07-15 21:47:20 +05:30
ItzCrazyKns
b754641058 feat(gitignore): add certificates 2025-07-15 21:45:44 +05:30
ItzCrazyKns
722f4f760e feat(manifest): update icons & screenshots 2025-07-15 21:45:37 +05:30
ItzCrazyKns
01e04a209f feat(public): add screenshots & update icons 2025-07-15 21:45:24 +05:30
ItzCrazyKns
0299fd1ea0 Merge pull request #817 from kittrydge/patch-1
Update Linux ollama instructions in README.md
2025-07-15 20:23:02 +05:30
ItzCrazyKns
cf8dec53ca feat(chat-window): select provider if model's present, closes #803 2025-07-07 16:09:36 +05:30
ItzCrazyKns
d5c012d748 Revert "Update ChatWindow.tsx"
This reverts commit 2ccbd9a44c.
2025-07-07 15:52:39 +05:30
ItzCrazyKns
2ccbd9a44c Update ChatWindow.tsx 2025-07-05 22:00:06 +05:30
kittrydge
ccd89d48d9 Update Linux ollama instructions in README.md
When setting the OLLAMA_HOST environment variable, the port number must be specified ( see https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux )

Also, 'systemctl daemon-reload' needs to be called after changing a systemd unit file, and before the relevant systemd service is reloaded.
2025-07-01 18:00:26 -06:00
ItzCrazyKns
87d788ddef Update README.md 2025-06-30 19:55:23 +05:30
ItzCrazyKns
809b625a34 feat(widgets): fix size on smaller screens, closes #791 2025-06-30 15:42:41 +05:30
ItzCrazyKns
95c753a549 Merge branch 'pr/815' 2025-06-30 15:38:31 +05:30
D1m7asis
c6d084f5dc feat: add AIML API provider
Introduces support for the AI/ML API provider, including configuration options, chat and embedding model loading, and UI integration. Updates documentation and sample config to reflect the new provider.
2025-06-27 13:43:54 +02:00
Chris Scott
68c43ea372 Fix: IOS Input Zoom
- Modified manifest.ts to ensure proper metadata config for theme consistency and iOS standalone mode
- Added display: standalone for iOS PWA behavior
2025-06-02 21:52:41 -04:00
21 changed files with 240 additions and 12 deletions

0
.assets/manifest.json Normal file

2
.gitignore vendored

@@ -37,3 +37,5 @@ Thumbs.db
 # Db
 db.sqlite
 /searxng
+
+certificates

README.md

@@ -16,7 +16,7 @@
 <hr/>
-[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
+[![Discord](https://dcbadge.limes.pink/api/server/26aArMy8tT?style=flat)](https://discord.gg/26aArMy8tT)
 ![preview](.assets/perplexica-screenshot.png?)
@@ -90,7 +90,9 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
 - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
 - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
 - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
+- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
+- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**
 **Note**: You can change these after starting Perplexica from the settings dialog.
@@ -133,7 +135,7 @@ If you're encountering an Ollama connection error, it is likely due to the backe
 3. **Linux Users - Expose Ollama to Network:**
-   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
+   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0:11434"`. (Change the port number if you are using a different one.) Then reload the systemd manager configuration with `systemctl daemon-reload`, and restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
 - Ensure that the port (default is 11434) is not blocked by your firewall.

package.json

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.11.0-rc1",
+  "version": "1.11.0-rc2",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {

BIN
public/icon-100.png Normal file (916 B; binary file not shown)

BIN
public/icon-50.png Normal file (515 B; binary file not shown)

BIN
public/icon.png Normal file (30 KiB; binary file not shown)

BIN
public/screenshots/p1.png Normal file (183 KiB; binary file not shown)

BIN
public/screenshots/p1_small.png Normal file (130 KiB; binary file not shown)

BIN
public/screenshots/p2.png Normal file (627 KiB; binary file not shown)

BIN
public/screenshots/p2_small.png Normal file (202 KiB; binary file not shown)

sample.config.toml

@@ -25,6 +25,9 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""
 
+[MODELS.AIMLAPI]
+API_KEY = "" # Required to use AI/ML API chat and embedding models
+
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234

src/app/api/config/route.ts

@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getAimlApiKey,
   getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
@@ -57,6 +58,7 @@ export const GET = async (req: Request) => {
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['deepseekApiKey'] = getDeepseekApiKey();
+    config['aimlApiKey'] = getAimlApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -95,6 +97,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      AIMLAPI: {
+        API_KEY: config.aimlApiKey,
+      },
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },

src/app/globals.css

@@ -11,3 +11,11 @@
     display: none;
   }
 }
+
+@media screen and (-webkit-min-device-pixel-ratio: 0) {
+  select,
+  textarea,
+  input {
+    font-size: 16px !important;
+  }
+}

54
src/app/manifest.ts Normal file

@@ -0,0 +1,54 @@
import type { MetadataRoute } from 'next';

export default function manifest(): MetadataRoute.Manifest {
  return {
    name: 'Perplexica - Chat with the internet',
    short_name: 'Perplexica',
    description:
      'Perplexica is an AI powered chatbot that is connected to the internet.',
    start_url: '/',
    display: 'standalone',
    background_color: '#0a0a0a',
    theme_color: '#0a0a0a',
    screenshots: [
      {
        src: '/screenshots/p1.png',
        form_factor: 'wide',
        sizes: '2560x1600',
      },
      {
        src: '/screenshots/p2.png',
        form_factor: 'wide',
        sizes: '2560x1600',
      },
      {
        src: '/screenshots/p1_small.png',
        form_factor: 'narrow',
        sizes: '828x1792',
      },
      {
        src: '/screenshots/p2_small.png',
        form_factor: 'narrow',
        sizes: '828x1792',
      },
    ],
    icons: [
      {
        src: '/icon-50.png',
        sizes: '50x50',
        type: 'image/png' as const,
      },
      {
        src: '/icon-100.png',
        sizes: '100x100',
        type: 'image/png',
      },
      {
        src: '/icon.png',
        sizes: '440x440',
        type: 'image/png',
        purpose: 'any',
      },
    ],
  };
}

src/app/settings/page.tsx

@@ -23,6 +23,7 @@ interface SettingsType {
   ollamaApiUrl: string;
   lmStudioApiUrl: string;
   deepseekApiKey: string;
+  aimlApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
@@ -862,6 +863,25 @@ const Page = () => {
             />
           </div>
+          <div className="flex flex-col space-y-1">
+            <p className="text-black/70 dark:text-white/70 text-sm">
+              AI/ML API Key
+            </p>
+            <Input
+              type="text"
+              placeholder="AI/ML API Key"
+              value={config.aimlApiKey}
+              isSaving={savingStates['aimlApiKey']}
+              onChange={(e) => {
+                setConfig((prev) => ({
+                  ...prev!,
+                  aimlApiKey: e.target.value,
+                }));
+              }}
+              onSave={(value) => saveConfig('aimlApiKey', value)}
+            />
+          </div>
           <div className="flex flex-col space-y-1">
             <p className="text-black/70 dark:text-white/70 text-sm">
               LM Studio API URL

src/components/ChatWindow.tsx

@@ -82,14 +82,29 @@ const checkConfig = async (
       ) {
         if (!chatModel || !chatModelProvider) {
           const chatModelProviders = providers.chatModelProviders;
+          const chatModelProvidersKeys = Object.keys(chatModelProviders);
 
-          chatModelProvider =
-            chatModelProvider || Object.keys(chatModelProviders)[0];
+          if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
+            return toast.error('No chat models available');
+          } else {
+            chatModelProvider =
+              chatModelProvidersKeys.find(
+                (provider) =>
+                  Object.keys(chatModelProviders[provider]).length > 0,
+              ) || chatModelProvidersKeys[0];
+          }
+
+          if (
+            chatModelProvider === 'custom_openai' &&
+            Object.keys(chatModelProviders[chatModelProvider]).length === 0
+          ) {
+            toast.error(
+              "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+            );
+            return setHasError(true);
+          }
 
           chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
-
-          if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
-            return toast.error('No chat models available');
         }
 
         if (!embeddingModel || !embeddingModelProvider) {
@@ -117,7 +132,8 @@ const checkConfig = async (
       if (
         Object.keys(chatModelProviders).length > 0 &&
-        !chatModelProviders[chatModelProvider]
+        (!chatModelProviders[chatModelProvider] ||
+          Object.keys(chatModelProviders[chatModelProvider]).length === 0)
       ) {
         const chatModelProvidersKeys = Object.keys(chatModelProviders);
         chatModelProvider =
@@ -132,6 +148,16 @@ const checkConfig = async (
         chatModelProvider &&
         !chatModelProviders[chatModelProvider][chatModel]
       ) {
+        if (
+          chatModelProvider === 'custom_openai' &&
+          Object.keys(chatModelProviders[chatModelProvider]).length === 0
+        ) {
+          toast.error(
+            "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+          );
+          return setHasError(true);
+        }
+
         chatModel = Object.keys(
           chatModelProviders[
             Object.keys(chatModelProviders[chatModelProvider]).length > 0
@@ -139,6 +165,7 @@ const checkConfig = async (
               : Object.keys(chatModelProviders)[0]
           ],
         )[0];
+
         localStorage.setItem('chatModel', chatModel);
       }
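
Taken together, the hunks above change provider selection from "take the first key" to "take the first provider that actually has models, and fail loudly for an unconfigured custom_openai". A minimal standalone sketch of that selection rule, with illustrative names (pickChatModelProvider is not from the repo):

type ProviderMap = Record<string, Record<string, unknown>>;

// Prefer the first provider exposing at least one model; otherwise fall
// back to the first key, mirroring the fallback in the diff above.
function pickChatModelProvider(providers: ProviderMap): string | undefined {
  const keys = Object.keys(providers);
  if (keys.length === 0) return undefined; // the diff surfaces this as a toast
  return keys.find((key) => Object.keys(providers[key]).length > 0) ?? keys[0];
}

// Example: 'custom_openai' is present but empty, so 'ollama' is chosen.
console.log(
  pickChatModelProvider({
    custom_openai: {},
    ollama: { 'llama3.1': { displayName: 'Llama 3.1' } },
  }),
); // → 'ollama'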

src/components/EmptyChat.tsx

@@ -51,10 +51,10 @@ const EmptyChat = ({
           />
         </div>
         <div className="flex flex-col w-full gap-4 mt-2 sm:flex-row sm:justify-center">
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <WeatherWidget />
           </div>
-          <div className="flex-1 max-w-xs">
+          <div className="flex-1 w-full">
             <NewsArticleWidget />
           </div>
         </div>

src/lib/config.ts

@@ -35,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  AIMLAPI: {
+    API_KEY: string;
+  };
   LM_STUDIO: {
     API_URL: string;
   };
@@ -85,6 +88,8 @@ export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;
 export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;
 
+export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

94
src/lib/providers/aimlapi.ts Normal file

@@ -0,0 +1,94 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getAimlApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
import axios from 'axios';

export const PROVIDER_INFO = {
  key: 'aimlapi',
  displayName: 'AI/ML API',
};

interface AimlApiModel {
  id: string;
  name?: string;
  type?: string;
}

const API_URL = 'https://api.aimlapi.com';

export const loadAimlApiChatModels = async () => {
  const apiKey = getAimlApiKey();

  if (!apiKey) return {};

  try {
    const response = await axios.get(`${API_URL}/models`, {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
    });

    const chatModels: Record<string, ChatModel> = {};

    response.data.data.forEach((model: AimlApiModel) => {
      if (model.type === 'chat-completion') {
        chatModels[model.id] = {
          displayName: model.name || model.id,
          model: new ChatOpenAI({
            openAIApiKey: apiKey,
            modelName: model.id,
            temperature: 0.7,
            configuration: {
              baseURL: API_URL,
            },
          }) as unknown as BaseChatModel,
        };
      }
    });

    return chatModels;
  } catch (err) {
    console.error(`Error loading AI/ML API models: ${err}`);
    return {};
  }
};

export const loadAimlApiEmbeddingModels = async () => {
  const apiKey = getAimlApiKey();

  if (!apiKey) return {};

  try {
    const response = await axios.get(`${API_URL}/models`, {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
    });

    const embeddingModels: Record<string, EmbeddingModel> = {};

    response.data.data.forEach((model: AimlApiModel) => {
      if (model.type === 'embedding') {
        embeddingModels[model.id] = {
          displayName: model.name || model.id,
          model: new OpenAIEmbeddings({
            openAIApiKey: apiKey,
            modelName: model.id,
            configuration: {
              baseURL: API_URL,
            },
          }) as unknown as Embeddings,
        };
      }
    });

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading AI/ML API embeddings models: ${err}`);
    return {};
  }
};
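
For context, a hedged usage sketch of the new loader (not part of the compare; assumes an AIMLAPI key is configured and https://api.aimlapi.com/models is reachable):

import { loadAimlApiChatModels } from './aimlapi';

async function demo() {
  // The loader returns a map of model id → { displayName, model } entries,
  // or {} when no API key is set.
  const models = await loadAimlApiChatModels();
  const first = Object.keys(models)[0];
  if (!first) {
    console.log('No AI/ML API chat models loaded (missing key?)');
    return;
  }
  // Each entry wraps a LangChain ChatOpenAI client pointed at api.aimlapi.com.
  const res = await models[first].model.invoke('Say hello in one word.');
  console.log(first, '→', res.content);
}

demo().catch(console.error);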

src/lib/providers/index.ts

@@ -35,6 +35,11 @@ import {
   loadDeepseekChatModels,
   PROVIDER_INFO as DeepseekInfo,
 } from './deepseek';
+import {
+  loadAimlApiChatModels,
+  loadAimlApiEmbeddingModels,
+  PROVIDER_INFO as AimlApiInfo,
+} from './aimlapi';
 import {
   loadLMStudioChatModels,
   loadLMStudioEmbeddingsModels,
@@ -49,6 +54,7 @@ export const PROVIDER_METADATA = {
   gemini: GeminiInfo,
   transformers: TransformersInfo,
   deepseek: DeepseekInfo,
+  aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
   custom_openai: {
     key: 'custom_openai',
@@ -76,6 +82,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
 };
@@ -87,6 +94,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  aimlapi: loadAimlApiEmbeddingModels,
   lmstudio: loadLMStudioEmbeddingsModels,
 };
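
With both registries updated, aimlapi is discovered like any other provider. A hedged sketch of how such a loader registry is typically consumed (illustrative; getAvailableChatModelProviders here may differ from the repo's actual helper):

import { chatModelProviders } from './index';

// Run every registered loader and keep only providers that returned models.
// Loaders such as loadAimlApiChatModels return {} when their API key is
// unset, so unconfigured providers simply drop out of the result.
export const getAvailableChatModelProviders = async () => {
  const result: Record<string, Record<string, unknown>> = {};
  for (const [provider, loader] of Object.entries(chatModelProviders)) {
    const models = await loader();
    if (Object.keys(models).length > 0) {
      result[provider] = models;
    }
  }
  return result;
};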