Compare commits

...

18 Commits

Author SHA1 Message Date
ItzCrazyKns
df33229934 feat(custom-openai): use apiKey instead of openAIApiKey 2025-07-19 16:14:46 +05:30
ItzCrazyKns
49fafaa096 feat(metaSearchAgent): implement structured outputs 2025-07-19 16:10:04 +05:30
ItzCrazyKns
ca9b32a23b feat(ollama): use @langchain/ollama library 2025-07-19 16:09:46 +05:30
ItzCrazyKns
76e3ff4e02 feat(providers): switch to apiKey key 2025-07-19 16:09:21 +05:30
ItzCrazyKns
eabf3ca7d3 feat(modules): update langchain packages 2025-07-19 16:08:45 +05:30
ItzCrazyKns
94e6db10bb feat(weather): add other measurement units, closes #821 #790 2025-07-18 21:09:32 +05:30
ItzCrazyKns
26e1d5fec3 feat(routes): lint & beautify 2025-07-17 22:23:11 +05:30
ItzCrazyKns
66be87b688 Merge branch 'pr/827' 2025-07-17 22:22:50 +05:30
amoshydra
f7b4e32218 fix(discover): provide language when fetching
some engines provide empty response when no language is provided.

fix #618
2025-07-17 02:14:49 +08:00
ItzCrazyKns
57407112fb feat(package): bump version 2025-07-16 10:39:50 +05:30
ItzCrazyKns
b280cc2e01 Merge pull request #787 from chriswritescode-dev/IOS
Fix: IOS Input Zoom / Support PWA Home Screen App, closes #458
2025-07-15 22:10:01 +05:30
ItzCrazyKns
e6ebf892c5 feat(styles): update globals.css 2025-07-15 21:47:20 +05:30
ItzCrazyKns
b754641058 feat(gitignore): add certificates 2025-07-15 21:45:44 +05:30
ItzCrazyKns
722f4f760e feat(manifest): update icons & screenshots 2025-07-15 21:45:37 +05:30
ItzCrazyKns
01e04a209f feat(public): add screenshots & update icons 2025-07-15 21:45:24 +05:30
ItzCrazyKns
0299fd1ea0 Merge pull request #817 from kittrydge/patch-1
Update Linux ollama instructions in README.md
2025-07-15 20:23:02 +05:30
kittrydge
ccd89d48d9 Update Linux ollama instructions in README.md
When setting the OLLAMA_HOST environment variable, the port number must be specified ( see https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux )

Also, 'systemctl daemon-reload' needs to be called after changing a systemd unit file, and before the relevant systemd service is reloaded.
2025-07-01 18:00:26 -06:00
Chris Scott
68c43ea372 Fix: IOS Input Zoom
config for theme consistency and iOS standalone mode
- Modified manifest.ts to ensure proper metadata

- Added display: standalone for iOS PWA behavior
2025-06-02 21:52:41 -04:00
32 changed files with 559 additions and 562 deletions

.assets/manifest.json (0 changes)

.gitignore (vendored, 2 changes)

@@ -37,3 +37,5 @@ Thumbs.db
# Db
db.sqlite
/searxng
certificates

README.md

@@ -135,7 +135,7 @@ If you're encountering an Ollama connection error, it is likely due to the backe
3. **Linux Users - Expose Ollama to Network:**
- Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
- Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0:11434"`. (Change the port number if you are using a different one.) Then reload the systemd manager configuration with `systemctl daemon-reload`, and restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
- Ensure that the port (default is 11434) is not blocked by your firewall.

package.json

@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.11.0-rc1",
"version": "1.11.0-rc2",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@@ -15,11 +15,12 @@
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.15",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/google-genai": "^0.1.12",
"@langchain/openai": "^0.0.25",
"@langchain/anthropic": "^0.3.24",
"@langchain/community": "^0.3.49",
"@langchain/core": "^0.3.66",
"@langchain/google-genai": "^0.2.15",
"@langchain/ollama": "^0.2.3",
"@langchain/openai": "^0.6.2",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",
"@xenova/transformers": "^2.17.2",
@@ -31,7 +32,7 @@
"drizzle-orm": "^0.40.1",
"html-to-text": "^9.0.5",
"jspdf": "^3.0.1",
"langchain": "^0.1.30",
"langchain": "^0.3.30",
"lucide-react": "^0.363.0",
"mammoth": "^1.9.1",
"markdown-to-jsx": "^7.7.2",

BIN public/icon-100.png (new file, 916 B)

BIN public/icon-50.png (new file, 515 B)

BIN public/icon.png (new file, 30 KiB)

BIN public/screenshots/p1.png (new file, 183 KiB)

BIN (binary file, name not shown, 130 KiB)

BIN public/screenshots/p2.png (new file, 627 KiB)

BIN (binary file, name not shown, 202 KiB)

View File

@@ -223,7 +223,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {
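
This hunk, the four route hunks below, and the provider changes further down all make the same substitution. A minimal sketch, assuming @langchain/openai 0.6.x (the version pulled in by this compare), where apiKey is the preferred constructor field and openAIApiKey was the older alias; the model name and base URL here are illustrative only:

import { ChatOpenAI } from '@langchain/openai';

// Older style: new ChatOpenAI({ openAIApiKey: key, modelName: '...' })
// Style used after this change set:
const llm = new ChatOpenAI({
  apiKey: process.env.OPENAI_API_KEY,      // credential now passed as `apiKey`
  modelName: 'gpt-4o-mini',                // illustrative model name
  temperature: 0.7,
  configuration: {
    baseURL: 'https://api.openai.com/v1',  // override for OpenAI-compatible endpoints
  },
});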

View File

@@ -36,6 +36,7 @@ export const GET = async (req: Request) => {
{
engines: ['bing news'],
pageno: 1,
language: 'en',
},
)
).results;
@@ -49,7 +50,11 @@ export const GET = async (req: Request) => {
data = (
await searchSearxng(
`site:${articleWebsites[Math.floor(Math.random() * articleWebsites.length)]} ${topics[Math.floor(Math.random() * topics.length)]}`,
{ engines: ['bing news'], pageno: 1 },
{
engines: ['bing news'],
pageno: 1,
language: 'en',
},
)
).results;
}
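
A hedged sketch of the call shape after this fix; the import path for the searchSearxng helper and the query string are assumptions, while the options object mirrors the hunk above:

import { searchSearxng } from '@/lib/searxng'; // import path assumed for illustration

// Passing an explicit language avoids the empty responses some SearXNG
// engines return when no language is set (see #618).
const discoverNews = async () => {
  const { results } = await searchSearxng('site:example.com AI', {
    engines: ['bing news'],
    pageno: 1,
    language: 'en',
  });
  return results;
};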

View File

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

View File

@@ -81,7 +81,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.name || getCustomOpenaiModelName(),
openAIApiKey:
apiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {

View File

@@ -48,7 +48,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

View File

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
openAIApiKey: getCustomOpenaiApiKey(),
apiKey: getCustomOpenaiApiKey(),
modelName: getCustomOpenaiModelName(),
temperature: 0.7,
configuration: {

View File

@@ -1,6 +1,7 @@
export const POST = async (req: Request) => {
try {
const body: { lat: number; lng: number } = await req.json();
const body: { lat: number; lng: number; temperatureUnit: 'C' | 'F' } =
await req.json();
if (!body.lat || !body.lng) {
return Response.json(
@@ -12,7 +13,7 @@ export const POST = async (req: Request) => {
}
const res = await fetch(
`https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto`,
`https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${body.temperatureUnit === 'C' ? '' : '&temperature_unit=fahrenheit'}`,
);
const data = await res.json();
@@ -33,12 +34,14 @@ export const POST = async (req: Request) => {
humidity: number;
windSpeed: number;
icon: string;
temperatureUnit: 'C' | 'F';
} = {
temperature: data.current.temperature_2m,
condition: '',
humidity: data.current.relative_humidity_2m,
windSpeed: data.current.wind_speed_10m,
icon: '',
temperatureUnit: body.temperatureUnit,
};
const code = data.current.weather_code;
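
A small sketch of how the unit toggles the Open-Meteo query string, following the hunk above. The parameter names are Open-Meteo's; the helper name is made up for illustration:

// Open-Meteo defaults to Celsius, so the extra parameter is only appended for Fahrenheit.
const buildForecastUrl = (lat: number, lng: number, unit: 'C' | 'F'): string =>
  `https://api.open-meteo.com/v1/forecast?latitude=${lat}&longitude=${lng}` +
  `&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m` +
  `&timezone=auto${unit === 'C' ? '' : '&temperature_unit=fahrenheit'}`;

// buildForecastUrl(52.52, 13.41, 'F') ends with ...&temperature_unit=fahrenheit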

View File

@@ -11,3 +11,11 @@
display: none;
}
}
@media screen and (-webkit-min-device-pixel-ratio: 0) {
select,
textarea,
input {
font-size: 16px !important;
}
}

src/app/manifest.ts (new file, 54 lines)

@@ -0,0 +1,54 @@
import type { MetadataRoute } from 'next';
export default function manifest(): MetadataRoute.Manifest {
return {
name: 'Perplexica - Chat with the internet',
short_name: 'Perplexica',
description:
'Perplexica is an AI powered chatbot that is connected to the internet.',
start_url: '/',
display: 'standalone',
background_color: '#0a0a0a',
theme_color: '#0a0a0a',
screenshots: [
{
src: '/screenshots/p1.png',
form_factor: 'wide',
sizes: '2560x1600',
},
{
src: '/screenshots/p2.png',
form_factor: 'wide',
sizes: '2560x1600',
},
{
src: '/screenshots/p1_small.png',
form_factor: 'narrow',
sizes: '828x1792',
},
{
src: '/screenshots/p2_small.png',
form_factor: 'narrow',
sizes: '828x1792',
},
],
icons: [
{
src: '/icon-50.png',
sizes: '50x50',
type: 'image/png' as const,
},
{
src: '/icon-100.png',
sizes: '100x100',
type: 'image/png',
},
{
src: '/icon.png',
sizes: '440x440',
type: 'image/png',
purpose: 'any',
},
],
};
}

View File

@@ -148,6 +148,7 @@ const Page = () => {
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [systemInstructions, setSystemInstructions] = useState<string>('');
const [temperatureUnit, setTemperatureUnit] = useState<'C' | 'F'>('C');
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
useEffect(() => {
@@ -210,6 +211,8 @@ const Page = () => {
setSystemInstructions(localStorage.getItem('systemInstructions')!);
setTemperatureUnit(localStorage.getItem('temperatureUnit')! as 'C' | 'F');
setIsLoading(false);
};
@@ -368,6 +371,8 @@ const Page = () => {
localStorage.setItem('embeddingModel', value);
} else if (key === 'systemInstructions') {
localStorage.setItem('systemInstructions', value);
} else if (key === 'temperatureUnit') {
localStorage.setItem('temperatureUnit', value.toString());
}
} catch (err) {
console.error('Failed to save:', err);
@@ -416,13 +421,35 @@ const Page = () => {
) : (
config && (
<div className="flex flex-col space-y-6 pb-28 lg:pb-8">
<SettingsSection title="Appearance">
<SettingsSection title="Preferences">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Theme
</p>
<ThemeSwitcher />
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Temperature Unit
</p>
<Select
value={temperatureUnit ?? undefined}
onChange={(e) => {
setTemperatureUnit(e.target.value as 'C' | 'F');
saveConfig('temperatureUnit', e.target.value);
}}
options={[
{
label: 'Celsius',
value: 'C',
},
{
label: 'Fahrenheit',
value: 'F',
},
]}
/>
</div>
</SettingsSection>
<SettingsSection title="Automatic Search">
@@ -516,7 +543,7 @@ const Page = () => {
<SettingsSection title="System Instructions">
<div className="flex flex-col space-y-4">
<Textarea
value={systemInstructions}
value={systemInstructions ?? undefined}
isSaving={savingStates['systemInstructions']}
onChange={(e) => {
setSystemInstructions(e.target.value);
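
The new temperature-unit preference in this page is kept in localStorage alongside the other client-side settings. A minimal sketch of the load/save round trip, with 'C' as the fallback when nothing has been stored yet; the helper names are illustrative, the page itself inlines this logic in useEffect and saveConfig:

type TemperatureUnit = 'C' | 'F';

const loadTemperatureUnit = (): TemperatureUnit =>
  (localStorage.getItem('temperatureUnit') ?? 'C') as TemperatureUnit;

const saveTemperatureUnit = (unit: TemperatureUnit): void => {
  localStorage.setItem('temperatureUnit', unit);
};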

View File

@@ -9,7 +9,9 @@ const WeatherWidget = () => {
humidity: 0,
windSpeed: 0,
icon: '',
temperatureUnit: 'C',
});
const [loading, setLoading] = useState(true);
useEffect(() => {
@@ -73,6 +75,7 @@ const WeatherWidget = () => {
body: JSON.stringify({
lat: location.latitude,
lng: location.longitude,
temperatureUnit: localStorage.getItem('temperatureUnit') ?? 'C',
}),
});
@@ -91,6 +94,7 @@ const WeatherWidget = () => {
humidity: data.humidity,
windSpeed: data.windSpeed,
icon: data.icon,
temperatureUnit: data.temperatureUnit,
});
setLoading(false);
});
@@ -125,7 +129,7 @@ const WeatherWidget = () => {
className="h-10 w-auto"
/>
<span className="text-base font-semibold text-black dark:text-white">
{data.temperature}°C
{data.temperature}°{data.temperatureUnit}
</span>
</div>
<div className="flex flex-col justify-between flex-1 h-full py-1">
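
On the client side the widget now forwards the stored preference and echoes the unit it gets back, building the display string as `${temperature}°${temperatureUnit}`. A condensed sketch of the request/response handling shown in the hunks above; the endpoint path is an assumption and the state handling is simplified:

const fetchWeather = async (lat: number, lng: number) => {
  const res = await fetch('/api/weather', { // endpoint path assumed
    method: 'POST',
    body: JSON.stringify({
      lat,
      lng,
      temperatureUnit: localStorage.getItem('temperatureUnit') ?? 'C',
    }),
  });
  // The API echoes temperatureUnit back so the widget can render °C or °F.
  return res.json() as Promise<{
    temperature: number;
    condition: string;
    humidity: number;
    windSpeed: number;
    icon: string;
    temperatureUnit: 'C' | 'F';
  }>;
};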

View File

@@ -1,63 +1,41 @@
export const webSearchRetrieverPrompt = `
You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants to you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
You are an AI question rephraser. You will be given a conversation and a follow-up question; rephrase it into a standalone question that another LLM can use to search the web.
There are several examples attached for your reference inside the below \`examples\` XML block
Return ONLY a JSON object that matches this schema:
query: string // the standalone question (or "summarize")
links: string[] // URLs extracted from the user query (empty if none)
searchRequired: boolean // true if web search is needed, false for greetings/simple writing tasks
searchMode: "" | "normal" | "news" // "" when searchRequired is false; "news" if the user asks for news/articles, otherwise "normal"
<examples>
1. Follow up question: What is the capital of France
Rephrased question:\`
<question>
Capital of france
</question>
\`
Rules
- Greetings / simple writing tasks → query:"", links:[], searchRequired:false, searchMode:""
- Summarizing a URL → query:"summarize", links:[url...], searchRequired:true, searchMode:"normal"
- Asking for news/articles → searchMode:"news"
Examples
1. Follow-up: What is the capital of France?
"query":"capital of France","links":[],"searchRequired":true,"searchMode":"normal"
2. Hi, how are you?
Rephrased question\`
<question>
not_needed
</question>
\`
"query":"","links":[],"searchRequired":false,"searchMode":""
3. Follow up question: What is Docker?
Rephrased question: \`
<question>
What is Docker
</question>
\`
3. Follow-up: What is Docker?
"query":"what is Docker","links":[],"searchRequired":true,"searchMode":"normal"
4. Follow up question: Can you tell me what is X from https://example.com
Rephrased question: \`
<question>
Can you tell me what is X?
</question>
4. Follow-up: Can you tell me what is X from https://example.com?
"query":"what is X","links":["https://example.com"],"searchRequired":true,"searchMode":"normal"
<links>
https://example.com
</links>
\`
5. Follow-up: Summarize the content from https://example.com
"query":"summarize","links":["https://example.com"],"searchRequired":true,"searchMode":"normal"
5. Follow up question: Summarize the content from https://example.com
Rephrased question: \`
<question>
summarize
</question>
<links>
https://example.com
</links>
\`
</examples>
Anything below is the part of the actual conversation and you need to use conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
6. Follow-up: Latest news about AI
"query":"latest news about AI","links":[],"searchRequired":true,"searchMode":"news"
<conversation>
{chat_history}
</conversation>
Follow up question: {query}
Follow-up question: {query}
Rephrased question:
`;
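
The rewritten prompt asks the model for a bare JSON object instead of XML blocks. A sketch of the shape the downstream code expects, with field names taken from the schema described above; it matches the zod schema added in the metaSearchAgent diff further down:

type RetrieverOutput = {
  query: string;                       // standalone question, "summarize", or "" when no search is needed
  links: string[];                     // URLs pulled out of the user message
  searchRequired: boolean;             // false for greetings / simple writing tasks
  searchMode: '' | 'normal' | 'news';  // "news" routes the query to news engines
};

// Example of a conforming response for "Latest news about AI":
const example: RetrieverOutput = {
  query: 'latest news about AI',
  links: [],
  searchRequired: true,
  searchMode: 'news',
};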

View File

@@ -38,7 +38,7 @@ export const loadAimlApiChatModels = async () => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: apiKey,
apiKey: apiKey,
modelName: model.id,
temperature: 0.7,
configuration: {
@@ -76,7 +76,7 @@ export const loadAimlApiEmbeddingModels = async () => {
embeddingModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: apiKey,
apiKey: apiKey,
modelName: model.id,
configuration: {
baseURL: API_URL,

View File

@@ -31,7 +31,7 @@ export const loadDeepseekChatModels = async () => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: deepseekApiKey,
apiKey: deepseekApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {

View File

@@ -29,12 +29,15 @@ export const loadGroqChatModels = async () => {
chatModels[model.id] = {
displayName: model.id,
model: new ChatOpenAI({
openAIApiKey: groqApiKey,
apiKey: groqApiKey,
modelName: model.id,
temperature: 0.7,
configuration: {
baseURL: 'https://api.groq.com/openai/v1',
},
metadata: {
'model-type': 'groq',
},
}) as unknown as BaseChatModel,
};
});
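
The new metadata tag is not used by the Groq endpoint itself; it is read later in the metaSearchAgent changes to pick the json-object method for structured output (see that hunk below). A minimal sketch of that check, mirroring the code in this compare:

// Mirrors the branch in the metaSearchAgent diff: models tagged as Groq are
// driven through JSON mode rather than the default structured-output method.
const structuredOutputOptions = (llm: { metadata?: Record<string, unknown> }) =>
  llm.metadata?.['model-type'] === 'groq' ? { method: 'json-object' as const } : {};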

View File

@@ -118,7 +118,7 @@ export const getAvailableChatModelProviders = async () => {
[customOpenAiModelName]: {
displayName: customOpenAiModelName,
model: new ChatOpenAI({
openAIApiKey: customOpenAiApiKey,
apiKey: customOpenAiApiKey,
modelName: customOpenAiModelName,
temperature: 0.7,
configuration: {

View File

@@ -47,7 +47,7 @@ export const loadLMStudioChatModels = async () => {
chatModels[model.id] = {
displayName: model.name || model.id,
model: new ChatOpenAI({
openAIApiKey: 'lm-studio',
apiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},
@@ -83,7 +83,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
embeddingsModels[model.id] = {
displayName: model.name || model.id,
model: new OpenAIEmbeddings({
openAIApiKey: 'lm-studio',
apiKey: 'lm-studio',
configuration: {
baseURL: ensureV1Endpoint(endpoint),
},

View File

@@ -6,8 +6,8 @@ export const PROVIDER_INFO = {
key: 'ollama',
displayName: 'Ollama',
};
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { ChatOllama } from '@langchain/ollama';
import { OllamaEmbeddings } from '@langchain/ollama';
export const loadOllamaChatModels = async () => {
const ollamaApiEndpoint = getOllamaApiEndpoint();
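
With the dedicated @langchain/ollama package replacing the community imports, construction stays essentially the same. A hedged sketch assuming the package's baseUrl/model options; the values are placeholders, since in the app they come from getOllamaApiEndpoint() and the model list fetched from the Ollama server:

import { ChatOllama, OllamaEmbeddings } from '@langchain/ollama';

const chatModel = new ChatOllama({
  baseUrl: 'http://localhost:11434', // placeholder endpoint
  model: 'llama3.1',                 // placeholder model name
  temperature: 0.7,
});

const embeddings = new OllamaEmbeddings({
  baseUrl: 'http://localhost:11434',
  model: 'nomic-embed-text',
});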

View File

@@ -67,7 +67,7 @@ export const loadOpenAIChatModels = async () => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: openaiApiKey,
apiKey: openaiApiKey,
modelName: model.key,
temperature: 0.7,
}) as unknown as BaseChatModel,
@@ -93,7 +93,7 @@ export const loadOpenAIEmbeddingModels = async () => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey: openaiApiKey,
apiKey: openaiApiKey,
modelName: model.key,
}) as unknown as Embeddings,
};

View File

@@ -24,6 +24,7 @@ import computeSimilarity from '../utils/computeSimilarity';
import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events';
import { StreamEvent } from '@langchain/core/tracers/log_stream';
import { z } from 'zod';
export interface MetaSearchAgentType {
searchAndAnswer: (
@@ -52,6 +53,17 @@ type BasicChainInput = {
query: string;
};
const retrieverLLMOutputSchema = z.object({
query: z.string().describe('The query to search the web for.'),
links: z
.array(z.string())
.describe('The links to search/summarize if present'),
searchRequired: z
.boolean()
.describe('Wether there is a need to search the web'),
searchMode: z.enum(['', 'normal', 'news']).describe('The search mode.'),
});
class MetaSearchAgent implements MetaSearchAgentType {
private config: Config;
private strParser = new StringOutputParser();
@@ -62,26 +74,24 @@ class MetaSearchAgent implements MetaSearchAgentType {
private async createSearchRetrieverChain(llm: BaseChatModel) {
(llm as unknown as ChatOpenAI).temperature = 0;
return RunnableSequence.from([
PromptTemplate.fromTemplate(this.config.queryGeneratorPrompt),
Object.assign(
Object.create(Object.getPrototypeOf(llm)),
llm,
this.strParser,
RunnableLambda.from(async (input: string) => {
const linksOutputParser = new LineListOutputParser({
key: 'links',
});
).withStructuredOutput(retrieverLLMOutputSchema, {
...(llm.metadata?.['model-type'] === 'groq'
? {
method: 'json-object',
}
: {}),
}),
RunnableLambda.from(
async (input: z.infer<typeof retrieverLLMOutputSchema>) => {
let question = input.query;
const links = input.links;
const questionOutputParser = new LineOutputParser({
key: 'question',
});
const links = await linksOutputParser.parse(input);
let question = this.config.summarizer
? await questionOutputParser.parse(input)
: input;
if (question === 'not_needed') {
if (!input.searchRequired) {
return { query: '', docs: [] };
}
@@ -207,7 +217,10 @@ class MetaSearchAgent implements MetaSearchAgentType {
const res = await searchSearxng(question, {
language: 'en',
engines: this.config.activeEngines,
engines:
input.searchMode === 'normal'
? this.config.activeEngines
: ['bing news'],
});
const documents = res.results.map(
@@ -228,7 +241,8 @@ class MetaSearchAgent implements MetaSearchAgentType {
return { query: question, docs: documents };
}
}),
},
),
]);
}
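
This is the core of the structured-outputs commit: instead of prompting for XML and parsing it with line parsers, the retriever chain asks the model for an object that satisfies a zod schema. A minimal standalone sketch of the same pattern; the model choice and prompt are placeholders:

import { ChatOpenAI } from '@langchain/openai';
import { z } from 'zod';

const retrieverSchema = z.object({
  query: z.string(),
  links: z.array(z.string()),
  searchRequired: z.boolean(),
  searchMode: z.enum(['', 'normal', 'news']),
});

const run = async () => {
  const llm = new ChatOpenAI({ apiKey: process.env.OPENAI_API_KEY, modelName: 'gpt-4o-mini' });

  // withStructuredOutput binds the schema to the model call and parses the
  // reply, so `result` is already a typed object rather than a raw string.
  const structured = llm.withStructuredOutput(retrieverSchema);
  const result = await structured.invoke('Rephrase: what is the capital of France?');

  console.log(result.query, result.searchRequired, result.searchMode);
};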

yarn.lock (680 changes)

File diff suppressed because it is too large.