Compare commits

13 Commits

Author SHA1 Message Date
ItzCrazyKns
8dc24c2d1a Merge pull request #877 from ramkrishna2910/lemonade
Lemonade integration to run local LLMs with NPU and GPU acceleration
2025-09-18 21:15:20 +05:30
Ramakrishnan Sivakumar
8afcdd044c Update readme 2025-09-17 12:54:59 -07:00
Ramakrishnan Sivakumar
5b12e99335 update docs 2025-09-17 12:53:11 -07:00
Ramakrishnan Sivakumar
5b5e83a3a0 Add lemonade integration 2025-09-17 12:28:02 -07:00
ItzCrazyKns
6dd33aa33c Merge pull request #876 from fizikiukas/patch-1
Add deployment badge for Hostinger
2025-09-17 23:44:00 +05:30
Valentinas Čirba
e705952503 Add deployment badge for Hostinger 2025-09-17 18:16:35 +03:00
ItzCrazyKns
b8e4152e77 Merge pull request #857 from skoved/fix-light-mode
make file icon in attachment modal in chat page fit light theme better
2025-09-12 21:16:46 +05:30
ItzCrazyKns
c8ac9279bd Merge pull request #866 from agungbesti/feat/add-openai-models
feat: add new OpenAI models with proper temperature parameter handling
2025-09-12 21:15:09 +05:30
akubesti
6f367c34a8 feat: add gpt-5, gpt-5-mini, o3 models and remove gpt-5-chat-latest
- Add new OpenAI models: gpt-5, gpt-5-mini, and o3 series

- Fix temperature parameter handling for o3 models

- Update models list to ensure compatibility
2025-09-12 22:22:16 +07:00
akubesti
328b12ffbe feat: add new OpenAI models with proper temperature parameter handling
- Add GPT 4.1 series and o1/o3/o4 models with temperature compatibility fixes

- Remove gpt-5/gpt-5-mini models due to organization verification restrictions

- Fix 400 errors for models that only support default temperature values
2025-09-11 16:38:01 +07:00
skoved
d8486e90bb make file icon in attachment modal in chat page fit light theme better
Give the file icon in the attachment modal on the chat page an off-white background so that it matches the light theme and looks the same as the attachment modal on the home page.
2025-08-27 09:43:09 -04:00
ItzCrazyKns
238bcaff2b Delete .github/FUNDING.yml 2025-08-27 16:23:39 +05:30
ItzCrazyKns
6f7c55b783 Update FUNDING.yml with Patreon username
Added Patreon username for funding support.
2025-08-27 16:19:53 +05:30
9 changed files with 236 additions and 12 deletions

View File

@@ -29,6 +29,7 @@
- [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
- [Non-Docker Installation](#non-docker-installation)
- [Ollama Connection Errors](#ollama-connection-errors)
- [Lemonade Connection Errors](#lemonade-connection-errors)
- [Using as a Search Engine](#using-as-a-search-engine)
- [Using Perplexica's API](#using-perplexicas-api)
- [Expose Perplexica to a network](#expose-perplexica-to-network)
@@ -89,7 +90,8 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
- `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
- `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"`. **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)** (see the example snippet after this list).
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
- `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
- `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
- `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
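For reference, a filled-in `config.toml` for a local OpenAI-compliant server plus Lemonade might look like the sketch below. The section and key names follow the sample config shown further down in this diff; the values are placeholders to adapt:

```toml
[MODELS.CUSTOM_OPENAI]
API_URL = "http://host.docker.internal:8080"  # port your llama-server listens on
API_KEY = "whatever-you-want-but-not-blank"
MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"

[MODELS.LEMONADE]
API_URL = "http://host.docker.internal:8000"  # default Lemonade port
API_KEY = ""  # optional
```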
@@ -150,6 +152,25 @@ If you're encountering an Ollama connection error, it is likely due to the backe
- Ensure that the port (default is 11434) is not blocked by your firewall.
#### Lemonade Connection Errors
If you're encountering a Lemonade connection error, it is likely due to the backend being unable to connect to Lemonade's API. To fix this issue you can:
1. **Check your Lemonade API URL:** Ensure that the API URL is correctly set in the settings menu.
2. **Update API URL Based on OS:**
- **Windows:** Use `http://host.docker.internal:8000`
- **Mac:** Use `http://host.docker.internal:8000`
- **Linux:** Use `http://<private_ip_of_host>:8000`
Adjust the port number if you're using a different one.
3. **Ensure Lemonade Server is Running:**
- Make sure your Lemonade server is running and accessible on the configured port (default is 8000).
- Verify that Lemonade is configured to accept connections from all interfaces (`0.0.0.0`), not just localhost (`127.0.0.1`).
- Ensure that the port (default is 8000) is not blocked by your firewall.
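A quick way to confirm the server is reachable is to probe Lemonade's OpenAI-compatible models endpoint, the same one Perplexica's provider code queries. This is an illustrative snippet, not part of the repo:

```ts
// Reachability probe for a local Lemonade server (illustrative).
const base = 'http://host.docker.internal:8000';

fetch(`${base}/api/v1/models`)
  .then((res) => res.json())
  .then((body) => console.log('Lemonade models:', body))
  .catch((err) => console.error('Lemonade unreachable:', err));
```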
## Using as a Search Engine
If you wish to use Perplexica as an alternative to traditional search engines like Google or Bing, or if you want to add a shortcut for quick access from your browser's search bar, follow these steps:
@@ -174,6 +195,8 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
[![Deploy to Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
[![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
[![Run on ClawCloud](https://raw.githubusercontent.com/ClawCloud/Run-Template/refs/heads/main/Run-on-ClawCloud.svg)](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
[![Deploy on Hostinger](https://assets.hostinger.com/vps/deploy.svg)](https://www.hostinger.com/vps/docker-hosting?compose_url=https://raw.githubusercontent.com/ItzCrazyKns/Perplexica/refs/heads/master/docker-compose.yaml)
## Upcoming Features

View File

@@ -31,5 +31,9 @@ API_KEY = "" # Required to use AI/ML API chat and embedding models
[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
[MODELS.LEMONADE]
API_URL = "" # Lemonade API URL - http://host.docker.internal:8000
API_KEY = "" # Optional API key for Lemonade
[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768

View File

@@ -10,6 +10,8 @@ import {
getDeepseekApiKey,
getAimlApiKey,
getLMStudioApiEndpoint,
getLemonadeApiEndpoint,
getLemonadeApiKey,
updateConfig,
getOllamaApiKey,
} from '@/lib/config';
@@ -56,6 +58,8 @@ export const GET = async (req: Request) => {
config['ollamaApiUrl'] = getOllamaApiEndpoint();
config['ollamaApiKey'] = getOllamaApiKey();
config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
config['lemonadeApiUrl'] = getLemonadeApiEndpoint();
config['lemonadeApiKey'] = getLemonadeApiKey();
config['anthropicApiKey'] = getAnthropicApiKey();
config['groqApiKey'] = getGroqApiKey();
config['geminiApiKey'] = getGeminiApiKey();
@@ -106,6 +110,10 @@ export const POST = async (req: Request) => {
LM_STUDIO: {
API_URL: config.lmStudioApiUrl,
},
LEMONADE: {
API_URL: config.lemonadeApiUrl,
API_KEY: config.lemonadeApiKey,
},
CUSTOM_OPENAI: {
API_URL: config.customOpenaiApiUrl,
API_KEY: config.customOpenaiApiKey,
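The settings page persists these fields through this route. A sketch of the equivalent client call follows, run inside an async context; the `/api/config` path is assumed from the handler's shape, and the field names match the keys read in the GET handler above:

```ts
// Illustrative client-side save of the Lemonade settings (route path assumed).
await fetch('/api/config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    lemonadeApiUrl: 'http://host.docker.internal:8000',
    lemonadeApiKey: '', // optional; an empty string when unset
  }),
});
```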

View File

@@ -23,6 +23,8 @@ interface SettingsType {
ollamaApiUrl: string;
ollamaApiKey: string;
lmStudioApiUrl: string;
lemonadeApiUrl: string;
lemonadeApiKey: string;
deepseekApiKey: string;
aimlApiKey: string;
customOpenaiApiKey: string;
@@ -953,6 +955,48 @@ const Page = () => {
</div>
</div>
</SettingsSection>
<SettingsSection title="Lemonade">
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Lemonade API URL
</p>
<Input
type="text"
placeholder="Lemonade API URL"
value={config.lemonadeApiUrl}
isSaving={savingStates['lemonadeApiUrl']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
lemonadeApiUrl: e.target.value,
}));
}}
onSave={(value) => saveConfig('lemonadeApiUrl', value)}
/>
</div>
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Lemonade API Key (Optional)
</p>
<Input
type="password"
placeholder="Lemonade API Key"
value={config.lemonadeApiKey}
isSaving={savingStates['lemonadeApiKey']}
onChange={(e) => {
setConfig((prev) => ({
...prev!,
lemonadeApiKey: e.target.value,
}));
}}
onSave={(value) => saveConfig('lemonadeApiKey', value)}
/>
</div>
</div>
</SettingsSection>
</div>
)
)}

View File

@@ -107,8 +107,8 @@ const AttachSmall = () => {
key={i}
className="flex flex-row items-center justify-start w-full space-x-3 p-3"
>
<div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-white/70" />
<div className="bg-light-100 dark:bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
<File size={16} className="text-black/70 dark:text-white/70" />
</div>
<p className="text-black/70 dark:text-white/70 text-sm">
{file.fileName.length > 25

View File

@@ -42,6 +42,10 @@ interface Config {
LM_STUDIO: {
API_URL: string;
};
LEMONADE: {
API_URL: string;
API_KEY: string;
};
CUSTOM_OPENAI: {
API_URL: string;
API_KEY: string;
@@ -105,6 +109,11 @@ export const getCustomOpenaiModelName = () =>
export const getLMStudioApiEndpoint = () =>
loadConfig().MODELS.LM_STUDIO.API_URL;
export const getLemonadeApiEndpoint = () =>
loadConfig().MODELS.LEMONADE.API_URL;
export const getLemonadeApiKey = () => loadConfig().MODELS.LEMONADE.API_KEY;
const mergeConfigs = (current: any, update: any): any => {
if (update === null || update === undefined) {
return current;

View File

@@ -45,6 +45,11 @@ import {
loadLMStudioEmbeddingsModels,
PROVIDER_INFO as LMStudioInfo,
} from './lmstudio';
import {
loadLemonadeChatModels,
loadLemonadeEmbeddingModels,
PROVIDER_INFO as LemonadeInfo,
} from './lemonade';
export const PROVIDER_METADATA = {
openai: OpenAIInfo,
@@ -56,6 +61,7 @@ export const PROVIDER_METADATA = {
deepseek: DeepseekInfo,
aimlapi: AimlApiInfo,
lmstudio: LMStudioInfo,
lemonade: LemonadeInfo,
custom_openai: {
key: 'custom_openai',
displayName: 'Custom OpenAI',
@@ -84,6 +90,7 @@ export const chatModelProviders: Record<
deepseek: loadDeepseekChatModels,
aimlapi: loadAimlApiChatModels,
lmstudio: loadLMStudioChatModels,
lemonade: loadLemonadeChatModels,
};
export const embeddingModelProviders: Record<
@@ -96,6 +103,7 @@ export const embeddingModelProviders: Record<
transformers: loadTransformersEmbeddingsModels,
aimlapi: loadAimlApiEmbeddingModels,
lmstudio: loadLMStudioEmbeddingsModels,
lemonade: loadLemonadeEmbeddingModels,
};
export const getAvailableChatModelProviders = async () => {
@@ -120,7 +128,11 @@ export const getAvailableChatModelProviders = async () => {
model: new ChatOpenAI({
apiKey: customOpenAiApiKey,
modelName: customOpenAiModelName,
...((() => {
  // Omit `temperature` for models that only accept the provider default.
  const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];
  const isTemperatureRestricted = temperatureRestrictedModels.some((restrictedModel) => customOpenAiModelName.includes(restrictedModel));
  return isTemperatureRestricted ? {} : { temperature: 0.7 };
})()),
configuration: {
baseURL: customOpenAiApiUrl,
},
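Note that this temperature-restricted list is duplicated in the OpenAI provider (last file in this diff). A possible follow-up, sketched as a suggestion rather than code from this PR, would be one shared helper:

```ts
// Hypothetical shared helper (not part of this PR): models that reject a
// custom temperature and only accept the provider default.
const TEMPERATURE_RESTRICTED_MODELS = [
  'gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini',
];

export const isTemperatureRestricted = (modelName: string): boolean =>
  TEMPERATURE_RESTRICTED_MODELS.some((m) => modelName.includes(m));
```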

View File

@@ -0,0 +1,94 @@
import axios from 'axios';
import { getLemonadeApiEndpoint, getLemonadeApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';

export const PROVIDER_INFO = {
key: 'lemonade',
displayName: 'Lemonade',
};
export const loadLemonadeChatModels = async () => {
const lemonadeApiEndpoint = getLemonadeApiEndpoint();
const lemonadeApiKey = getLemonadeApiKey();
if (!lemonadeApiEndpoint) return {};
try {
const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
headers: {
'Content-Type': 'application/json',
...(lemonadeApiKey
? { Authorization: `Bearer ${lemonadeApiKey}` }
: {}),
},
});
const { data: models } = res.data;
const chatModels: Record<string, ChatModel> = {};
models.forEach((model: any) => {
chatModels[model.id] = {
displayName: model.id,
model: new ChatOpenAI({
apiKey: lemonadeApiKey || 'lemonade-key',
modelName: model.id,
temperature: 0.7,
configuration: {
baseURL: `${lemonadeApiEndpoint}/api/v1`,
},
}),
};
});
return chatModels;
} catch (err) {
console.error(`Error loading Lemonade models: ${err}`);
return {};
}
};
export const loadLemonadeEmbeddingModels = async () => {
const lemonadeApiEndpoint = getLemonadeApiEndpoint();
const lemonadeApiKey = getLemonadeApiKey();
if (!lemonadeApiEndpoint) return {};
try {
const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
headers: {
'Content-Type': 'application/json',
...(lemonadeApiKey
? { Authorization: `Bearer ${lemonadeApiKey}` }
: {}),
},
});
const { data: models } = res.data;
const embeddingModels: Record<string, EmbeddingModel> = {};
// Filter models that support embeddings (if Lemonade provides this info)
// For now, we'll assume all models can be used for embeddings
models.forEach((model: any) => {
embeddingModels[model.id] = {
displayName: model.id,
model: new OpenAIEmbeddings({
apiKey: lemonadeApiKey || 'lemonade-key',
modelName: model.id,
configuration: {
baseURL: `${lemonadeApiEndpoint}/api/v1`,
},
}),
};
});
return embeddingModels;
} catch (err) {
console.error(`Error loading Lemonade embedding models: ${err}`);
return {};
}
};
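Both loaders return a map keyed by model id, each entry wrapping a LangChain client pointed at `${lemonadeApiEndpoint}/api/v1`. A minimal usage sketch, assuming an async context and a running Lemonade server (model ids depend on what the server reports):

```ts
// Illustrative: list the available Lemonade chat models and invoke one.
const chatModels = await loadLemonadeChatModels();
console.log('available:', Object.keys(chatModels));

const first = Object.values(chatModels)[0];
if (first) {
  const reply = await first.model.invoke('Hello from Perplexica!');
  console.log(reply.content);
}
```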

View File

@@ -26,6 +26,10 @@ const openaiChatModels: Record<string, string>[] = [
displayName: 'GPT-4 omni',
key: 'gpt-4o',
},
{
displayName: 'GPT-4o (2024-05-13)',
key: 'gpt-4o-2024-05-13',
},
{
displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini',
@@ -47,12 +51,28 @@ const openaiChatModels: Record<string, string>[] = [
key: 'gpt-5-nano',
},
{
displayName: 'GPT 5',
key: 'gpt-5',
},
{
displayName: 'GPT 5 Mini',
key: 'gpt-5-mini',
},
{
displayName: 'o1',
key: 'o1',
},
{
displayName: 'o3',
key: 'o3',
},
{
displayName: 'o3 Mini',
key: 'o3-mini',
},
{
displayName: 'o4 Mini',
key: 'o4-mini',
},
];
@@ -76,13 +96,23 @@ export const loadOpenAIChatModels = async () => {
const chatModels: Record<string, ChatModel> = {};
openaiChatModels.forEach((model) => {
// Models that only support the default temperature (= 1)
const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];
const isTemperatureRestricted = temperatureRestrictedModels.some((restrictedModel) => model.key.includes(restrictedModel));
const modelConfig: any = {
apiKey: openaiApiKey,
modelName: model.key,
};
// Only add temperature if the model supports it
if (!isTemperatureRestricted) {
modelConfig.temperature = 0.7;
}
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel,
};
});
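The net effect of the guard: restricted models are constructed without a `temperature` field, falling back to the API-side default, while every other model keeps the explicit 0.7 (constructions shown for illustration):

```ts
// What the loop above effectively builds (illustrative):
new ChatOpenAI({ apiKey: openaiApiKey, modelName: 'o3-mini' }); // temperature omitted -> API default
new ChatOpenAI({ apiKey: openaiApiKey, modelName: 'gpt-4o', temperature: 0.7 });
```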