Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-09-19 07:41:33 +00:00)

Compare commits: 30725b5d6d...master (18 commits)
Commits in this range:

8dc24c2d1a
8afcdd044c
5b12e99335
5b5e83a3a0
6dd33aa33c
e705952503
b8e4152e77
c8ac9279bd
6f367c34a8
328b12ffbe
d8486e90bb
238bcaff2b
6f7c55b783
83a0cffe1b
829ae59944
a546eb18a1
ff1ca56157
65fc881356
README.md (40 lines changed)

@@ -29,6 +29,7 @@
 - [Getting Started with Docker (Recommended)](#getting-started-with-docker-recommended)
 - [Non-Docker Installation](#non-docker-installation)
 - [Ollama Connection Errors](#ollama-connection-errors)
+- [Lemonade Connection Errors](#lemonade-connection-errors)
 - [Using as a Search Engine](#using-as-a-search-engine)
 - [Using Perplexica's API](#using-perplexicas-api)
 - [Expose Perplexica to a network](#expose-perplexica-to-network)
@@ -53,7 +54,7 @@ Want to know more about its architecture and how it works? You can read it [here
 
 ## Features
 
-- **Local LLMs**: You can make use of local LLMs such as Llama3 and Mixtral using Ollama.
+- **Local LLMs**: You can utilize local LLMs such as Qwen, DeepSeek, Llama, and Mistral.
 - **Two Main Modes:**
   - **Copilot Mode:** (In development) Boosts search by generating different queries to find more relevant internet sources. Unlike normal search, which only uses the context returned by SearxNG, it visits the top matches and tries to find sources relevant to the user's query directly from the page.
   - **Normal Mode:** Processes your query and performs a web search.
@@ -87,8 +88,10 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
 
 - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
+- `CUSTOM_OPENAI`: Your OpenAI-API-compliant local server URL, model name, and API key. You should run your local server with host set to `0.0.0.0`, take note of which port number it is running on, and then use that port number to set `API_URL = http://host.docker.internal:PORT_NUMBER`. You must specify the model name, such as `MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"`. Finally, set `API_KEY` to the appropriate value. If you have not defined an API key, just put anything you want in-between the quotation marks: `API_KEY = "whatever-you-want-but-not-blank"`. **You only need to configure these settings if you want to use a local OpenAI-compliant server, such as Llama.cpp's [`llama-server`](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md)**.
 - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
-- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
+- `LEMONADE`: Your Lemonade API URL. Since Lemonade runs directly on your local machine (not in Docker), you should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Lemonade on port 8000, use `http://host.docker.internal:8000`. For other ports, adjust accordingly. **You need to fill this if you wish to use Lemonade's models**.
+- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
 - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
 - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
 - `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
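Put together, the Docker-setup fields above map onto `config.toml` roughly as follows. This is a sketch: the `[MODELS.CUSTOM_OPENAI]` section name is inferred from the `Config` interface later in this diff, and the port, model name, and key values are placeholders.

```toml
# Sketch of a config.toml excerpt for the Docker + local-server case described above.
# Section names follow sample.config.toml and the Config interface in this diff.
[MODELS.CUSTOM_OPENAI]
API_URL = "http://host.docker.internal:8080"  # your llama-server's port
MODEL_NAME = "unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL"
API_KEY = "whatever-you-want-but-not-blank"   # any non-empty string if the server has no key

[MODELS.LEMONADE]
API_URL = "http://host.docker.internal:8000"  # Lemonade's default port
API_KEY = ""                                  # optional for Lemonade
```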
@@ -120,7 +123,17 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 
 See the [installation documentation](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/installation) for more information like updating, etc.
 
-### Ollama Connection Errors
+### Troubleshooting
+
+#### Local OpenAI-API-Compliant Servers
+
+If Perplexica tells you that you haven't configured any chat model providers, ensure that:
+
+1. Your server is running on `0.0.0.0` (not `127.0.0.1`) and on the same port you put in the API URL.
+2. You have specified the correct model name loaded by your local LLM server.
+3. You have specified the correct API key, or if one is not defined, you have put *something* in the API key field and not left it empty.
+
+#### Ollama Connection Errors
 
 If you're encountering an Ollama connection error, it is likely due to the backend being unable to connect to Ollama's API. To fix this issue you can:
 
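A quick way to sanity-check all three items at once is to query the server's OpenAI-compatible model listing from the machine running Perplexica. A minimal sketch, assuming a server on port 8080 and the placeholder model name from the example above:

```ts
// Hedged sketch: verify a local OpenAI-compatible server is reachable and
// serving the expected model. BASE_URL and EXPECTED_MODEL are placeholders.
const BASE_URL = 'http://localhost:8080'; // server must be bound to 0.0.0.0
const EXPECTED_MODEL = 'unsloth/DeepSeek-R1-0528-Qwen3-8B-GGUF:Q4_K_XL';

async function checkServer(): Promise<void> {
  const res = await fetch(`${BASE_URL}/v1/models`, {
    headers: { Authorization: 'Bearer whatever-you-want-but-not-blank' },
  });
  if (!res.ok) throw new Error(`Server responded with ${res.status}`);
  const { data } = (await res.json()) as { data: { id: string }[] };
  const ids = data.map((m) => m.id);
  console.log(
    ids.includes(EXPECTED_MODEL)
      ? 'Model found; URL and model name are correct'
      : `Model not found; server offers: ${ids.join(', ')}`,
  );
}

checkServer().catch(console.error);
```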
@@ -139,6 +152,25 @@ If you're encountering an Ollama connection error, it is likely due to the backe
 
 - Ensure that the port (default is 11434) is not blocked by your firewall.
 
+#### Lemonade Connection Errors
+
+If you're encountering a Lemonade connection error, it is likely due to the backend being unable to connect to Lemonade's API. To fix this issue you can:
+
+1. **Check your Lemonade API URL:** Ensure that the API URL is correctly set in the settings menu.
+2. **Update API URL Based on OS:**
+
+   - **Windows:** Use `http://host.docker.internal:8000`
+   - **Mac:** Use `http://host.docker.internal:8000`
+   - **Linux:** Use `http://<private_ip_of_host>:8000`
+
+   Adjust the port number if you're using a different one.
+
+3. **Ensure Lemonade Server is Running:**
+
+   - Make sure your Lemonade server is running and accessible on the configured port (default is 8000).
+   - Verify that Lemonade is configured to accept connections from all interfaces (`0.0.0.0`), not just localhost (`127.0.0.1`).
+   - Ensure that the port (default is 8000) is not blocked by your firewall.
+
 ## Using as a Search Engine
 
 If you wish to use Perplexica as an alternative to traditional search engines like Google or Bing, or if you want to add a shortcut for quick access from your browser's search bar, follow these steps:
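The same kind of probe works for the Lemonade checklist; note the `/api/v1` prefix, which matches `src/lib/providers/lemonade.ts` later in this diff. Host and port are placeholders:

```ts
// Hedged sketch: confirm Lemonade is reachable before pointing Perplexica at it.
const LEMONADE_URL = 'http://localhost:8000'; // use host.docker.internal from inside Docker

async function checkLemonade(): Promise<void> {
  const res = await fetch(`${LEMONADE_URL}/api/v1/models`, {
    headers: { 'Content-Type': 'application/json' },
  });
  if (!res.ok) throw new Error(`Lemonade responded with ${res.status}`);
  const { data } = (await res.json()) as { data: { id: string }[] };
  console.log('Lemonade models:', data.map((m) => m.id).join(', '));
}

checkLemonade().catch(console.error);
```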
@@ -163,6 +195,8 @@ Perplexica runs on Next.js and handles all API requests. It works right away on
 [](https://usw.sealos.io/?openapp=system-template%3FtemplateName%3Dperplexica)
 [](https://repocloud.io/details/?app_id=267)
 [](https://template.run.claw.cloud/?referralCode=U11MRQ8U9RM4&openapp=system-fastdeploy%3FtemplateName%3Dperplexica)
+[](https://www.hostinger.com/vps/docker-hosting?compose_url=https://raw.githubusercontent.com/ItzCrazyKns/Perplexica/refs/heads/master/docker-compose.yaml)
+
 
 ## Upcoming Features
sample.config.toml

@@ -31,5 +31,9 @@ API_KEY = "" # Required to use AI/ML API chat and embedding models
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
 
+[MODELS.LEMONADE]
+API_URL = "" # Lemonade API URL - http://host.docker.internal:8000
+API_KEY = "" # Optional API key for Lemonade
+
 [API_ENDPOINTS]
 SEARXNG = "" # SearxNG API URL - http://localhost:32768
src/app/api/config/route.ts

@@ -10,6 +10,8 @@ import {
   getDeepseekApiKey,
   getAimlApiKey,
   getLMStudioApiEndpoint,
+  getLemonadeApiEndpoint,
+  getLemonadeApiKey,
   updateConfig,
   getOllamaApiKey,
 } from '@/lib/config';

@@ -56,6 +58,8 @@ export const GET = async (req: Request) => {
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
     config['ollamaApiKey'] = getOllamaApiKey();
     config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
+    config['lemonadeApiUrl'] = getLemonadeApiEndpoint();
+    config['lemonadeApiKey'] = getLemonadeApiKey();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();

@@ -106,6 +110,10 @@ export const POST = async (req: Request) => {
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },
+      LEMONADE: {
+        API_URL: config.lemonadeApiUrl,
+        API_KEY: config.lemonadeApiKey,
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
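With this route, the settings UI reads and writes every provider field, including the new Lemonade ones, through a single endpoint. A minimal client sketch, assuming the route is served at `/api/config` (inferred from its location under `src/app/api/config/`):

```ts
// Hedged sketch of client usage; the '/api/config' path is inferred from the file path.
async function setLemonadeUrl(url: string): Promise<void> {
  const current = await (await fetch('/api/config')).json();
  await fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...current, lemonadeApiUrl: url }),
  });
}

setLemonadeUrl('http://host.docker.internal:8000').catch(console.error);
```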
src/app/discover/page.tsx

@@ -91,7 +91,7 @@ const Page = () => {
   'border-[0.1px] rounded-full text-sm px-3 py-1 text-nowrap transition duration-200 cursor-pointer',
   activeTopic === t.key
     ? 'text-cyan-300 bg-cyan-300/30 border-cyan-300/60'
-    : 'border-white/30 text-white/70 hover:text-white hover:border-white/40 hover:bg-white/5',
+    : 'border-black/30 dark:border-white/30 text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white hover:border-black/40 dark:hover:border-white/40 hover:bg-black/5 dark:hover:bg-white/5',
 )}
 onClick={() => setActiveTopic(t.key)}
 >
src/app/settings/page.tsx

@@ -23,6 +23,8 @@ interface SettingsType {
   ollamaApiUrl: string;
   ollamaApiKey: string;
   lmStudioApiUrl: string;
+  lemonadeApiUrl: string;
+  lemonadeApiKey: string;
   deepseekApiKey: string;
   aimlApiKey: string;
   customOpenaiApiKey: string;
@@ -953,6 +955,48 @@ const Page = () => {
             </div>
           </div>
         </SettingsSection>
+
+        <SettingsSection title="Lemonade">
+          <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                Lemonade API URL
+              </p>
+              <Input
+                type="text"
+                placeholder="Lemonade API URL"
+                value={config.lemonadeApiUrl}
+                isSaving={savingStates['lemonadeApiUrl']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lemonadeApiUrl: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('lemonadeApiUrl', value)}
+              />
+            </div>
+
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                Lemonade API Key (Optional)
+              </p>
+              <Input
+                type="password"
+                placeholder="Lemonade API Key"
+                value={config.lemonadeApiKey}
+                isSaving={savingStates['lemonadeApiKey']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lemonadeApiKey: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('lemonadeApiKey', value)}
+              />
+            </div>
+          </div>
+        </SettingsSection>
       </div>
     )
   )}
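For context, the `Input` used above takes `onChange` for local state and a separate `onSave` callback, with `isSaving` as a pending flag. A minimal sketch of a component with that contract, assuming save fires on blur (the real component may debounce or save on Enter instead):

```tsx
// Hypothetical Input matching the props used in the settings page above.
// The blur-to-save behavior is an assumption, not the repo's actual component.
import { ChangeEvent } from 'react';

type InputProps = {
  type: string;
  placeholder: string;
  value: string;
  isSaving?: boolean;
  onChange: (e: ChangeEvent<HTMLInputElement>) => void;
  onSave: (value: string) => void;
};

const Input = ({ type, placeholder, value, isSaving, onChange, onSave }: InputProps) => (
  <input
    type={type}
    placeholder={placeholder}
    value={value}
    disabled={isSaving}
    onChange={onChange}
    onBlur={(e) => onSave(e.target.value)}
  />
);

export default Input;
```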
src/components/MessageInputActions/Attach.tsx

@@ -132,8 +132,8 @@ const Attach = ({ showText }: { showText?: boolean }) => {
   key={i}
   className="flex flex-row items-center justify-start w-full space-x-3 p-3"
 >
-  <div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
-    <File size={16} className="text-white/70" />
+  <div className="bg-light-100 dark:bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
+    <File size={16} className="text-black/70 dark:text-white/70" />
   </div>
   <p className="text-black/70 dark:text-white/70 text-sm">
     {file.fileName.length > 25
src/components/MessageInputActions/AttachSmall.tsx

@@ -107,8 +107,8 @@ const AttachSmall = () => {
   key={i}
   className="flex flex-row items-center justify-start w-full space-x-3 p-3"
 >
-  <div className="bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
-    <File size={16} className="text-white/70" />
+  <div className="bg-light-100 dark:bg-dark-100 flex items-center justify-center w-10 h-10 rounded-md">
+    <File size={16} className="text-black/70 dark:text-white/70" />
   </div>
   <p className="text-black/70 dark:text-white/70 text-sm">
     {file.fileName.length > 25
src/lib/config.ts

@@ -42,6 +42,10 @@ interface Config {
   LM_STUDIO: {
     API_URL: string;
   };
+  LEMONADE: {
+    API_URL: string;
+    API_KEY: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;

@@ -105,6 +109,11 @@ export const getCustomOpenaiModelName = () =>
 export const getLMStudioApiEndpoint = () =>
   loadConfig().MODELS.LM_STUDIO.API_URL;
 
+export const getLemonadeApiEndpoint = () =>
+  loadConfig().MODELS.LEMONADE.API_URL;
+
+export const getLemonadeApiKey = () => loadConfig().MODELS.LEMONADE.API_KEY;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
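The two new getters give the rest of the codebase typed access to the Lemonade section. A small sketch of how they are consumed, mirroring the header construction in `src/lib/providers/lemonade.ts` below:

```ts
// Sketch: building request headers from the new config getters.
// Mirrors the pattern used in src/lib/providers/lemonade.ts later in this diff.
import { getLemonadeApiEndpoint, getLemonadeApiKey } from '@/lib/config';

const endpoint = getLemonadeApiEndpoint(); // e.g. "http://host.docker.internal:8000"
const apiKey = getLemonadeApiKey();        // may be empty; the key is optional

const headers: Record<string, string> = {
  'Content-Type': 'application/json',
  // Spread in the Authorization header only when a key is configured.
  ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
};

console.log(`${endpoint}/api/v1/models`, headers);
```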
src/lib/providers/index.ts

@@ -45,6 +45,11 @@ import {
   loadLMStudioEmbeddingsModels,
   PROVIDER_INFO as LMStudioInfo,
 } from './lmstudio';
+import {
+  loadLemonadeChatModels,
+  loadLemonadeEmbeddingModels,
+  PROVIDER_INFO as LemonadeInfo,
+} from './lemonade';
 
 export const PROVIDER_METADATA = {
   openai: OpenAIInfo,

@@ -56,6 +61,7 @@ export const PROVIDER_METADATA = {
   deepseek: DeepseekInfo,
   aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
+  lemonade: LemonadeInfo,
   custom_openai: {
     key: 'custom_openai',
     displayName: 'Custom OpenAI',

@@ -84,6 +90,7 @@ export const chatModelProviders: Record<
   deepseek: loadDeepseekChatModels,
   aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
+  lemonade: loadLemonadeChatModels,
 };
 
 export const embeddingModelProviders: Record<

@@ -96,6 +103,7 @@ export const embeddingModelProviders: Record<
   transformers: loadTransformersEmbeddingsModels,
   aimlapi: loadAimlApiEmbeddingModels,
   lmstudio: loadLMStudioEmbeddingsModels,
+  lemonade: loadLemonadeEmbeddingModels,
 };
 
 export const getAvailableChatModelProviders = async () => {

@@ -120,7 +128,11 @@ export const getAvailableChatModelProviders = async () => {
         model: new ChatOpenAI({
           apiKey: customOpenAiApiKey,
           modelName: customOpenAiModelName,
-          temperature: 0.7,
+          ...((() => {
+            const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];
+            const isTemperatureRestricted = temperatureRestrictedModels.some((restrictedModel) => customOpenAiModelName.includes(restrictedModel));
+            return isTemperatureRestricted ? {} : { temperature: 0.7 };
+          })()),
           configuration: {
             baseURL: customOpenAiApiUrl,
           },
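The spread-over-IIFE in the last hunk is how the change omits `temperature` entirely for models that reject it, rather than sending a default. Isolated, the pattern looks like this (model names copied from the hunk; the rest is illustrative):

```ts
// Spreading an immediately-invoked function's result lets one option be
// conditionally omitted inline: spreading `{}` contributes no keys at all.
const modelName = 'o3-mini'; // illustrative
const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];

const options = {
  modelName,
  ...(temperatureRestrictedModels.some((m) => modelName.includes(m))
    ? {} // restricted model: no temperature key is sent
    : { temperature: 0.7 }),
};

console.log(options); // { modelName: 'o3-mini' }
```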
src/lib/providers/lemonade.ts (new file, 94 lines)

@@ -0,0 +1,94 @@
+import axios from 'axios';
+import { getLemonadeApiEndpoint, getLemonadeApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lemonade',
+  displayName: 'Lemonade',
+};
+
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+
+export const loadLemonadeChatModels = async () => {
+  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
+  const lemonadeApiKey = getLemonadeApiKey();
+
+  if (!lemonadeApiEndpoint) return {};
+
+  try {
+    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        ...(lemonadeApiKey
+          ? { Authorization: `Bearer ${lemonadeApiKey}` }
+          : {}),
+      },
+    });
+
+    const { data: models } = res.data;
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    models.forEach((model: any) => {
+      chatModels[model.id] = {
+        displayName: model.id,
+        model: new ChatOpenAI({
+          apiKey: lemonadeApiKey || 'lemonade-key',
+          modelName: model.id,
+          temperature: 0.7,
+          configuration: {
+            baseURL: `${lemonadeApiEndpoint}/api/v1`,
+          },
+        }),
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading Lemonade models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLemonadeEmbeddingModels = async () => {
+  const lemonadeApiEndpoint = getLemonadeApiEndpoint();
+  const lemonadeApiKey = getLemonadeApiKey();
+
+  if (!lemonadeApiEndpoint) return {};
+
+  try {
+    const res = await axios.get(`${lemonadeApiEndpoint}/api/v1/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        ...(lemonadeApiKey
+          ? { Authorization: `Bearer ${lemonadeApiKey}` }
+          : {}),
+      },
+    });
+
+    const { data: models } = res.data;
+
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    // Filter models that support embeddings (if Lemonade provides this info)
+    // For now, we'll assume all models can be used for embeddings
+    models.forEach((model: any) => {
+      embeddingModels[model.id] = {
+        displayName: model.id,
+        model: new OpenAIEmbeddings({
+          apiKey: lemonadeApiKey || 'lemonade-key',
+          modelName: model.id,
+          configuration: {
+            baseURL: `${lemonadeApiEndpoint}/api/v1`,
+          },
+        }),
+      };
+    });
+
+    return embeddingModels;
+  } catch (err) {
+    console.error(`Error loading Lemonade embedding models: ${err}`);
+    return {};
+  }
+};
src/lib/providers/openai.ts

@@ -26,6 +26,10 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni',
     key: 'gpt-4o',
   },
+  {
+    displayName: 'GPT-4o (2024-05-13)',
+    key: 'gpt-4o-2024-05-13',
+  },
   {
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',

@@ -47,12 +51,28 @@ const openaiChatModels: Record<string, string>[] = [
     key: 'gpt-5-nano',
   },
   {
-    displayName: 'GPT 5 mini',
+    displayName: 'GPT 5',
+    key: 'gpt-5',
+  },
+  {
+    displayName: 'GPT 5 Mini',
     key: 'gpt-5-mini',
   },
   {
-    displayName: 'GPT 5',
-    key: 'gpt-5',
+    displayName: 'o1',
+    key: 'o1',
+  },
+  {
+    displayName: 'o3',
+    key: 'o3',
+  },
+  {
+    displayName: 'o3 Mini',
+    key: 'o3-mini',
+  },
+  {
+    displayName: 'o4 Mini',
+    key: 'o4-mini',
   },
 ];
 

@@ -76,13 +96,23 @@ export const loadOpenAIChatModels = async () => {
     const chatModels: Record<string, ChatModel> = {};
 
     openaiChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
-        model: new ChatOpenAI({
-          apiKey: openaiApiKey,
-          modelName: model.key,
-          temperature: model.key.includes('gpt-5') ? 1 : 0.7,
-        }) as unknown as BaseChatModel,
+      // Models that only support temperature = 1
+      const temperatureRestrictedModels = ['gpt-5-nano', 'gpt-5', 'gpt-5-mini', 'o1', 'o3', 'o3-mini', 'o4-mini'];
+      const isTemperatureRestricted = temperatureRestrictedModels.some((restrictedModel) => model.key.includes(restrictedModel));
+
+      const modelConfig: any = {
+        apiKey: openaiApiKey,
+        modelName: model.key,
+      };
+
+      // Only add temperature if the model supports it
+      if (!isTemperatureRestricted) {
+        modelConfig.temperature = 0.7;
+      }
+
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI(modelConfig) as unknown as BaseChatModel,
       };
     });
 