Compare commits

..

5 Commits

Author SHA1 Message Date
701819d018 Revert "Update README.md"
This reverts commit 68e151b2bd.
2025-05-13 20:14:08 +05:30
68e151b2bd Update README.md 2025-04-29 17:13:30 +05:30
06ff272541 feat(openai): add GPT 4.1 models 2025-04-29 13:10:14 +05:30
4154d5e4b1 Merge branch 'pr/629' 2025-04-23 20:35:52 +05:30
8aaee2c40c feat(app): support complex title 2025-02-15 16:48:21 +08:00
6 changed files with 22 additions and 75 deletions

View File

@@ -84,18 +84,16 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
3. After cloning, navigate to the directory containing the project files. 3. After cloning, navigate to the directory containing the project files.
4. Update environment variables in `docker-compose.yml` file to configure `config.toml`. 4. Rename the `sample.config.toml` file to `config.toml`. For Docker setups, you need only fill in the following fields:
Example: - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
- `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
- `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
- `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
The section of `config.toml` shown below can be configured using the variables `MODELS_CUSTOM_OPENAI_API_KEY="sk-123456"`, `MODELS_CUSTOM_OPENAI_API_URL="http://localopenai:11134"` and `MODELS_CUSTOM_OPENAI_MODEL_NAME="meta-llama/llama-4"` **Note**: You can change these after starting Perplexica from the settings dialog.
```toml - `SIMILARITY_MEASURE`: The similarity measure to use (This is filled by default; you can leave it as is if you are unsure about it.)
[MODELS.CUSTOM_OPENAI]
API_KEY = "sk-123456"
API_URL = "http://localopenai:11134"
MODEL_NAME = "meta-llama/llama-4"
```
5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute: 5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute:

View File

@@ -21,9 +21,7 @@ COPY --from=builder /home/perplexica/.next/static ./public/_next/static
COPY --from=builder /home/perplexica/.next/standalone ./ COPY --from=builder /home/perplexica/.next/standalone ./
COPY --from=builder /home/perplexica/data ./data COPY --from=builder /home/perplexica/data ./data
COPY sample.config.toml /home/perplexica/config.toml
COPY container_entrypoint.sh /home/perplexica/container_entrypoint.sh
RUN mkdir /home/perplexica/uploads RUN mkdir /home/perplexica/uploads
CMD ["bash", "/home/perplexica/container_entrypoint.sh"] CMD ["node", "server.js"]

View File

@@ -1,49 +0,0 @@
#!/usr/bin/env bash
# Container entrypoint: rewrites /home/perplexica/config.toml in place,
# overriding any "key = value" line whose matching SECTION_KEY environment
# variable is set (e.g. GENERAL_SIMILARITY_MEASURE, MODELS_GEMINI_API_KEY),
# then starts the Node server.

CONFIG_TOML_FILE=/home/perplexica/config.toml
TMP_FILE="${CONFIG_TOML_FILE}.tmp"
touch "$TMP_FILE"

current_section=""
while IFS= read -r line; do
  # Check if line is a section header (e.g., "[GENERAL]" or "[MODELS.CUSTOM_OPENAI]")
  if [[ "$line" =~ ^\[([^]]+)\] ]]; then
    # Normalize ALL dots to underscores up front so the env-var name is
    # correct for every key in this section. The original used
    # `sed 's/\./_/'` (no /g) inside the key branch, which only replaced
    # one dot per key line and produced wrong names for multi-dot sections.
    current_section="${BASH_REMATCH[1]//./_}"
    echo "$line" >> "$TMP_FILE"
    continue
  fi
  # Pass blank lines and comments through untouched
  if [[ -z "$line" || "$line" =~ ^[[:space:]]*\# ]]; then
    echo "$line" >> "$TMP_FILE"
    continue
  fi
  # "key = value" line: take the key from before the first '='
  # (the value itself may contain '=' and is never parsed here)
  key=$(echo "$line" | cut -d '=' -f 1 | xargs)
  # Environment variable name in the form SECTION_KEY
  env_var_name="${current_section}_${key}"
  # Indirect expansion; ':-' keeps this safe if the var is unset
  env_var_value="${!env_var_name:-}"
  if [ -n "$env_var_value" ]; then
    # Override from the environment
    echo "$key = $env_var_value" >> "$TMP_FILE"
  else
    # Keep original line if no env var exists
    echo "$line" >> "$TMP_FILE"
  fi
done < "$CONFIG_TOML_FILE"

# Replace the original file with the rewritten one
mv "$TMP_FILE" "$CONFIG_TOML_FILE"
echo "Config file updated successfully."

# Start server
node server.js

View File

@@ -16,19 +16,6 @@ services:
dockerfile: app.dockerfile dockerfile: app.dockerfile
environment: environment:
- SEARXNG_API_URL=http://searxng:8080 - SEARXNG_API_URL=http://searxng:8080
- GENERAL_SIMILARITY_MEASURE="cosine" # "cosine" or "dot"
- GENERAL_KEEP_ALIVE="5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
- MODELS_OPENAI_API_KEY=""
- MODELS_GROQ_API_KEY=""
- MODELS_ANTHROPIC_API_KEY=""
- MODELS_GEMINI_API_KEY=""
- MODELS_CUSTOM_OPENAI_API_KEY=""
- MODELS_CUSTOM_OPENAI_API_URL=""
- MODELS_CUSTOM_OPENAI_MODEL_NAME=""
- MODELS_OLLAMA_API_KEY="" # Ollama API URL - http://host.docker.internal:11434
- MODELS_DEEPSEEK_API_KEY=""
- MODELS_LM_STUDIO_API_KEY="" # LM Studio API URL - http://host.docker.internal:1234
- API_ENDPOINTS_SEARXNG="" # SearxNG API URL - http://localhost:32768
ports: ports:
- 3000:3000 - 3000:3000
networks: networks:
@@ -36,6 +23,7 @@ services:
volumes: volumes:
- backend-dbstore:/home/perplexica/data - backend-dbstore:/home/perplexica/data
- uploads:/home/perplexica/uploads - uploads:/home/perplexica/uploads
- ./config.toml:/home/perplexica/config.toml
restart: unless-stopped restart: unless-stopped
networks: networks:

View File

@@ -30,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
displayName: 'GPT-4 omni mini', displayName: 'GPT-4 omni mini',
key: 'gpt-4o-mini', key: 'gpt-4o-mini',
}, },
{
displayName: 'GPT 4.1 nano',
key: 'gpt-4.1-nano',
},
{
displayName: 'GPT 4.1 mini',
key: 'gpt-4.1-mini',
},
{
displayName: 'GPT 4.1',
key: 'gpt-4.1',
},
]; ];
const openaiEmbeddingModels: Record<string, string>[] = [ const openaiEmbeddingModels: Record<string, string>[] = [

View File

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splittedText = await splitter.splitText(parsedText); const splittedText = await splitter.splitText(parsedText);
const title = res.data const title = res.data
.toString('utf8') .toString('utf8')
.match(/<title>(.*?)<\/title>/)?.[1]; .match(/<title.*>(.*?)<\/title>/)?.[1];
const linkDocs = splittedText.map((text) => { const linkDocs = splittedText.map((text) => {
return new Document({ return new Document({