# Mirror of https://github.com/ItzCrazyKns/Perplexica.git
# Synced 2025-06-22 09:48:36 +00:00
# Commit: Integrate DeepSeek and LMStudio AI providers - Add message processing
# utilities for improved handling - Implement reasoning panel for message actions -
# Add logging functionality to UI - Update configurations and dependencies
# 34 lines · 662 B · TOML
# Core server settings.
[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")

|
# OpenAI provider credentials (leave empty to disable).
[MODELS.OPENAI]
API_KEY = ""

|
# Groq provider credentials (leave empty to disable).
[MODELS.GROQ]
API_KEY = ""

|
# Anthropic provider credentials (leave empty to disable).
[MODELS.ANTHROPIC]
API_KEY = ""

|
# Google Gemini provider credentials (leave empty to disable).
[MODELS.GEMINI]
API_KEY = ""

|
# DeepSeek provider credentials (leave empty to disable).
[MODELS.DEEPSEEK]
API_KEY = ""

|
# Local Ollama endpoint (leave empty to disable).
[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434

|
# Local LM Studio endpoint (leave empty to disable).
[MODELS.LMSTUDIO]
API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234

|
# Any OpenAI-compatible server: all three keys must be set for this
# provider to be usable.
[MODELS.CUSTOM_OPENAI]
API_KEY = ""
API_URL = ""
MODEL_NAME = ""

|
# External (non-model) service endpoints.
[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL