Mirror of https://github.com/ItzCrazyKns/Perplexica.git (synced 2025-06-21 09:18:34 +00:00)
Update sample.config.toml
diff --git a/sample.config.toml b/sample.config.toml
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -3,12 +3,28 @@ PORT = 3001 # Port to run the server on
 SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
 KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
 
-[API_KEYS]
-OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
-GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
-ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef
-GEMINI = "" # Gemini API key - sk-1234567890abcdef1234567890abcdef
+[MODELS.OPENAI]
+API_KEY = ""
+
+[MODELS.GROQ]
+API_KEY = ""
+
+[MODELS.ANTHROPIC]
+API_KEY = ""
+
+[MODELS.GEMINI]
+API_KEY = ""
+
+[MODELS.OLLAMA]
+API_URL = "" # Ollama API URL - http://host.docker.internal:11434
+
+[MODELS.LMSTUDIO]
+API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234/v1
+
+[MODELS.CUSTOM_OPENAI]
+API_KEY = ""
+API_URL = ""
+MODEL_NAME = ""
 
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
-OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434
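With this change, provider credentials move from the flat [API_KEYS] table (and the OLLAMA endpoint entry) into per-provider [MODELS.*] tables. For illustration only, a filled-in sample.config.toml under the new layout might look like the sketch below; every key, URL, and model name here is a placeholder I invented, not a value from the commit, and the CUSTOM_OPENAI section assumes some OpenAI-compatible server you run yourself.

SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m"

[MODELS.OPENAI]
API_KEY = "sk-your-openai-key" # placeholder

[MODELS.GROQ]
API_KEY = "gsk-your-groq-key" # placeholder

[MODELS.OLLAMA]
API_URL = "http://host.docker.internal:11434" # Ollama API URL from the sample comments

[MODELS.CUSTOM_OPENAI]
API_KEY = "placeholder-key" # whether a real value is needed depends on your endpoint
API_URL = "http://localhost:8080/v1" # assumed OpenAI-compatible endpoint
MODEL_NAME = "llama-3-8b-instruct" # assumed model identifier

[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL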