Update sample.config.toml

Authored by haddadrm on 2025-02-15 15:23:32 +04:00, committed by GitHub
parent 42fa951fd6
commit 0c908618c9

@@ -3,12 +3,28 @@ PORT = 3001 # Port to run the server on
 SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
 KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
 
-[API_KEYS]
-OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
-GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
-ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef
-GEMINI = "" # Gemini API key - sk-1234567890abcdef1234567890abcdef
+[MODELS.OPENAI]
+API_KEY = ""
+
+[MODELS.GROQ]
+API_KEY = ""
+
+[MODELS.ANTHROPIC]
+API_KEY = ""
+
+[MODELS.GEMINI]
+API_KEY = ""
+
+[MODELS.OLLAMA]
+API_URL = "" # Ollama API URL - http://host.docker.internal:11434
+
+[MODELS.LMSTUDIO]
+API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234/v1
+
+[MODELS.CUSTOM_OPENAI]
+API_KEY = ""
+API_URL = ""
+MODEL_NAME = ""
 
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
-OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434