Perplexica/sample.config.toml

[GENERAL]
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded in memory. Use "-1m" instead of -1 to keep a model loaded indefinitely.

[MODELS.OPENAI]
API_KEY = ""

[MODELS.GROQ]
API_KEY = ""

[MODELS.ANTHROPIC]
API_KEY = ""

[MODELS.GEMINI]
API_KEY = ""

[MODELS.CUSTOM_OPENAI]
API_KEY = ""
API_URL = ""
MODEL_NAME = ""
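# Illustrative example only: CUSTOM_OPENAI can point at any OpenAI-compatible server;
# the URL and model name below are placeholders, not defaults. Adjust to your server.
# API_URL = "http://localhost:8080/v1"
# MODEL_NAME = "my-local-model"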

[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434
API_KEY = ""
MODEL_NAME = ""
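# Example, assuming Ollama runs natively on its default port (the URL above shows the
# Docker case); the model name is just an illustration.
# API_URL = "http://localhost:11434"
# MODEL_NAME = "llama3.1"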

[MODELS.DEEPSEEK]
API_KEY = ""

[MODELS.AIMLAPI]
API_KEY = "" # Required to use AI/ML API chat and embedding models

[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234

[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768
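# Example, assuming a docker-compose setup where SearxNG is reachable by its service
# name on the SearxNG default port; adjust host and port to your deployment.
# SEARXNG = "http://searxng:8080"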