# CERTifAI LibreChat Configuration
# Ollama backend for self-hosted LLM inference.
---
# Config schema version (string — quoted so YAML never retypes it).
version: "1.2.1"
cache: true

registration:
  socialLogins:
    - openid

interface:
  privacyPolicy:
    externalUrl: http://localhost:8000/privacy
  termsOfService:
    externalUrl: http://localhost:8000/impressum
  endpointsMenu: true
  modelSelect: true
  parameters: true

endpoints:
  ollama:
    titleModel: "current_model"
    # Use the Docker host network alias when running inside compose.
    # Override OLLAMA_URL in .env for external Ollama instances.
    url: "http://host.docker.internal:11434"
    models:
      # Pull the model list from the Ollama server at startup.
      fetch: true
    # NOTE(review): nesting below follows the LibreChat custom-endpoint
    # schema (summarize/forcePrompt/dropParams are endpoint-level, not
    # under `models`) — source was collapsed to one line, so confirm.
    summarize: true
    forcePrompt: false
    # Parameters stripped from requests before forwarding to Ollama.
    dropParams:
      - stop
      - user
      - frequency_penalty
      - presence_penalty
    modelDisplayLabel: "CERTifAI Ollama"