# CERTifAI LibreChat Configuration
# Ollama backend for self-hosted LLM inference.
# Schema: https://www.librechat.ai/docs/configuration/librechat_yaml

# Config schema version — quoted so YAML tooling never retypes it as a number.
version: "1.2.8"

# Enable server-side caching of the parsed config.
cache: true

registration:
  # Allow sign-in via the configured OpenID provider only.
  socialLogins:
    - openid

interface:
  # Links shown in the client footer; served by the local backend.
  privacyPolicy:
    externalUrl: "http://localhost:8000/privacy"
  termsOfService:
    externalUrl: "http://localhost:8000/impressum"
  # UI toggles: endpoint menu, model selector, and parameter panel all visible.
  endpointsMenu: true
  modelSelect: true
  parameters: true

endpoints:
  custom:
    - name: "Ollama"
      # Ollama ignores the API key, but LibreChat requires a non-empty value.
      apiKey: "ollama"
      # OpenAI-compatible endpoint of the host's Ollama daemon, reached from
      # inside the container via the Docker host gateway alias.
      baseURL: "http://host.docker.internal:11434/v1/"
      models:
        default:
          - "llama3.1:8b"
          - "qwen3:30b-a3b"
        # Also query the daemon for its installed models at runtime.
        fetch: true
      # Generate conversation titles with whichever model the chat is using.
      titleConvo: true
      titleModel: "current_model"
      summarize: false
      summaryModel: "current_model"
      forcePrompt: false
      # Label shown for this endpoint in the UI.
      modelDisplayLabel: "CERTifAI Ollama"
      # Strip parameters Ollama's OpenAI-compat layer does not accept.
      dropParams:
        - stop
        - user
        - frequency_penalty
        - presence_penalty