Replace mock token usage with real data from LiteLLM free-tier APIs (global/activity, global/activity/model, global/spend/models). Adds per-model breakdown table, loading/error states, usage data models with serde tests, and i18n keys for all five languages. Also includes: replace Ollama with LiteLLM proxy, update config, docker-compose, and provider infrastructure. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
35 lines · 793 B · YAML
# CERTifAI LibreChat Configuration
# LiteLLM proxy for unified multi-provider LLM access.
---
# Quoted so the config-schema version is always parsed as a string,
# never re-typed by a YAML loader.
version: "1.2.8"

cache: true

registration:
  socialLogins:
    - openid

interface:
  privacyPolicy:
    externalUrl: https://dash-dev.meghsakha.com/privacy
  termsOfService:
    externalUrl: https://dash-dev.meghsakha.com/impressum
  endpointsMenu: true
  modelSelect: true
  parameters: true

endpoints:
  custom:
    # Single LiteLLM proxy endpoint; the proxy fans out to the
    # underlying providers behind https://llm-dev.meghsakha.com.
    - name: "LiteLLM"
      # Resolved from the environment at startup — keep the key out of VCS.
      apiKey: "${LITELLM_API_KEY}"
      baseURL: "https://llm-dev.meghsakha.com/v1/"
      models:
        default:
          - "Qwen3-Coder-30B-A3B-Instruct"
        # Also fetch the live model list from the proxy's /models endpoint.
        fetch: true
      titleConvo: true
      titleModel: "current_model"
      summarize: false
      summaryModel: "current_model"
      forcePrompt: false
      modelDisplayLabel: "CERTifAI LiteLLM"