feat: replaced ollama with litellm (#18)
Co-authored-by: Sharang Parnerkar <parnerkarsharang@gmail.com>
Reviewed-on: #18
@@ -1,5 +1,5 @@
 # CERTifAI LibreChat Configuration
-# Ollama backend for self-hosted LLM inference.
+# LiteLLM proxy for unified multi-provider LLM access.
 version: 1.2.8

 cache: true
@@ -19,22 +19,16 @@ interface:

 endpoints:
   custom:
-    - name: "Ollama"
-      apiKey: "ollama"
-      baseURL: "https://mac-mini-von-benjamin-2:11434/v1/"
+    - name: "LiteLLM"
+      apiKey: "${LITELLM_API_KEY}"
+      baseURL: "https://llm-dev.meghsakha.com/v1/"
       models:
         default:
-          - "llama3.1:8b"
-          - "qwen3:30b-a3b"
+          - "Qwen3-Coder-30B-A3B-Instruct"
         fetch: true
       titleConvo: true
       titleModel: "current_model"
       summarize: false
       summaryModel: "current_model"
       forcePrompt: false
-      modelDisplayLabel: "CERTifAI Ollama"
-      dropParams:
-        - stop
-        - user
-        - frequency_penalty
-        - presence_penalty
+      modelDisplayLabel: "CERTifAI LiteLLM"
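For context (not part of this change): the endpoint above assumes an OpenAI-compatible LiteLLM proxy is already serving Qwen3-Coder-30B-A3B-Instruct at https://llm-dev.meghsakha.com/v1/. A minimal sketch of what that proxy's config.yaml might look like, where the upstream api_base and both environment variable names are illustrative assumptions, not part of this PR:

    model_list:
      - model_name: Qwen3-Coder-30B-A3B-Instruct
        litellm_params:
          model: openai/Qwen3-Coder-30B-A3B-Instruct  # route to an OpenAI-compatible upstream
          api_base: http://localhost:8000/v1          # assumed upstream inference server
          api_key: os.environ/UPSTREAM_API_KEY        # assumed variable name

    general_settings:
      master_key: os.environ/LITELLM_MASTER_KEY       # key that clients such as LibreChat present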
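Deployment note: LibreChat resolves ${LITELLM_API_KEY} from its environment, so the key issued by the proxy must be set there (e.g. LITELLM_API_KEY=sk-... in LibreChat's .env file) before the endpoint will authenticate. As a quick smoke test, assuming the proxy exposes the standard OpenAI-compatible model listing:

    curl -H "Authorization: Bearer $LITELLM_API_KEY" https://llm-dev.meghsakha.com/v1/models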