feat: use librechat instead of own chat (#14)
Co-authored-by: Sharang Parnerkar <parnerkarsharang@gmail.com> Reviewed-on: #14
This commit was merged in pull request #14.
This commit is contained in:
40
librechat/librechat.yaml
Normal file
40
librechat/librechat.yaml
Normal file
@@ -0,0 +1,40 @@
---
# CERTifAI LibreChat Configuration
# Ollama backend for self-hosted LLM inference.

# Config schema version (quoted so it is unambiguously a string).
version: "1.2.8"

# Cache LibreChat's parsed configuration.
cache: true

registration:
  # Allow sign-in via OpenID Connect only.
  socialLogins:
    - openid

interface:
  privacyPolicy:
    externalUrl: "https://dash-dev.meghsakha.com/privacy"
  termsOfService:
    # NOTE(review): points at /impressum — confirm this is intentional
    # (an Impressum is not the same document as Terms of Service).
    externalUrl: "https://dash-dev.meghsakha.com/impressum"
  endpointsMenu: true
  modelSelect: true
  parameters: true

endpoints:
  custom:
    - name: "Ollama"
      # Ollama ignores the API key, but LibreChat requires a non-empty value.
      apiKey: "ollama"
      # NOTE(review): https on the raw Ollama port (11434) — confirm TLS is
      # actually terminated there; Ollama itself serves plain HTTP by default.
      baseURL: "https://mac-mini-von-benjamin-2:11434/v1/"
      models:
        default:
          - "llama3.1:8b"
          - "qwen3:30b-a3b"
        # Also fetch the live model list from the backend.
        fetch: true
      # Generate conversation titles with whichever model is in use.
      titleConvo: true
      titleModel: "current_model"
      summarize: false
      summaryModel: "current_model"
      forcePrompt: false
      modelDisplayLabel: "CERTifAI Ollama"
      # Strip OpenAI-only parameters that Ollama's API does not accept.
      dropParams:
        - stop
        - user
        - frequency_penalty
        - presence_penalty
Reference in New Issue
Block a user