# =========================================================
# BreakPilot Lehrer — Environment Variables
# =========================================================
# Copy to .env and adjust values
# NOTE: Core must be running! These vars reference Core services.

# Database (same as Core)
POSTGRES_USER=breakpilot
POSTGRES_PASSWORD=breakpilot123
POSTGRES_DB=breakpilot_db

# Security
JWT_SECRET=your-super-secret-jwt-key-change-in-production
VAULT_TOKEN=breakpilot-dev-token

# MinIO (from Core)
MINIO_ROOT_USER=breakpilot
MINIO_ROOT_PASSWORD=breakpilot123
MINIO_BUCKET=breakpilot-rag

# Environment
ENVIRONMENT=development
TZ=Europe/Berlin

# LLM (Ollama on host)
OLLAMA_BASE_URL=http://host.docker.internal:11434
OLLAMA_ENABLED=true
OLLAMA_DEFAULT_MODEL=llama3.2
OLLAMA_VISION_MODEL=llama3.2-vision
OLLAMA_CORRECTION_MODEL=llama3.2
OLLAMA_TIMEOUT=120

# OCR pipeline: LLM review (step 6)
# Small models are sufficient for character-level corrections (0->O, 1->l, 5->S)
# Options: qwen3:0.6b, qwen3:1.7b, gemma3:1b, qwen3.5:35b-a3b
OLLAMA_REVIEW_MODEL=qwen3:0.6b
# Entries per Ollama call. Larger batch = less HTTP overhead.
OLLAMA_REVIEW_BATCH_SIZE=20

# OCR pipeline: engine for step 5 (word recognition)
# Options: auto (prefers RapidOCR), rapid, tesseract,
# trocr-printed, trocr-handwritten, lighton
OCR_ENGINE=auto

# Klausur HTR: primary model for handwriting recognition (qwen2.5vl is already installed on the Mac Mini)
OLLAMA_HTR_MODEL=qwen2.5vl:32b
# HTR fallback: used when Ollama is not reachable (auto-download, ~340 MB)
HTR_FALLBACK_MODEL=trocr-large

# Anthropic (optional)
ANTHROPIC_API_KEY=

# vast.ai GPU (optional)
VAST_API_KEY=
VAST_INSTANCE_ID=

# Game
GAME_USE_DATABASE=true
GAME_REQUIRE_AUTH=false
GAME_REQUIRE_BILLING=false
GAME_LLM_MODEL=llama3.2

# Frontend URLs
NEXT_PUBLIC_API_URL=https://macmini:8001
NEXT_PUBLIC_KLAUSUR_SERVICE_URL=https://macmini:8086
NEXT_PUBLIC_VOICE_SERVICE_URL=wss://macmini:8091

# Session
SESSION_TTL_HOURS=24

# SMTP (uses Core Mailpit)
SMTP_HOST=bp-core-mailpit
SMTP_PORT=1025