A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
440 lines
14 KiB
Python
# ==============================================
# Breakpilot Drive - Quiz Generator Service
# ==============================================
# Generates quiz questions dynamically via the LLM Gateway.
# Supports caching via Valkey for performance.
|
|
|
import json
import logging
import os
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Dict, List, Optional
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
# Configuration
|
|
LLM_MODEL = os.getenv("GAME_LLM_MODEL", "llama-3.1-8b")
|
|
LLM_FALLBACK_MODEL = os.getenv("GAME_LLM_FALLBACK_MODEL", "claude-3-haiku")
|
|
CACHE_TTL = int(os.getenv("GAME_QUESTION_CACHE_TTL", "3600")) # 1 hour
|
|
|
|
|
|
class Subject(str, Enum):
|
|
"""Available subjects for quiz questions."""
|
|
MATH = "math"
|
|
GERMAN = "german"
|
|
ENGLISH = "english"
|
|
GENERAL = "general"
|
|
|
|
|
|
class QuizMode(str, Enum):
|
|
"""Quiz modes with different time constraints."""
|
|
QUICK = "quick" # 2-3 options, 3-5 seconds
|
|
PAUSE = "pause" # 4 options, unlimited time
|
|
|
|
|
|
@dataclass
|
|
class GeneratedQuestion:
|
|
"""Generated question from LLM."""
|
|
question_text: str
|
|
options: List[str]
|
|
correct_index: int
|
|
explanation: Optional[str] = None
|
|
difficulty: int = 3
|
|
subject: str = "general"
|
|
grade_level: int = 4
|
|
quiz_mode: str = "quick"
|
|
visual_trigger: Optional[str] = None
|
|
time_limit_seconds: Optional[float] = None
|
|
|
|
|
|
# ==============================================
|
|
# Prompt Templates
|
|
# ==============================================
|
|
|
|
QUICK_QUESTION_PROMPT = """Du bist ein Lehrer fuer Grundschulkinder (Klasse {grade}).
|
|
Erstelle eine SCHNELLE Quiz-Frage zum Thema "{subject}" mit Schwierigkeit {difficulty}/5.
|
|
|
|
Kontext: Das Kind faehrt in einem Autorennen-Spiel und sieht gerade ein(e) {visual_trigger}.
|
|
Die Frage soll zum visuellen Element passen und in 3-5 Sekunden beantwortbar sein.
|
|
|
|
Regeln:
|
|
- NUR 2-3 kurze Antwortoptionen
|
|
- Frage muss sehr kurz sein (max 10 Woerter)
|
|
- Antworten muessen eindeutig richtig/falsch sein
|
|
- Kindgerecht und motivierend
|
|
|
|
Antworte NUR im JSON-Format:
|
|
{{
|
|
"question_text": "Kurze Frage?",
|
|
"options": ["Antwort1", "Antwort2"],
|
|
"correct_index": 0,
|
|
"explanation": "Kurze Erklaerung"
|
|
}}"""
|
|
|
|
PAUSE_QUESTION_PROMPT = """Du bist ein Lehrer fuer Grundschulkinder (Klasse {grade}).
|
|
Erstelle eine DENKAUFGABE zum Thema "{subject}" mit Schwierigkeit {difficulty}/5.
|
|
|
|
Das Kind hat Zeit zum Nachdenken (Spiel ist pausiert).
|
|
Die Frage darf komplexer sein und Textverstaendnis erfordern.
|
|
|
|
Regeln:
|
|
- 4 Antwortoptionen
|
|
- Frage kann laenger sein (Textaufgabe erlaubt)
|
|
- Eine Option ist eindeutig richtig
|
|
- Kindgerecht formulieren
|
|
|
|
Antworte NUR im JSON-Format:
|
|
{{
|
|
"question_text": "Die vollstaendige Frage oder Aufgabe?",
|
|
"options": ["Option A", "Option B", "Option C", "Option D"],
|
|
"correct_index": 0,
|
|
"explanation": "Erklaerung warum diese Antwort richtig ist"
|
|
}}"""
|
|
|
|
SUBJECT_CONTEXTS = {
|
|
"math": {
|
|
"quick": ["Kopfrechnen", "Einmaleins", "Plus/Minus"],
|
|
"pause": ["Textaufgaben", "Geometrie", "Brueche", "Prozent"]
|
|
},
|
|
"german": {
|
|
"quick": ["Rechtschreibung", "Artikel"],
|
|
"pause": ["Grammatik", "Wortarten", "Satzglieder", "Zeitformen"]
|
|
},
|
|
"english": {
|
|
"quick": ["Vokabeln", "Farben", "Zahlen", "Tiere"],
|
|
"pause": ["Grammatik", "Saetze bilden", "Uebersetzung"]
|
|
},
|
|
"general": {
|
|
"quick": ["Allgemeinwissen"],
|
|
"pause": ["Sachkunde", "Natur", "Geographie"]
|
|
}
|
|
}
|
|
|
|
VISUAL_TRIGGER_THEMES = {
|
|
"bridge": {
|
|
"math": "Wie lang ist die Bruecke? Wie viele Autos passen drauf?",
|
|
"german": "Wie schreibt man Bruecke? Was reimt sich?",
|
|
"english": "What is this? Bridge vocabulary"
|
|
},
|
|
"tree": {
|
|
"math": "Wie viele Blaetter? Wie hoch ist der Baum?",
|
|
"german": "Nomen oder Verb? Einzahl/Mehrzahl",
|
|
"english": "Tree, leaf, branch vocabulary"
|
|
},
|
|
"house": {
|
|
"math": "Fenster zaehlen, Stockwerke",
|
|
"german": "Wortfamilie Haus",
|
|
"english": "House, room vocabulary"
|
|
},
|
|
"car": {
|
|
"math": "Raeder zaehlen, Geschwindigkeit",
|
|
"german": "Fahrzeug-Woerter",
|
|
"english": "Car, vehicle vocabulary"
|
|
},
|
|
"mountain": {
|
|
"math": "Hoehe, Entfernung",
|
|
"german": "Landschafts-Begriffe",
|
|
"english": "Mountain, hill vocabulary"
|
|
},
|
|
"river": {
|
|
"math": "Laenge, Breite",
|
|
"german": "Wasser-Woerter",
|
|
"english": "River, water vocabulary"
|
|
}
|
|
}
|
|
|
|
|
|
class QuizGenerator:
    """
    Generates quiz questions using LLM Gateway.

    Supports caching via Valkey for performance.
    Falls back to static questions if LLM unavailable: generation methods
    return None / empty lists and the caller is expected to supply static
    content instead.
    """

    def __init__(self):
        # Clients are created lazily in connect(); the availability flags
        # gate every LLM / cache access so the service degrades gracefully.
        self._llm_client = None
        self._valkey_client = None
        self._llm_available = False
        self._cache_available = False

    async def connect(self):
        """Initialize LLM and cache connections (best-effort, never raises)."""
        await self._connect_llm()
        await self._connect_cache()

    async def _connect_llm(self):
        """Connect to LLM Gateway; degrade to static questions on failure."""
        try:
            # Imported lazily so this module still loads when the gateway
            # package is not deployed alongside this service.
            from llm_gateway.services.inference import InferenceService
            self._llm_client = InferenceService()
            self._llm_available = True
            logger.info("Quiz Generator connected to LLM Gateway")
        except ImportError:
            logger.warning("LLM Gateway not available, using static questions")
            self._llm_available = False
        except Exception as e:
            logger.warning(f"LLM connection failed: {e}")
            self._llm_available = False

    async def _connect_cache(self):
        """Connect to Valkey cache; caching is optional."""
        try:
            import redis.asyncio as redis
            valkey_url = os.getenv("VALKEY_URL", "redis://localhost:6379")
            self._valkey_client = redis.from_url(
                valkey_url,
                encoding="utf-8",
                decode_responses=True,
            )
            await self._valkey_client.ping()
            self._cache_available = True
            logger.info("Quiz Generator connected to Valkey cache")
        except Exception as e:
            logger.warning(f"Valkey cache not available: {e}")
            self._cache_available = False

    def _get_cache_key(
        self,
        difficulty: int,
        subject: str,
        mode: str,
        visual_trigger: Optional[str] = None
    ) -> str:
        """Build the Valkey list key for a (difficulty, subject, mode) bucket.

        The visual trigger gets its own bucket so e.g. "bridge" questions
        are not served while a "tree" is on screen.
        """
        if visual_trigger:
            return f"quiz:d{difficulty}:s{subject}:m{mode}:v{visual_trigger}"
        return f"quiz:d{difficulty}:s{subject}:m{mode}"

    async def get_cached_questions(
        self,
        difficulty: int,
        subject: str,
        mode: str,
        count: int,
        visual_trigger: Optional[str] = None
    ) -> List[GeneratedQuestion]:
        """Return up to `count` questions from cache ([] on miss or error)."""
        if not self._cache_available:
            return []

        try:
            cache_key = self._get_cache_key(difficulty, subject, mode, visual_trigger)
            cached = await self._valkey_client.lrange(cache_key, 0, count - 1)
            return [GeneratedQuestion(**json.loads(item)) for item in cached]
        except Exception as e:
            logger.warning(f"Cache read failed: {e}")
            return []

    async def cache_questions(
        self,
        questions: List[GeneratedQuestion],
        difficulty: int,
        subject: str,
        mode: str,
        visual_trigger: Optional[str] = None
    ):
        """Append questions to their cache bucket and refresh its TTL."""
        if not self._cache_available:
            return

        try:
            cache_key = self._get_cache_key(difficulty, subject, mode, visual_trigger)
            for q in questions:
                # asdict() serialises every dataclass field, so cached entries
                # round-trip cleanly through GeneratedQuestion(**data).
                await self._valkey_client.rpush(cache_key, json.dumps(asdict(q)))
            await self._valkey_client.expire(cache_key, CACHE_TTL)
        except Exception as e:
            logger.warning(f"Cache write failed: {e}")

    @staticmethod
    def _extract_json(content: str) -> str:
        """Strip an optional markdown code fence from an LLM response."""
        if "```json" in content:
            content = content.split("```json")[1].split("```")[0]
        elif "```" in content:
            content = content.split("```")[1].split("```")[0]
        return content.strip()

    async def _call_llm(self, prompt: str) -> str:
        """Call the LLM Gateway, retrying once with the fallback model.

        Returns the raw response content (may be empty). Propagates the
        exception if both the primary and the fallback model fail.
        """
        try:
            response = await self._llm_client.chat_completion(
                messages=[{"role": "user", "content": prompt}],
                model=LLM_MODEL,
                temperature=0.7,
                max_tokens=500
            )
        except Exception as primary_err:
            # LLM_FALLBACK_MODEL was configured but previously never used.
            logger.warning(
                f"Model {LLM_MODEL} failed ({primary_err}), "
                f"retrying with {LLM_FALLBACK_MODEL}"
            )
            response = await self._llm_client.chat_completion(
                messages=[{"role": "user", "content": prompt}],
                model=LLM_FALLBACK_MODEL,
                temperature=0.7,
                max_tokens=500
            )
        return response.get("content", "")

    async def generate_question(
        self,
        difficulty: int = 3,
        subject: str = "general",
        mode: str = "quick",
        grade: int = 4,
        visual_trigger: Optional[str] = None
    ) -> Optional[GeneratedQuestion]:
        """
        Generate a single question using LLM.

        Falls back to None if LLM unavailable or its output is malformed
        (caller should use static questions).
        """
        if not self._llm_available or not self._llm_client:
            return None

        # Select prompt template and time limit for the requested mode.
        if mode == "quick":
            prompt = QUICK_QUESTION_PROMPT.format(
                grade=grade,
                subject=subject,
                difficulty=difficulty,
                visual_trigger=visual_trigger or "Strasse"
            )
            time_limit = 3.0 + (difficulty * 0.5)  # 3.5 - 5.5 seconds
        else:
            prompt = PAUSE_QUESTION_PROMPT.format(
                grade=grade,
                subject=subject,
                difficulty=difficulty
            )
            time_limit = None  # pause mode is untimed

        try:
            content = await self._call_llm(prompt)
            data = json.loads(self._extract_json(content))

            options = data["options"]
            correct_index = data["correct_index"]
            # Guard against malformed LLM output: options must be a
            # non-empty list and correct_index must point into it.
            if (
                not isinstance(options, list)
                or not options
                or not isinstance(correct_index, int)
                or not 0 <= correct_index < len(options)
            ):
                logger.warning(f"LLM returned invalid question payload: {data!r}")
                return None

            return GeneratedQuestion(
                question_text=data["question_text"],
                options=options,
                correct_index=correct_index,
                explanation=data.get("explanation"),
                difficulty=difficulty,
                subject=subject,
                grade_level=grade,
                quiz_mode=mode,
                visual_trigger=visual_trigger,
                time_limit_seconds=time_limit
            )

        except json.JSONDecodeError as e:
            logger.warning(f"Failed to parse LLM response: {e}")
            return None
        except Exception as e:
            logger.error(f"LLM question generation failed: {e}")
            return None

    async def generate_questions_batch(
        self,
        difficulty: int,
        subject: str,
        mode: str,
        count: int,
        grade: int = 4,
        visual_trigger: Optional[str] = None
    ) -> List[GeneratedQuestion]:
        """Generate up to `count` questions; failed generations are skipped."""
        questions = []
        for _ in range(count):
            q = await self.generate_question(
                difficulty=difficulty,
                subject=subject,
                mode=mode,
                grade=grade,
                visual_trigger=visual_trigger
            )
            if q:
                questions.append(q)
        return questions

    async def get_questions(
        self,
        difficulty: int = 3,
        subject: str = "general",
        mode: str = "quick",
        count: int = 5,
        grade: int = 4,
        visual_trigger: Optional[str] = None
    ) -> List[GeneratedQuestion]:
        """
        Get questions with caching.

        1. Check cache first
        2. Generate new if not enough cached
        3. Cache new questions
        4. Return combined result

        May return fewer than `count` questions if the LLM is unavailable.
        """
        # Try cache first
        cached = await self.get_cached_questions(
            difficulty, subject, mode, count, visual_trigger
        )
        if len(cached) >= count:
            return cached[:count]

        # Generate twice what we still need so the surplus warms the cache.
        needed = count - len(cached)
        new_questions = await self.generate_questions_batch(
            difficulty=difficulty,
            subject=subject,
            mode=mode,
            count=needed * 2,
            grade=grade,
            visual_trigger=visual_trigger
        )

        # Cache new questions
        if new_questions:
            await self.cache_questions(
                new_questions, difficulty, subject, mode, visual_trigger
            )

        # Combine and return
        return (cached + new_questions)[:count]

    def get_grade_for_difficulty(self, difficulty: int) -> int:
        """Map difficulty level (1-5) to German school grade (Klasse 2-6)."""
        mapping = {
            1: 2,  # Klasse 2
            2: 3,  # Klasse 3
            3: 4,  # Klasse 4
            4: 5,  # Klasse 5
            5: 6,  # Klasse 6
        }
        # Unknown difficulties default to Klasse 4 (middle of the range).
        return mapping.get(difficulty, 4)
|
|
|
|
|
# Module-level singleton, created lazily by get_quiz_generator().
_quiz_generator: Optional[QuizGenerator] = None


async def get_quiz_generator() -> QuizGenerator:
    """Return the shared QuizGenerator, connecting it on first access."""
    global _quiz_generator

    if _quiz_generator is not None:
        return _quiz_generator

    _quiz_generator = QuizGenerator()
    await _quiz_generator.connect()
    return _quiz_generator