[split-required] Split 500-850 LOC files (batch 2)
backend-lehrer (10 files): - game/database.py (785 → 5), correction_api.py (683 → 4) - classroom_engine/antizipation.py (676 → 5) - llm_gateway schools/edu_search already done in prior batch klausur-service (12 files): - orientation_crop_api.py (694 → 5), pdf_export.py (677 → 4) - zeugnis_crawler.py (676 → 5), grid_editor_api.py (671 → 5) - eh_templates.py (658 → 5), mail/api.py (651 → 5) - qdrant_service.py (638 → 5), training_api.py (625 → 4) website (6 pages): - middleware (696 → 8), mail (733 → 6), consent (628 → 8) - compliance/risks (622 → 5), export (502 → 5), brandbook (629 → 7) studio-v2 (3 components): - B2BMigrationWizard (848 → 3), CleanupPanel (765 → 2) - dashboard-experimental (739 → 2) admin-lehrer (4 files): - uebersetzungen (769 → 4), manager (670 → 2) - ChunkBrowserQA (675 → 6), dsfa/page (674 → 5) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,676 +1,17 @@
|
||||
"""
|
||||
Antizipations-Engine fuer proaktive Vorschlaege (Phase 8b).
|
||||
|
||||
Die Engine sammelt Signale aus verschiedenen Quellen und generiert
|
||||
kontextbasierte Vorschlaege fuer Lehrer basierend auf definierten Regeln.
|
||||
|
||||
Architektur:
|
||||
1. SignalCollector - Sammelt Inputs (Zeit, Nutzung, Events)
|
||||
2. RuleEngine - Evaluiert Regeln gegen Signale
|
||||
3. SuggestionGenerator - Generiert priorisierte Vorschlaege
|
||||
Barrel re-export: all public symbols for backward compatibility.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timedelta
|
||||
from typing import List, Dict, Any, Optional
|
||||
from enum import Enum
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ==================== Enums & Types ====================
|
||||
|
||||
class SuggestionTone(str, Enum):
    """Tone / urgency level of a suggestion."""
    HINT = "hint"  # gentle hint
    SUGGESTION = "suggestion"  # active suggestion
    REMINDER = "reminder"  # reminder
    URGENT = "urgent"  # urgent
|
||||
|
||||
|
||||
class ContextType(str, Enum):
    """Type of an active context."""
    EVENT_WINDOW = "event_window"  # an event is imminent
    ROUTINE = "routine"  # a routine takes place today
    PHASE = "phase"  # driven by the macro phase
    TIME = "time"  # time-based (holidays, weekend)
|
||||
|
||||
|
||||
@dataclass
class Signal:
    """A single signal from one source.

    NOTE(review): not referenced anywhere else in this module — presumably
    part of the public API for other consumers; confirm before removing.
    """
    name: str
    value: Any
    source: str  # "calendar", "usage", "events", "routines"
|
||||
|
||||
|
||||
@dataclass
class ActiveContext:
    """An active context that influences which suggestions apply."""
    id: str
    context_type: ContextType
    label: str
    data: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
class Suggestion:
    """A generated suggestion presented to the teacher."""
    id: str
    title: str
    description: str
    tone: SuggestionTone
    action_url: Optional[str] = None
    badge: Optional[str] = None  # e.g. "in 7 Tagen"
    priority: int = 50  # 0-100, higher = more important
    rule_id: str = ""  # id of the rule that produced this suggestion
    icon: str = "lightbulb"  # material icon name
|
||||
|
||||
|
||||
@dataclass
class Signals:
    """Container for all collected signals."""
    # --- Time / calendar ---
    current_week: int = 1
    weeks_since_start: int = 0
    is_weekend: bool = False
    is_before_holidays: bool = False
    days_until_holidays: int = 999  # sentinel: no upcoming holidays known

    # --- Macro phase ---
    macro_phase: str = "onboarding"
    onboarding_completed: bool = False

    # --- Product usage ---
    classes_count: int = 0
    has_classes: bool = False
    has_schedule: bool = False

    # --- Events ---
    exams_scheduled_count: int = 0
    exams_in_7_days: List[Dict] = field(default_factory=list)
    # NOTE(review): exams_past_ungraded is consumed by rule R03 but never
    # populated by SignalCollector in this module — confirm a producer exists.
    exams_past_ungraded: List[Dict] = field(default_factory=list)
    upcoming_events: List[Dict] = field(default_factory=list)
    trips_in_30_days: List[Dict] = field(default_factory=list)
    parent_evenings_soon: List[Dict] = field(default_factory=list)

    # --- Routines ---
    routines_today: List[Dict] = field(default_factory=list)
    has_conference_today: bool = False

    # --- Statistics (from analytics; not filled by the collector here) ---
    corrections_pending: int = 0
    grades_completion_ratio: float = 0.0  # share of grades entered, 0.0-1.0
|
||||
|
||||
|
||||
# ==================== Signal Collector ====================
|
||||
|
||||
class SignalCollector:
    """Gathers signals from the available sources.

    Sources:
    - TeacherContext (macro phase, school year)
    - SchoolyearEvents (exams, parent evenings, ...)
    - RecurringRoutines (conferences today)
    - time/calendar (weekend, holidays)
    """

    def __init__(self, db_session=None):
        self.db = db_session

    def collect(self, teacher_id: str) -> Signals:
        """Collect every available signal for one teacher."""
        result = Signals()

        self._collect_time_signals(result)

        if self.db:
            # DB-backed sources are only queried when a session is available.
            self._collect_context_signals(result, teacher_id)
            self._collect_event_signals(result, teacher_id)
            self._collect_routine_signals(result, teacher_id)

        return result

    def _collect_time_signals(self, signals: Signals):
        """Fill in the time/calendar based signals."""
        signals.is_weekend = datetime.utcnow().weekday() >= 5

        # TODO: integrate a per-state holiday calendar; dummy values for now.
        signals.is_before_holidays = False
        signals.days_until_holidays = 999

    def _collect_context_signals(self, signals: Signals, teacher_id: str):
        """Fill in signals derived from the teacher context."""
        from .repository import TeacherContextRepository

        try:
            ctx = TeacherContextRepository(self.db).get_or_create(teacher_id)

            signals.macro_phase = ctx.macro_phase.value
            signals.current_week = ctx.current_week or 1
            signals.onboarding_completed = ctx.onboarding_completed
            signals.has_classes = ctx.has_classes
            signals.has_schedule = ctx.has_schedule
            signals.classes_count = 1 if ctx.has_classes else 0

            # Whole weeks elapsed since the start of the school year.
            if ctx.schoolyear_start:
                elapsed = datetime.utcnow() - ctx.schoolyear_start
                signals.weeks_since_start = max(0, elapsed.days // 7)

            signals.is_before_holidays = ctx.is_before_holidays

        except Exception as e:
            # Best effort: one failing source must not break suggestion generation.
            logger.warning(f"Failed to collect context signals: {e}")

    def _collect_event_signals(self, signals: Signals, teacher_id: str):
        """Fill in signals derived from schoolyear events."""
        from .repository import SchoolyearEventRepository

        try:
            repo = SchoolyearEventRepository(self.db)
            horizon = datetime.utcnow() + timedelta(days=7)

            # Everything coming up within the next 30 days.
            upcoming = repo.get_upcoming(teacher_id, days=30, limit=20)
            signals.upcoming_events = [repo.to_dict(e) for e in upcoming]

            # Exams: total scheduled count plus those within the next 7 days.
            exams = [e for e in upcoming if e.event_type.value == "exam"]
            signals.exams_scheduled_count = len(exams)
            signals.exams_in_7_days = [
                repo.to_dict(e) for e in exams if e.start_date <= horizon
            ]

            # Class trips within the 30-day window.
            signals.trips_in_30_days = [
                repo.to_dict(e) for e in upcoming if e.event_type.value == "trip"
            ]

            # Parent evenings / consultations coming up soon.
            signals.parent_evenings_soon = [
                repo.to_dict(e)
                for e in upcoming
                if e.event_type.value in ("parent_evening", "parent_consultation")
            ]

        except Exception as e:
            logger.warning(f"Failed to collect event signals: {e}")

    def _collect_routine_signals(self, signals: Signals, teacher_id: str):
        """Fill in signals derived from recurring routines."""
        from .repository import RecurringRoutineRepository

        try:
            repo = RecurringRoutineRepository(self.db)
            todays = repo.get_today(teacher_id)

            signals.routines_today = [repo.to_dict(r) for r in todays]
            signals.has_conference_today = any(
                r.routine_type.value in ("teacher_conference", "subject_conference")
                for r in todays
            )

        except Exception as e:
            logger.warning(f"Failed to collect routine signals: {e}")
|
||||
|
||||
|
||||
# ==================== Rule Engine ====================
|
||||
|
||||
@dataclass
class Rule:
    """A rule that maps collected signals to a suggestion."""
    id: str
    name: str
    description: str

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        """Evaluate the rule; return a Suggestion, or None if it does not fire."""
        # Subclasses must override; the base rule never fires on its own.
        raise NotImplementedError()
|
||||
|
||||
|
||||
class R01_CreateClasses(Rule):
    """Suggest creating classes for a brand-new teacher without any."""

    def __init__(self):
        super().__init__(
            id="R01",
            name="Klassen anlegen",
            description="Empfiehlt Klassen anzulegen bei neuem Lehrer"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        # Fires only during onboarding while no class exists yet.
        if signals.macro_phase != "onboarding" or signals.has_classes:
            return None
        return Suggestion(
            id="suggest_create_classes",
            title="Klassen anlegen",
            description="Legen Sie Ihre Klassen an, um den vollen Funktionsumfang zu nutzen.",
            tone=SuggestionTone.HINT,
            priority=90,
            rule_id=self.id,
            icon="group_add",
            action_url="/classes/new",
        )
|
||||
|
||||
|
||||
class R02_PrepareRubric(Rule):
    """Suggest preparing a grading rubric when an exam is within 7 days."""

    def __init__(self):
        super().__init__(
            id="R02",
            name="Erwartungshorizont",
            description="Empfiehlt Erwartungshorizont vor Klausur"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.exams_in_7_days:
            return None
        exam = signals.exams_in_7_days[0]
        if exam.get("preparation_done", False):
            # Preparation already marked as done — nothing to suggest.
            return None
        days = 7  # simplified: badge always reports the full window
        return Suggestion(
            id=f"suggest_rubric_{exam['id'][:8]}",
            title="Erwartungshorizont erstellen",
            description=f"Klausur '{exam['title']}' steht bevor.",
            tone=SuggestionTone.SUGGESTION,
            badge=f"in {days} Tagen",
            priority=80,
            rule_id=self.id,
            icon="assignment",
            action_url=f"/exams/{exam['id']}/rubric",
        )
|
||||
|
||||
|
||||
class R03_StartCorrection(Rule):
    """Suggest starting the correction workflow once an exam was written."""

    def __init__(self):
        super().__init__(
            id="R03",
            name="Korrektur starten",
            description="Empfiehlt Korrektur nach Klausur"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.exams_past_ungraded:
            return None
        exam = signals.exams_past_ungraded[0]
        return Suggestion(
            id=f"suggest_correction_{exam['id'][:8]}",
            title="Korrektur-Setup starten",
            description=f"Klausur '{exam['title']}' ist geschrieben.",
            tone=SuggestionTone.HINT,
            badge="bereit",
            priority=75,
            rule_id=self.id,
            icon="rate_review",
            action_url=f"/exams/{exam['id']}/correct",
        )
|
||||
|
||||
|
||||
class R05_PrepareAgenda(Rule):
    """Suggest preparing an agenda when a conference takes place today."""

    def __init__(self):
        super().__init__(
            id="R05",
            name="Konferenz-Agenda",
            description="Empfiehlt Agenda wenn Konferenz heute"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.has_conference_today:
            return None
        # Locate the first conference among today's routines.
        conference = None
        for routine in signals.routines_today:
            if routine.get("routine_type") in ("teacher_conference", "subject_conference"):
                conference = routine
                break
        if conference is None:
            return None
        return Suggestion(
            id="suggest_agenda",
            title="Konferenz-Agenda vorbereiten",
            description=f"{conference.get('title', 'Konferenz')} heute.",
            tone=SuggestionTone.SUGGESTION,
            badge="heute",
            priority=70,
            rule_id=self.id,
            icon="event_note",
        )
|
||||
|
||||
|
||||
class R07_PlanFirstExam(Rule):
    """Suggest scheduling the first exam after the initial weeks of term."""

    def __init__(self):
        super().__init__(
            id="R07",
            name="Erste Arbeit planen",
            description="Empfiehlt erste Klausur nach Anlaufphase"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        ready = (
            signals.has_classes
            and signals.weeks_since_start >= 4
            and signals.exams_scheduled_count == 0
        )
        if not ready:
            return None
        return Suggestion(
            id="suggest_first_exam",
            title="Erste Klassenarbeit planen",
            description="Nach 4 Wochen Unterricht ist ein guter Zeitpunkt fuer die erste Leistungsueberpruefung.",
            tone=SuggestionTone.SUGGESTION,
            priority=60,
            rule_id=self.id,
            icon="quiz",
            action_url="/exams/new",
        )
|
||||
|
||||
|
||||
class R08_CorrectionMode(Rule):
    """Suggest a focused correction mode shortly before the holidays."""

    def __init__(self):
        super().__init__(
            id="R08",
            name="Ferien-Korrekturmodus",
            description="Empfiehlt Korrekturen vor Ferien"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not (signals.is_before_holidays and signals.corrections_pending > 0):
            return None
        return Suggestion(
            id="suggest_correction_mode",
            title="Ferien-Korrekturmodus",
            description=f"{signals.corrections_pending} Korrekturen noch offen vor den Ferien.",
            tone=SuggestionTone.REMINDER,
            badge=f"{signals.days_until_holidays}d bis Ferien",
            priority=65,
            rule_id=self.id,
            icon="grading",
        )
|
||||
|
||||
|
||||
class R09_TripChecklist(Rule):
    """Suggest a checklist when a class trip is coming up within 30 days."""

    def __init__(self):
        super().__init__(
            id="R09",
            name="Klassenfahrt-Checkliste",
            description="Empfiehlt Checkliste vor Klassenfahrt"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.trips_in_30_days:
            return None
        trip = signals.trips_in_30_days[0]
        return Suggestion(
            id=f"suggest_trip_{trip['id'][:8]}",
            title="Klassenfahrt-Checkliste",
            description=f"'{trip['title']}' steht bevor.",
            tone=SuggestionTone.SUGGESTION,
            badge="in 30 Tagen",
            priority=55,
            rule_id=self.id,
            icon="luggage",
            action_url=f"/trips/{trip['id']}/checklist",
        )
|
||||
|
||||
|
||||
class R10_CompleteGrades(Rule):
    """Remind about missing grades during a grade-closing phase."""

    def __init__(self):
        super().__init__(
            id="R10",
            name="Noten vervollstaendigen",
            description="Empfiehlt Noten vor Notenschluss"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        in_grade_phase = signals.macro_phase in ("halbjahresabschluss", "jahresabschluss")
        if not (in_grade_phase and signals.grades_completion_ratio < 0.8):
            return None
        pct = int(signals.grades_completion_ratio * 100)
        return Suggestion(
            id="suggest_complete_grades",
            title="Noten vervollstaendigen",
            description=f"Nur {pct}% der Noten eingetragen. Notenschluss naht!",
            tone=SuggestionTone.REMINDER,
            priority=85,
            rule_id=self.id,
            icon="calculate",
            action_url="/grades",
        )
|
||||
|
||||
|
||||
class R11_SetupSchedule(Rule):
    """Suggest setting up the timetable during onboarding."""

    def __init__(self):
        super().__init__(
            id="R11",
            name="Stundenplan einrichten",
            description="Empfiehlt Stundenplan-Setup"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if signals.macro_phase != "onboarding" or signals.has_schedule:
            return None
        return Suggestion(
            id="suggest_setup_schedule",
            title="Stundenplan einrichten",
            description="Richten Sie Ihren Stundenplan ein fuer personalisierte Vorschlaege.",
            tone=SuggestionTone.HINT,
            priority=85,
            rule_id=self.id,
            icon="calendar_month",
            action_url="/schedule/setup",
        )
|
||||
|
||||
|
||||
class R12_ParentEvening(Rule):
    """Suggest preparing for an upcoming parent evening."""

    def __init__(self):
        super().__init__(
            id="R12",
            name="Elternabend vorbereiten",
            description="Empfiehlt Vorbereitung vor Elternabend"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.parent_evenings_soon:
            return None
        event = signals.parent_evenings_soon[0]
        return Suggestion(
            id=f"suggest_parent_{event['id'][:8]}",
            title="Elternabend vorbereiten",
            description=f"'{event['title']}' steht bevor.",
            tone=SuggestionTone.SUGGESTION,
            badge="bald",
            priority=65,
            rule_id=self.id,
            icon="family_restroom",
            action_url=f"/events/{event['id']}/prepare",
        )
|
||||
|
||||
|
||||
class RuleEngine:
    """Runs every registered rule against the collected signals."""

    def __init__(self):
        self.rules: List[Rule] = [
            R01_CreateClasses(),
            R02_PrepareRubric(),
            R03_StartCorrection(),
            R05_PrepareAgenda(),
            R07_PlanFirstExam(),
            R08_CorrectionMode(),
            R09_TripChecklist(),
            R10_CompleteGrades(),
            R11_SetupSchedule(),
            R12_ParentEvening(),
        ]

    def evaluate(self, signals: Signals) -> List[Suggestion]:
        """Evaluate all rules; return matching suggestions, highest priority first."""
        matched: List[Suggestion] = []
        for rule in self.rules:
            try:
                outcome = rule.evaluate(signals)
            except Exception as e:
                # One broken rule must not take down the whole engine.
                logger.warning(f"Rule {rule.id} failed: {e}")
                continue
            if outcome:
                matched.append(outcome)

        return sorted(matched, key=lambda s: s.priority, reverse=True)
|
||||
|
||||
|
||||
# ==================== Suggestion Generator ====================
|
||||
|
||||
class SuggestionGenerator:
    """Collects signals, evaluates rules and produces prioritized suggestions."""

    def __init__(self, db_session=None):
        self.collector = SignalCollector(db_session)
        self.rule_engine = RuleEngine()

    def generate(self, teacher_id: str, limit: int = 5) -> Dict[str, Any]:
        """Generate suggestions for one teacher.

        Returns:
            A dict with the keys "active_contexts", "suggestions",
            "signals_summary" and "total_suggestions".
        """
        # 1. Collect signals, 2. evaluate rules, 3. derive active contexts.
        signals = self.collector.collect(teacher_id)
        ranked = self.rule_engine.evaluate(signals)
        contexts = self._determine_active_contexts(signals)

        return {
            "active_contexts": [
                {"id": c.id, "type": c.context_type.value, "label": c.label}
                for c in contexts
            ],
            # 4. Only the top N suggestions are serialized for the caller.
            "suggestions": [
                {
                    "id": s.id,
                    "title": s.title,
                    "description": s.description,
                    "tone": s.tone.value,
                    "badge": s.badge,
                    "priority": s.priority,
                    "icon": s.icon,
                    "action_url": s.action_url,
                }
                for s in ranked[:limit]
            ],
            "signals_summary": {
                "macro_phase": signals.macro_phase,
                "current_week": signals.current_week,
                "has_classes": signals.has_classes,
                "exams_soon": len(signals.exams_in_7_days),
                "routines_today": len(signals.routines_today),
            },
            "total_suggestions": len(ranked),
        }

    def _determine_active_contexts(self, signals: Signals) -> List[ActiveContext]:
        """Derive the list of currently active contexts from the signals."""
        contexts: List[ActiveContext] = []

        def add(ctx_id: str, ctx_type: ContextType, label: str):
            contexts.append(ActiveContext(id=ctx_id, context_type=ctx_type, label=label))

        # Event windows
        if signals.exams_in_7_days:
            add("EXAM_IN_7_DAYS", ContextType.EVENT_WINDOW, "Klausur in 7 Tagen")
        if signals.trips_in_30_days:
            add("TRIP_UPCOMING", ContextType.EVENT_WINDOW, "Klassenfahrt geplant")

        # Routines
        if signals.has_conference_today:
            add("CONFERENCE_TODAY", ContextType.ROUTINE, "Konferenz heute")

        # Time-based
        if signals.is_weekend:
            add("WEEKEND", ContextType.TIME, "Wochenende")
        if signals.is_before_holidays:
            add("BEFORE_HOLIDAYS", ContextType.TIME, "Vor den Ferien")

        # Macro phase
        if signals.macro_phase == "onboarding":
            add("ONBOARDING", ContextType.PHASE, "Einrichtung")
        elif signals.macro_phase in ("halbjahresabschluss", "jahresabschluss"):
            add("GRADE_PERIOD", ContextType.PHASE, "Notenphase")

        return contexts
|
||||
from .antizipation_models import ( # noqa: F401
|
||||
SuggestionTone,
|
||||
ContextType,
|
||||
Signal,
|
||||
ActiveContext,
|
||||
Suggestion,
|
||||
Signals,
|
||||
)
|
||||
from .antizipation_collector import SignalCollector # noqa: F401
|
||||
from .antizipation_rules import RuleEngine # noqa: F401
|
||||
from .antizipation_generator import SuggestionGenerator # noqa: F401
|
||||
|
||||
131
backend-lehrer/classroom_engine/antizipation_collector.py
Normal file
131
backend-lehrer/classroom_engine/antizipation_collector.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""
|
||||
Antizipation Engine - Signal collector.
|
||||
|
||||
Sammelt Signale aus verschiedenen Quellen:
|
||||
- TeacherContext (Makro-Phase, Schuljahr)
|
||||
- SchoolyearEvents (Klausuren, Elternabende, etc.)
|
||||
- RecurringRoutines (Konferenzen heute)
|
||||
- Zeit/Kalender (Wochenende, Ferien)
|
||||
"""
|
||||
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from .antizipation_models import Signals
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SignalCollector:
    """Gathers signals from the available sources."""

    def __init__(self, db_session=None):
        self.db = db_session

    def collect(self, teacher_id: str) -> Signals:
        """Collect every available signal for one teacher."""
        result = Signals()

        self._collect_time_signals(result)

        if self.db:
            # DB-backed sources are only queried when a session is available.
            self._collect_context_signals(result, teacher_id)
            self._collect_event_signals(result, teacher_id)
            self._collect_routine_signals(result, teacher_id)

        return result

    def _collect_time_signals(self, signals: Signals):
        """Fill in the time/calendar based signals."""
        signals.is_weekend = datetime.utcnow().weekday() >= 5

        # TODO: integrate a per-state holiday calendar; dummy values for now.
        signals.is_before_holidays = False
        signals.days_until_holidays = 999

    def _collect_context_signals(self, signals: Signals, teacher_id: str):
        """Fill in signals derived from the teacher context."""
        from .repository import TeacherContextRepository

        try:
            ctx = TeacherContextRepository(self.db).get_or_create(teacher_id)

            signals.macro_phase = ctx.macro_phase.value
            signals.current_week = ctx.current_week or 1
            signals.onboarding_completed = ctx.onboarding_completed
            signals.has_classes = ctx.has_classes
            signals.has_schedule = ctx.has_schedule
            signals.classes_count = 1 if ctx.has_classes else 0

            # Whole weeks elapsed since the start of the school year.
            if ctx.schoolyear_start:
                elapsed = datetime.utcnow() - ctx.schoolyear_start
                signals.weeks_since_start = max(0, elapsed.days // 7)

            signals.is_before_holidays = ctx.is_before_holidays

        except Exception as e:
            # Best effort: one failing source must not break suggestion generation.
            logger.warning(f"Failed to collect context signals: {e}")

    def _collect_event_signals(self, signals: Signals, teacher_id: str):
        """Fill in signals derived from schoolyear events."""
        from .repository import SchoolyearEventRepository

        try:
            repo = SchoolyearEventRepository(self.db)
            horizon = datetime.utcnow() + timedelta(days=7)

            # Everything coming up within the next 30 days.
            upcoming = repo.get_upcoming(teacher_id, days=30, limit=20)
            signals.upcoming_events = [repo.to_dict(e) for e in upcoming]

            # Exams: total scheduled count plus those within the next 7 days.
            exams = [e for e in upcoming if e.event_type.value == "exam"]
            signals.exams_scheduled_count = len(exams)
            signals.exams_in_7_days = [
                repo.to_dict(e) for e in exams if e.start_date <= horizon
            ]

            # Class trips within the 30-day window.
            signals.trips_in_30_days = [
                repo.to_dict(e) for e in upcoming if e.event_type.value == "trip"
            ]

            # Parent evenings / consultations coming up soon.
            signals.parent_evenings_soon = [
                repo.to_dict(e)
                for e in upcoming
                if e.event_type.value in ("parent_evening", "parent_consultation")
            ]

        except Exception as e:
            logger.warning(f"Failed to collect event signals: {e}")

    def _collect_routine_signals(self, signals: Signals, teacher_id: str):
        """Fill in signals derived from recurring routines."""
        from .repository import RecurringRoutineRepository

        try:
            repo = RecurringRoutineRepository(self.db)
            todays = repo.get_today(teacher_id)

            signals.routines_today = [repo.to_dict(r) for r in todays]
            signals.has_conference_today = any(
                r.routine_type.value in ("teacher_conference", "subject_conference")
                for r in todays
            )

        except Exception as e:
            logger.warning(f"Failed to collect routine signals: {e}")
|
||||
136
backend-lehrer/classroom_engine/antizipation_generator.py
Normal file
136
backend-lehrer/classroom_engine/antizipation_generator.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""
|
||||
Antizipation Engine - SuggestionGenerator.
|
||||
|
||||
Main class that collects signals, evaluates rules, and generates
|
||||
prioritized suggestions for teachers.
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Any
|
||||
|
||||
from .antizipation_models import Signals, ActiveContext, ContextType
|
||||
from .antizipation_collector import SignalCollector
|
||||
from .antizipation_rules import RuleEngine
|
||||
|
||||
|
||||
class SuggestionGenerator:
    """Collects signals, evaluates rules and produces prioritized suggestions."""

    def __init__(self, db_session=None):
        self.collector = SignalCollector(db_session)
        self.rule_engine = RuleEngine()

    def generate(self, teacher_id: str, limit: int = 5) -> Dict[str, Any]:
        """Generate suggestions for one teacher.

        Returns:
            A dict with the keys "active_contexts", "suggestions",
            "signals_summary" and "total_suggestions".
        """
        # 1. Collect signals, 2. evaluate rules, 3. derive active contexts.
        signals = self.collector.collect(teacher_id)
        ranked = self.rule_engine.evaluate(signals)
        contexts = self._determine_active_contexts(signals)

        return {
            "active_contexts": [
                {"id": c.id, "type": c.context_type.value, "label": c.label}
                for c in contexts
            ],
            # 4. Only the top N suggestions are serialized for the caller.
            "suggestions": [
                {
                    "id": s.id,
                    "title": s.title,
                    "description": s.description,
                    "tone": s.tone.value,
                    "badge": s.badge,
                    "priority": s.priority,
                    "icon": s.icon,
                    "action_url": s.action_url,
                }
                for s in ranked[:limit]
            ],
            "signals_summary": {
                "macro_phase": signals.macro_phase,
                "current_week": signals.current_week,
                "has_classes": signals.has_classes,
                "exams_soon": len(signals.exams_in_7_days),
                "routines_today": len(signals.routines_today),
            },
            "total_suggestions": len(ranked),
        }

    def _determine_active_contexts(self, signals: Signals) -> List[ActiveContext]:
        """Derive the list of currently active contexts from the signals."""
        contexts: List[ActiveContext] = []

        def add(ctx_id: str, ctx_type: ContextType, label: str):
            contexts.append(ActiveContext(id=ctx_id, context_type=ctx_type, label=label))

        # Event windows
        if signals.exams_in_7_days:
            add("EXAM_IN_7_DAYS", ContextType.EVENT_WINDOW, "Klausur in 7 Tagen")
        if signals.trips_in_30_days:
            add("TRIP_UPCOMING", ContextType.EVENT_WINDOW, "Klassenfahrt geplant")

        # Routines
        if signals.has_conference_today:
            add("CONFERENCE_TODAY", ContextType.ROUTINE, "Konferenz heute")

        # Time-based
        if signals.is_weekend:
            add("WEEKEND", ContextType.TIME, "Wochenende")
        if signals.is_before_holidays:
            add("BEFORE_HOLIDAYS", ContextType.TIME, "Vor den Ferien")

        # Macro phase
        if signals.macro_phase == "onboarding":
            add("ONBOARDING", ContextType.PHASE, "Einrichtung")
        elif signals.macro_phase in ("halbjahresabschluss", "jahresabschluss"):
            add("GRADE_PERIOD", ContextType.PHASE, "Notenphase")

        return contexts
|
||||
93
backend-lehrer/classroom_engine/antizipation_models.py
Normal file
93
backend-lehrer/classroom_engine/antizipation_models.py
Normal file
@@ -0,0 +1,93 @@
|
||||
"""
|
||||
Antizipation Engine - Data models, enums, and signal container.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Any, Optional
|
||||
from enum import Enum
|
||||
|
||||
|
||||
# ==================== Enums & Types ====================
|
||||
|
||||
class SuggestionTone(str, Enum):
    """Tone/urgency level of a suggestion."""
    HINT = "hint"              # gentle hint
    SUGGESTION = "suggestion"  # active suggestion
    REMINDER = "reminder"      # reminder
    URGENT = "urgent"          # urgent
|
||||
|
||||
|
||||
class ContextType(str, Enum):
    """Type of an active context."""
    EVENT_WINDOW = "event_window"  # an event is coming up
    ROUTINE = "routine"            # a routine takes place today
    PHASE = "phase"                # driven by the macro phase
    TIME = "time"                  # time-based (holidays, weekend)
|
||||
|
||||
|
||||
@dataclass
class Signal:
    """A single signal collected from one source."""
    name: str
    value: Any
    source: str  # "calendar", "usage", "events", "routines"
|
||||
|
||||
|
||||
@dataclass
class ActiveContext:
    """An active context that influences which suggestions are shown."""
    id: str                   # stable identifier, e.g. "EXAM_IN_7_DAYS"
    context_type: ContextType
    label: str                # human-readable label (German UI text)
    data: Dict[str, Any] = field(default_factory=dict)  # optional extra payload
|
||||
|
||||
|
||||
@dataclass
class Suggestion:
    """A generated suggestion presented to the teacher."""
    id: str
    title: str
    description: str
    tone: SuggestionTone
    action_url: Optional[str] = None  # optional deep link for the suggested action
    badge: Optional[str] = None       # e.g. "in 7 Tagen"
    priority: int = 50                # 0-100, higher = more important
    rule_id: str = ""                 # id of the rule that produced this suggestion
    icon: str = "lightbulb"           # icon identifier shown with the suggestion
|
||||
|
||||
|
||||
@dataclass
class Signals:
    """Container for all collected signals.

    Every field has a safe default, so an "empty" Signals() represents a
    brand-new account with no data yet.
    """
    # Time / calendar
    current_week: int = 1
    weeks_since_start: int = 0
    is_weekend: bool = False
    is_before_holidays: bool = False
    days_until_holidays: int = 999  # sentinel: no upcoming holidays known

    # Macro phase
    macro_phase: str = "onboarding"
    onboarding_completed: bool = False

    # Product usage
    classes_count: int = 0
    has_classes: bool = False
    has_schedule: bool = False

    # Events
    exams_scheduled_count: int = 0
    exams_in_7_days: List[Dict] = field(default_factory=list)
    exams_past_ungraded: List[Dict] = field(default_factory=list)
    upcoming_events: List[Dict] = field(default_factory=list)
    trips_in_30_days: List[Dict] = field(default_factory=list)
    parent_evenings_soon: List[Dict] = field(default_factory=list)

    # Routines
    routines_today: List[Dict] = field(default_factory=list)
    has_conference_today: bool = False

    # Statistics (from analytics)
    corrections_pending: int = 0
    grades_completion_ratio: float = 0.0  # 0.0-1.0 share of grades entered
|
||||
340
backend-lehrer/classroom_engine/antizipation_rules.py
Normal file
340
backend-lehrer/classroom_engine/antizipation_rules.py
Normal file
@@ -0,0 +1,340 @@
|
||||
"""
|
||||
Antizipation Engine - Rule definitions and RuleEngine.
|
||||
|
||||
Each rule evaluates signals and optionally produces a Suggestion.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
from .antizipation_models import Signals, Suggestion, SuggestionTone
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ==================== Rule Base Class ====================
|
||||
|
||||
@dataclass
class Rule:
    """A rule that maps signals to a suggestion.

    Subclasses set id/name/description via the generated dataclass
    __init__ and override evaluate().
    """
    id: str
    name: str
    description: str

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        """Evaluate the rule; return a Suggestion, or None if it does not fire."""
        raise NotImplementedError()
|
||||
|
||||
|
||||
# ==================== Rule Implementations ====================
|
||||
|
||||
class R01_CreateClasses(Rule):
    """Suggest creating classes when a new teacher has none yet."""

    def __init__(self):
        super().__init__(
            id="R01",
            name="Klassen anlegen",
            description="Empfiehlt Klassen anzulegen bei neuem Lehrer"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        # Only relevant during onboarding and before any class exists.
        if signals.macro_phase != "onboarding" or signals.has_classes:
            return None
        return Suggestion(
            id="suggest_create_classes",
            title="Klassen anlegen",
            description="Legen Sie Ihre Klassen an, um den vollen Funktionsumfang zu nutzen.",
            tone=SuggestionTone.HINT,
            priority=90,
            rule_id=self.id,
            icon="group_add",
            action_url="/classes/new",
        )
|
||||
|
||||
|
||||
class R02_PrepareRubric(Rule):
    """Suggest preparing a grading rubric when an exam is due within 7 days."""

    def __init__(self):
        super().__init__(
            id="R02",
            name="Erwartungshorizont",
            description="Empfiehlt Erwartungshorizont vor Klausur"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.exams_in_7_days:
            return None
        upcoming = signals.exams_in_7_days[0]
        if upcoming.get("preparation_done", False):
            # Rubric already prepared — nothing to suggest.
            return None
        remaining_days = 7  # simplified: exact distance is not computed here
        return Suggestion(
            id=f"suggest_rubric_{upcoming['id'][:8]}",
            title="Erwartungshorizont erstellen",
            description=f"Klausur '{upcoming['title']}' steht bevor.",
            tone=SuggestionTone.SUGGESTION,
            badge=f"in {remaining_days} Tagen",
            priority=80,
            rule_id=self.id,
            icon="assignment",
            action_url=f"/exams/{upcoming['id']}/rubric",
        )
|
||||
|
||||
|
||||
class R03_StartCorrection(Rule):
    """Suggest starting the correction workflow for a written, ungraded exam."""

    def __init__(self):
        super().__init__(
            id="R03",
            name="Korrektur starten",
            description="Empfiehlt Korrektur nach Klausur"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.exams_past_ungraded:
            return None
        # Surface only the first pending exam from the signal list.
        pending = signals.exams_past_ungraded[0]
        return Suggestion(
            id=f"suggest_correction_{pending['id'][:8]}",
            title="Korrektur-Setup starten",
            description=f"Klausur '{pending['title']}' ist geschrieben.",
            tone=SuggestionTone.HINT,
            badge="bereit",
            priority=75,
            rule_id=self.id,
            icon="rate_review",
        )
|
||||
|
||||
|
||||
class R05_PrepareAgenda(Rule):
    """Suggest preparing an agenda when a conference takes place today."""

    def __init__(self):
        super().__init__(
            id="R05",
            name="Konferenz-Agenda",
            description="Empfiehlt Agenda wenn Konferenz heute"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.has_conference_today:
            return None
        # Locate today's conference among the routines.
        conference = None
        for routine in signals.routines_today:
            if routine.get("routine_type") in ("teacher_conference", "subject_conference"):
                conference = routine
                break
        if conference is None:
            return None
        return Suggestion(
            id="suggest_agenda",
            title="Konferenz-Agenda vorbereiten",
            description=f"{conference.get('title', 'Konferenz')} heute.",
            tone=SuggestionTone.SUGGESTION,
            badge="heute",
            priority=70,
            rule_id=self.id,
            icon="event_note",
        )
|
||||
|
||||
|
||||
class R07_PlanFirstExam(Rule):
    """Suggest scheduling the first exam once four weeks have passed."""

    def __init__(self):
        super().__init__(
            id="R07",
            name="Erste Arbeit planen",
            description="Empfiehlt erste Klausur nach Anlaufphase"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        # Fires only when classes exist, at least four weeks have passed,
        # and no exam has been scheduled yet.
        ready = (
            signals.weeks_since_start >= 4
            and signals.exams_scheduled_count == 0
            and signals.has_classes
        )
        if not ready:
            return None
        return Suggestion(
            id="suggest_first_exam",
            title="Erste Klassenarbeit planen",
            description="Nach 4 Wochen Unterricht ist ein guter Zeitpunkt fuer die erste Leistungsueberpruefung.",
            tone=SuggestionTone.SUGGESTION,
            priority=60,
            rule_id=self.id,
            icon="quiz",
            action_url="/exams/new",
        )
|
||||
|
||||
|
||||
class R08_CorrectionMode(Rule):
    """Remind about open corrections shortly before the holidays."""

    def __init__(self):
        super().__init__(
            id="R08",
            name="Ferien-Korrekturmodus",
            description="Empfiehlt Korrekturen vor Ferien"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not (signals.is_before_holidays and signals.corrections_pending > 0):
            return None
        return Suggestion(
            id="suggest_correction_mode",
            title="Ferien-Korrekturmodus",
            description=f"{signals.corrections_pending} Korrekturen noch offen vor den Ferien.",
            tone=SuggestionTone.REMINDER,
            badge=f"{signals.days_until_holidays}d bis Ferien",
            priority=65,
            rule_id=self.id,
            icon="grading",
        )
|
||||
|
||||
|
||||
class R09_TripChecklist(Rule):
    """Suggest a checklist when a class trip is within 30 days."""

    def __init__(self):
        super().__init__(
            id="R09",
            name="Klassenfahrt-Checkliste",
            description="Empfiehlt Checkliste vor Klassenfahrt"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.trips_in_30_days:
            return None
        upcoming_trip = signals.trips_in_30_days[0]
        return Suggestion(
            id=f"suggest_trip_{upcoming_trip['id'][:8]}",
            title="Klassenfahrt-Checkliste",
            description=f"'{upcoming_trip['title']}' steht bevor.",
            tone=SuggestionTone.SUGGESTION,
            badge="in 30 Tagen",
            priority=55,
            rule_id=self.id,
            icon="luggage",
            action_url=f"/trips/{upcoming_trip['id']}/checklist",
        )
|
||||
|
||||
|
||||
class R10_CompleteGrades(Rule):
    """Remind to finish grade entry during the end-of-term phase."""

    def __init__(self):
        super().__init__(
            id="R10",
            name="Noten vervollstaendigen",
            description="Empfiehlt Noten vor Notenschluss"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        in_grading_phase = signals.macro_phase in ("halbjahresabschluss", "jahresabschluss")
        # Fire only in a grading phase while less than 80% of grades are entered.
        if not (in_grading_phase and signals.grades_completion_ratio < 0.8):
            return None
        completed_pct = int(signals.grades_completion_ratio * 100)
        return Suggestion(
            id="suggest_complete_grades",
            title="Noten vervollstaendigen",
            description=f"Nur {completed_pct}% der Noten eingetragen. Notenschluss naht!",
            tone=SuggestionTone.REMINDER,
            priority=85,
            rule_id=self.id,
            icon="calculate",
            action_url="/grades",
        )
|
||||
|
||||
|
||||
class R11_SetupSchedule(Rule):
    """Suggest setting up the timetable during onboarding."""

    def __init__(self):
        super().__init__(
            id="R11",
            name="Stundenplan einrichten",
            description="Empfiehlt Stundenplan-Setup"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        # Only relevant during onboarding and while no timetable exists.
        if signals.macro_phase != "onboarding" or signals.has_schedule:
            return None
        return Suggestion(
            id="suggest_setup_schedule",
            title="Stundenplan einrichten",
            description="Richten Sie Ihren Stundenplan ein fuer personalisierte Vorschlaege.",
            tone=SuggestionTone.HINT,
            priority=85,
            rule_id=self.id,
            icon="calendar_month",
            action_url="/schedule/setup",
        )
|
||||
|
||||
|
||||
class R12_ParentEvening(Rule):
    """Suggest preparing for an upcoming parents' evening."""

    def __init__(self):
        super().__init__(
            id="R12",
            name="Elternabend vorbereiten",
            description="Empfiehlt Vorbereitung vor Elternabend"
        )

    def evaluate(self, signals: Signals) -> Optional[Suggestion]:
        if not signals.parent_evenings_soon:
            return None
        evening = signals.parent_evenings_soon[0]
        return Suggestion(
            id=f"suggest_parent_{evening['id'][:8]}",
            title="Elternabend vorbereiten",
            description=f"'{evening['title']}' steht bevor.",
            tone=SuggestionTone.SUGGESTION,
            badge="bald",
            priority=65,
            rule_id=self.id,
            icon="family_restroom",
            action_url=f"/events/{evening['id']}/prepare",
        )
|
||||
|
||||
|
||||
# ==================== Rule Engine ====================
|
||||
|
||||
class RuleEngine:
    """
    Evaluates all registered rules against the collected signals.
    """

    def __init__(self):
        # All known rules; registration order does not matter because the
        # results are sorted by priority afterwards.
        self.rules: List[Rule] = [
            R01_CreateClasses(),
            R02_PrepareRubric(),
            R03_StartCorrection(),
            R05_PrepareAgenda(),
            R07_PlanFirstExam(),
            R08_CorrectionMode(),
            R09_TripChecklist(),
            R10_CompleteGrades(),
            R11_SetupSchedule(),
            R12_ParentEvening(),
        ]

    def evaluate(self, signals: Signals) -> List[Suggestion]:
        """Evaluate every rule and return matching suggestions, highest priority first."""
        suggestions: List[Suggestion] = []

        for rule in self.rules:
            try:
                suggestion = rule.evaluate(signals)
                if suggestion:
                    suggestions.append(suggestion)
            except Exception:
                # A single faulty rule must not break the whole engine.
                # Lazy %-args avoid eager f-string formatting and
                # exc_info=True preserves the traceback for debugging.
                logger.warning("Rule %s failed", rule.id, exc_info=True)

        # Sort by priority (highest first).
        suggestions.sort(key=lambda s: s.priority, reverse=True)

        return suggestions
|
||||
@@ -1,683 +1,23 @@
|
||||
"""
|
||||
Correction API - REST API für Klassenarbeits-Korrektur.
|
||||
Correction API - REST API fuer Klassenarbeits-Korrektur.
|
||||
|
||||
Workflow:
|
||||
1. Upload: Gescannte Klassenarbeit hochladen
|
||||
2. OCR: Text aus Handschrift extrahieren
|
||||
3. Analyse: Antworten analysieren und bewerten
|
||||
4. Feedback: KI-generiertes Feedback erstellen
|
||||
5. Export: Korrigierte Arbeit als PDF exportieren
|
||||
|
||||
Integriert:
|
||||
- FileProcessor für OCR
|
||||
- PDFService für Export
|
||||
- LLM für Analyse und Feedback
|
||||
Barrel re-export: router and all public symbols.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Any, Optional
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from fastapi import APIRouter, HTTPException, UploadFile, File, Form, BackgroundTasks
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
# FileProcessor requires OpenCV with libGL - make optional for CI
|
||||
try:
|
||||
from services.file_processor import FileProcessor, ProcessingResult
|
||||
_ocr_available = True
|
||||
except (ImportError, OSError):
|
||||
FileProcessor = None # type: ignore
|
||||
ProcessingResult = None # type: ignore
|
||||
_ocr_available = False
|
||||
|
||||
# PDF service requires WeasyPrint with system libraries - make optional for CI
|
||||
try:
|
||||
from services.pdf_service import PDFService, CorrectionData, StudentInfo
|
||||
_pdf_available = True
|
||||
except (ImportError, OSError):
|
||||
PDFService = None # type: ignore
|
||||
CorrectionData = None # type: ignore
|
||||
StudentInfo = None # type: ignore
|
||||
_pdf_available = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/corrections",
|
||||
tags=["corrections"],
|
||||
from correction_endpoints import router # noqa: F401
|
||||
from correction_models import ( # noqa: F401
|
||||
CorrectionStatus,
|
||||
AnswerEvaluation,
|
||||
CorrectionCreate,
|
||||
CorrectionUpdate,
|
||||
Correction,
|
||||
CorrectionResponse,
|
||||
OCRResponse,
|
||||
AnalysisResponse,
|
||||
)
|
||||
from correction_helpers import ( # noqa: F401
|
||||
corrections_store,
|
||||
calculate_grade,
|
||||
generate_ai_feedback,
|
||||
process_ocr,
|
||||
)
|
||||
|
||||
# Upload directory
|
||||
UPLOAD_DIR = Path("/tmp/corrections")
|
||||
UPLOAD_DIR.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Enums and Models
|
||||
# ============================================================================
|
||||
|
||||
class CorrectionStatus(str, Enum):
    """Lifecycle status of a correction."""
    UPLOADED = "uploaded"          # file uploaded
    PROCESSING = "processing"      # OCR running
    OCR_COMPLETE = "ocr_complete"  # OCR finished
    ANALYZING = "analyzing"        # analysis running
    ANALYZED = "analyzed"          # analysis finished
    REVIEWING = "reviewing"        # teacher is reviewing
    COMPLETED = "completed"        # correction finished
    ERROR = "error"                # an error occurred
|
||||
|
||||
|
||||
class AnswerEvaluation(BaseModel):
    """Evaluation of a single answer."""
    question_number: int
    extracted_text: str     # OCR text of the answer (may be truncated)
    points_possible: float
    points_awarded: float
    feedback: str
    is_correct: bool
    confidence: float       # 0-1, how confident the OCR/analysis is
|
||||
|
||||
|
||||
class CorrectionCreate(BaseModel):
    """Request payload for creating a new correction."""
    student_id: str
    student_name: str
    class_name: str
    exam_title: str
    subject: str
    max_points: float = Field(default=100.0, ge=0)
    expected_answers: Optional[Dict[str, str]] = None  # model solution, keyed by question number
|
||||
|
||||
|
||||
class CorrectionUpdate(BaseModel):
    """Request payload for (partially) updating a correction.

    All fields are optional; only the supplied ones are applied.
    """
    evaluations: Optional[List[AnswerEvaluation]] = None
    total_points: Optional[float] = None
    grade: Optional[str] = None
    teacher_notes: Optional[str] = None
    status: Optional[CorrectionStatus] = None
|
||||
|
||||
|
||||
class Correction(BaseModel):
    """A correction record (one student's graded exam)."""
    id: str
    student_id: str
    student_name: str
    class_name: str
    exam_title: str
    subject: str
    max_points: float
    total_points: float = 0.0
    percentage: float = 0.0            # total_points / max_points * 100
    grade: Optional[str] = None        # German school grade "1".."6"
    status: CorrectionStatus
    file_path: Optional[str] = None    # path of the uploaded scan, set on upload
    extracted_text: Optional[str] = None  # OCR result
    evaluations: List[AnswerEvaluation] = []  # pydantic copies this default per instance
    teacher_notes: Optional[str] = None
    ai_feedback: Optional[str] = None
    created_at: datetime
    updated_at: datetime
|
||||
|
||||
|
||||
class CorrectionResponse(BaseModel):
    """Response envelope for a single correction."""
    success: bool
    correction: Optional[Correction] = None  # set when success is True
    error: Optional[str] = None              # set when success is False
|
||||
|
||||
|
||||
class OCRResponse(BaseModel):
    """Response envelope for an OCR result."""
    success: bool
    extracted_text: Optional[str] = None
    regions: List[Dict[str, Any]] = []  # detected text regions, if any
    confidence: float = 0.0             # 0-1 overall OCR confidence
    error: Optional[str] = None
|
||||
|
||||
|
||||
class AnalysisResponse(BaseModel):
    """Response envelope for an analysis result."""
    success: bool
    evaluations: List[AnswerEvaluation] = []
    total_points: float = 0.0
    percentage: float = 0.0
    suggested_grade: Optional[str] = None  # auto-derived German grade
    ai_feedback: Optional[str] = None
    error: Optional[str] = None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# In-Memory Storage (später durch DB ersetzen)
|
||||
# ============================================================================
|
||||
|
||||
_corrections: Dict[str, Correction] = {}  # in-memory store keyed by correction id
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Helper Functions
|
||||
# ============================================================================
|
||||
|
||||
def _calculate_grade(percentage: float) -> str:
|
||||
"""Berechnet Note aus Prozent (deutsches System)."""
|
||||
if percentage >= 92:
|
||||
return "1"
|
||||
elif percentage >= 81:
|
||||
return "2"
|
||||
elif percentage >= 67:
|
||||
return "3"
|
||||
elif percentage >= 50:
|
||||
return "4"
|
||||
elif percentage >= 30:
|
||||
return "5"
|
||||
else:
|
||||
return "6"
|
||||
|
||||
|
||||
def _generate_ai_feedback(
|
||||
evaluations: List[AnswerEvaluation],
|
||||
total_points: float,
|
||||
max_points: float,
|
||||
subject: str
|
||||
) -> str:
|
||||
"""Generiert KI-Feedback basierend auf Bewertung."""
|
||||
# Ohne LLM: Einfaches Template-basiertes Feedback
|
||||
percentage = (total_points / max_points * 100) if max_points > 0 else 0
|
||||
correct_count = sum(1 for e in evaluations if e.is_correct)
|
||||
total_count = len(evaluations)
|
||||
|
||||
if percentage >= 90:
|
||||
intro = "Hervorragende Leistung!"
|
||||
elif percentage >= 75:
|
||||
intro = "Gute Arbeit!"
|
||||
elif percentage >= 60:
|
||||
intro = "Insgesamt eine solide Leistung."
|
||||
elif percentage >= 50:
|
||||
intro = "Die Arbeit zeigt Grundkenntnisse, aber es gibt Verbesserungsbedarf."
|
||||
else:
|
||||
intro = "Es sind deutliche Wissenslücken erkennbar."
|
||||
|
||||
# Finde Verbesserungsbereiche
|
||||
weak_areas = [e for e in evaluations if not e.is_correct]
|
||||
strengths = [e for e in evaluations if e.is_correct and e.confidence > 0.8]
|
||||
|
||||
feedback_parts = [intro]
|
||||
|
||||
if strengths:
|
||||
feedback_parts.append(
|
||||
f"Besonders gut gelöst: Aufgabe(n) {', '.join(str(s.question_number) for s in strengths[:3])}."
|
||||
)
|
||||
|
||||
if weak_areas:
|
||||
feedback_parts.append(
|
||||
f"Übungsbedarf bei: Aufgabe(n) {', '.join(str(w.question_number) for w in weak_areas[:3])}."
|
||||
)
|
||||
|
||||
feedback_parts.append(
|
||||
f"Ergebnis: {correct_count} von {total_count} Aufgaben korrekt ({percentage:.1f}%)."
|
||||
)
|
||||
|
||||
return " ".join(feedback_parts)
|
||||
|
||||
|
||||
async def _process_ocr(correction_id: str, file_path: str):
    """Background task: run OCR on the uploaded scan and update the stored correction.

    Transitions the status PROCESSING -> OCR_COMPLETE on success,
    or -> ERROR on failure/empty OCR result.
    """
    correction = _corrections.get(correction_id)
    if not correction:
        # Correction vanished (or never existed) — nothing to do.
        return

    try:
        correction.status = CorrectionStatus.PROCESSING
        _corrections[correction_id] = correction

        # Run OCR on the uploaded file.
        # NOTE(review): FileProcessor may be None when the optional OpenCV
        # dependency is missing; the resulting TypeError is caught below
        # and surfaces as status ERROR — confirm intended.
        processor = FileProcessor()
        result = processor.process_file(file_path)

        if result.success and result.text:
            correction.extracted_text = result.text
            correction.status = CorrectionStatus.OCR_COMPLETE
        else:
            correction.status = CorrectionStatus.ERROR

        correction.updated_at = datetime.utcnow()
        _corrections[correction_id] = correction

    except Exception as e:
        logger.error(f"OCR error for {correction_id}: {e}")
        correction.status = CorrectionStatus.ERROR
        correction.updated_at = datetime.utcnow()
        _corrections[correction_id] = correction
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# API Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/", response_model=CorrectionResponse)
async def create_correction(data: CorrectionCreate):
    """
    Create a new correction record.

    The scanned exam file is uploaded separately via the upload endpoint.
    """
    correction_id = str(uuid.uuid4())
    now = datetime.utcnow()

    # NOTE(review): data.expected_answers is not stored here — confirm it
    # is intentionally supplied again at analyze time.
    correction = Correction(
        id=correction_id,
        student_id=data.student_id,
        student_name=data.student_name,
        class_name=data.class_name,
        exam_title=data.exam_title,
        subject=data.subject,
        max_points=data.max_points,
        status=CorrectionStatus.UPLOADED,
        created_at=now,
        updated_at=now
    )

    _corrections[correction_id] = correction
    logger.info(f"Created correction {correction_id} for {data.student_name}")

    return CorrectionResponse(success=True, correction=correction)
|
||||
|
||||
|
||||
@router.post("/{correction_id}/upload", response_model=CorrectionResponse)
async def upload_exam(
    correction_id: str,
    background_tasks: BackgroundTasks,
    file: UploadFile = File(...)
):
    """
    Upload the scanned exam and kick off OCR in the background.

    Supported formats: PDF, PNG, JPG, JPEG.
    """
    correction = _corrections.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    # Validate the file format by extension.
    allowed_extensions = {".pdf", ".png", ".jpg", ".jpeg"}
    file_ext = Path(file.filename).suffix.lower() if file.filename else ""

    if file_ext not in allowed_extensions:
        raise HTTPException(
            status_code=400,
            detail=f"Ungültiges Dateiformat. Erlaubt: {', '.join(allowed_extensions)}"
        )

    # Store the upload under the correction id (overwrites any previous upload).
    file_path = UPLOAD_DIR / f"{correction_id}{file_ext}"

    try:
        # NOTE(review): reads the whole upload into memory — fine for single
        # exam scans, revisit for very large PDFs.
        content = await file.read()
        with open(file_path, "wb") as f:
            f.write(content)

        correction.file_path = str(file_path)
        correction.updated_at = datetime.utcnow()
        _corrections[correction_id] = correction

        # Run OCR asynchronously so this request returns immediately.
        background_tasks.add_task(_process_ocr, correction_id, str(file_path))

        logger.info(f"Uploaded file for correction {correction_id}: {file.filename}")

        return CorrectionResponse(success=True, correction=correction)

    except Exception as e:
        logger.error(f"Upload error: {e}")
        return CorrectionResponse(success=False, error=str(e))
|
||||
|
||||
|
||||
@router.get("/{correction_id}", response_model=CorrectionResponse)
async def get_correction(correction_id: str):
    """Fetch a single correction by id; 404 if unknown."""
    correction = _corrections.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    return CorrectionResponse(success=True, correction=correction)
|
||||
|
||||
|
||||
@router.get("/", response_model=Dict[str, Any])
async def list_corrections(
    class_name: Optional[str] = None,
    status: Optional[CorrectionStatus] = None,
    limit: int = 50
):
    """List corrections, optionally filtered by class and/or status.

    ``total`` counts all matches before the ``limit`` cut-off.
    """
    # Truthiness (not an is-None check) preserves "empty string = no filter".
    matches = [
        c for c in _corrections.values()
        if (not class_name or c.class_name == class_name)
        and (not status or c.status == status)
    ]

    # Newest first.
    matches.sort(key=lambda c: c.created_at, reverse=True)

    return {
        "total": len(matches),
        "corrections": [c.dict() for c in matches[:limit]],
    }
|
||||
|
||||
|
||||
@router.post("/{correction_id}/analyze", response_model=AnalysisResponse)
async def analyze_correction(
    correction_id: str,
    expected_answers: Optional[Dict[str, str]] = None
):
    """
    Analyze the extracted answers.

    Optionally takes a model solution (``expected_answers``, keyed by the
    question number as a string) for automatic grading.
    """
    correction = _corrections.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    # Analysis requires finished OCR; re-analyzing an ANALYZED correction
    # is allowed.
    if correction.status not in [CorrectionStatus.OCR_COMPLETE, CorrectionStatus.ANALYZED]:
        raise HTTPException(
            status_code=400,
            detail=f"Korrektur im falschen Status: {correction.status}"
        )

    if not correction.extracted_text:
        raise HTTPException(status_code=400, detail="Kein extrahierter Text vorhanden")

    try:
        correction.status = CorrectionStatus.ANALYZING
        _corrections[correction_id] = correction

        # Simple analysis without an LLM: split the text into blank-line
        # separated sections (simulates question detection).
        text_parts = correction.extracted_text.split('\n\n')
        evaluations = []

        for i, part in enumerate(text_parts[:10], start=1):  # max 10 questions
            if len(part.strip()) < 5:
                continue

            # Simulated grading — in production an LLM-based analysis
            # would happen here.
            expected = expected_answers.get(str(i), "") if expected_answers else ""

            # Naive substring comparison (production: semantic comparison).
            is_correct = bool(expected and expected.lower() in part.lower())
            # NOTE(review): points are split across ALL parts, including
            # skipped short ones and any beyond the first 10, so awarded
            # points may not sum to max_points — confirm intended.
            points = correction.max_points / len(text_parts) if text_parts else 0

            evaluation = AnswerEvaluation(
                question_number=i,
                extracted_text=part[:200],  # truncated for the response
                points_possible=points,
                points_awarded=points if is_correct else points * 0.5,  # partial credit
                feedback=f"Antwort zu Aufgabe {i}" + (" korrekt." if is_correct else " mit Verbesserungsbedarf."),
                is_correct=is_correct,
                confidence=0.7  # simulated confidence
            )
            evaluations.append(evaluation)

        # Compute the overall result.
        total_points = sum(e.points_awarded for e in evaluations)
        percentage = (total_points / correction.max_points * 100) if correction.max_points > 0 else 0
        suggested_grade = _calculate_grade(percentage)

        # Generate the template-based feedback text.
        ai_feedback = _generate_ai_feedback(
            evaluations, total_points, correction.max_points, correction.subject
        )

        # Persist the analysis on the correction.
        correction.evaluations = evaluations
        correction.total_points = total_points
        correction.percentage = percentage
        correction.grade = suggested_grade
        correction.ai_feedback = ai_feedback
        correction.status = CorrectionStatus.ANALYZED
        correction.updated_at = datetime.utcnow()
        _corrections[correction_id] = correction

        logger.info(f"Analysis complete for {correction_id}: {total_points}/{correction.max_points}")

        return AnalysisResponse(
            success=True,
            evaluations=evaluations,
            total_points=total_points,
            percentage=percentage,
            suggested_grade=suggested_grade,
            ai_feedback=ai_feedback
        )

    except Exception as e:
        logger.error(f"Analysis error: {e}")
        correction.status = CorrectionStatus.ERROR
        _corrections[correction_id] = correction
        return AnalysisResponse(success=False, error=str(e))
|
||||
|
||||
|
||||
@router.put("/{correction_id}", response_model=CorrectionResponse)
async def update_correction(correction_id: str, data: CorrectionUpdate):
    """
    Apply manual teacher adjustments to a correction.

    Only fields present in the request are changed; the percentage is
    recomputed whenever evaluations or the point total change, and an
    explicitly supplied total wins over the recomputed sum.
    """
    correction = _corrections.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    def _pct(points: float) -> float:
        # Guard against division by zero when max_points == 0.
        return (points / correction.max_points * 100) if correction.max_points > 0 else 0

    if data.evaluations is not None:
        correction.evaluations = data.evaluations
        correction.total_points = sum(e.points_awarded for e in data.evaluations)
        correction.percentage = _pct(correction.total_points)

    if data.total_points is not None:
        correction.total_points = data.total_points
        correction.percentage = _pct(data.total_points)

    if data.grade is not None:
        correction.grade = data.grade

    if data.teacher_notes is not None:
        correction.teacher_notes = data.teacher_notes

    if data.status is not None:
        correction.status = data.status

    correction.updated_at = datetime.utcnow()
    _corrections[correction_id] = correction

    return CorrectionResponse(success=True, correction=correction)
|
||||
|
||||
|
||||
@router.post("/{correction_id}/complete", response_model=CorrectionResponse)
async def complete_correction(correction_id: str):
    """Mark a correction as completed."""
    correction = _corrections.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    correction.status = CorrectionStatus.COMPLETED
    correction.updated_at = datetime.utcnow()
    _corrections[correction_id] = correction

    logger.info(f"Correction {correction_id} completed: {correction.grade}")

    return CorrectionResponse(success=True, correction=correction)
|
||||
|
||||
|
||||
@router.get("/{correction_id}/export-pdf")
async def export_correction_pdf(correction_id: str):
    """Export a corrected exam as a downloadable PDF.

    The document contains the original scan, the per-task evaluations,
    the feedback texts and the overall result.

    Raises HTTP 404 for unknown ids and HTTP 500 when PDF generation fails.
    """
    correction = _corrections.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    try:
        pdf_service = PDFService()

        # Map the stored correction onto the PDF service's input model.
        correction_data = CorrectionData(
            student=StudentInfo(
                student_id=correction.student_id,
                name=correction.student_name,
                class_name=correction.class_name
            ),
            exam_title=correction.exam_title,
            subject=correction.subject,
            date=correction.created_at.strftime("%d.%m.%Y"),
            max_points=correction.max_points,
            achieved_points=correction.total_points,
            grade=correction.grade or "",
            percentage=correction.percentage,
            corrections=[
                {
                    "question": f"Aufgabe {e.question_number}",
                    "answer": e.extracted_text,
                    "points": f"{e.points_awarded}/{e.points_possible}",
                    "feedback": e.feedback
                }
                for e in correction.evaluations
            ],
            teacher_notes=correction.teacher_notes or "",
            ai_feedback=correction.ai_feedback or ""
        )

        pdf_bytes = pdf_service.generate_correction_pdf(correction_data)

        from fastapi.responses import Response

        # Bug fix: the filename is built from user-supplied student/exam
        # names. HTTP header values must not contain quotes, control
        # characters or (in Starlette) non-latin-1 text, so reduce the
        # name to a conservative ASCII subset before embedding it.
        raw_name = f"{correction.student_name}_{correction.exam_title}"
        safe_name = "".join(
            ch if (ch.isascii() and ch.isalnum()) or ch in "._-" else "_"
            for ch in raw_name
        )
        return Response(
            content=pdf_bytes,
            media_type="application/pdf",
            headers={
                "Content-Disposition": f'attachment; filename="korrektur_{safe_name}.pdf"'
            }
        )

    except Exception as e:
        logger.error(f"PDF export error: {e}")
        raise HTTPException(status_code=500, detail=f"PDF-Export fehlgeschlagen: {str(e)}")
|
||||
|
||||
|
||||
@router.delete("/{correction_id}")
async def delete_correction(correction_id: str):
    """Remove a correction and best-effort delete its uploaded scan."""
    record = _corrections.pop(correction_id, None)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    # The scan on disk is removed opportunistically; a failure here must
    # not prevent the correction itself from being deleted.
    if record.file_path and os.path.exists(record.file_path):
        try:
            os.remove(record.file_path)
        except Exception as e:
            logger.warning(f"Could not delete file {record.file_path}: {e}")

    logger.info(f"Deleted correction {correction_id}")
    return {"status": "deleted", "id": correction_id}
|
||||
|
||||
|
||||
@router.get("/class/{class_name}/summary")
async def get_class_summary(class_name: str):
    """Aggregate statistics over all completed corrections of one class.

    Returns totals, the average percentage/points, a grade histogram and
    one row per student, sorted by student name.
    """
    completed = [
        c for c in _corrections.values()
        if c.class_name == class_name and c.status == CorrectionStatus.COMPLETED
    ]

    if not completed:
        # Empty summary shape so clients never have to special-case None.
        return {
            "class_name": class_name,
            "total_students": 0,
            "average_percentage": 0,
            "grade_distribution": {},
            "corrections": []
        }

    count = len(completed)
    avg_pct = sum(c.percentage for c in completed) / count
    avg_points = sum(c.total_points for c in completed) / count

    # Histogram of grades; ungraded corrections are bucketed under "?".
    distribution = {}
    for c in completed:
        key = c.grade or "?"
        distribution[key] = distribution.get(key, 0) + 1

    rows = [
        {
            "id": c.id,
            "student_name": c.student_name,
            "total_points": c.total_points,
            "percentage": c.percentage,
            "grade": c.grade
        }
        for c in sorted(completed, key=lambda x: x.student_name)
    ]

    return {
        "class_name": class_name,
        "total_students": count,
        "average_percentage": round(avg_pct, 1),
        "average_points": round(avg_points, 1),
        "grade_distribution": distribution,
        "corrections": rows
    }
|
||||
|
||||
|
||||
@router.post("/{correction_id}/ocr/retry", response_model=CorrectionResponse)
async def retry_ocr(correction_id: str, background_tasks: BackgroundTasks):
    """Re-run OCR for a correction whose first pass failed.

    Raises HTTP 404 for unknown ids and HTTP 400 when no (existing) file
    is attached to the correction.
    """
    record = _corrections.get(correction_id)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")
    if not record.file_path:
        raise HTTPException(status_code=400, detail="Keine Datei vorhanden")
    if not os.path.exists(record.file_path):
        raise HTTPException(status_code=400, detail="Datei nicht mehr vorhanden")

    # Reset the correction to its pre-OCR state before queueing the task.
    record.status = CorrectionStatus.UPLOADED
    record.extracted_text = None
    record.updated_at = datetime.utcnow()
    _corrections[correction_id] = record

    background_tasks.add_task(_process_ocr, correction_id, record.file_path)
    return CorrectionResponse(success=True, correction=record)
|
||||
|
||||
474
backend-lehrer/correction_endpoints.py
Normal file
474
backend-lehrer/correction_endpoints.py
Normal file
@@ -0,0 +1,474 @@
|
||||
"""
|
||||
Correction API - REST endpoint handlers.
|
||||
|
||||
Workflow:
|
||||
1. Upload: Gescannte Klassenarbeit hochladen
|
||||
2. OCR: Text aus Handschrift extrahieren
|
||||
3. Analyse: Antworten analysieren und bewerten
|
||||
4. Feedback: KI-generiertes Feedback erstellen
|
||||
5. Export: Korrigierte Arbeit als PDF exportieren
|
||||
"""
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional
|
||||
from pathlib import Path
|
||||
|
||||
from fastapi import APIRouter, HTTPException, UploadFile, File, BackgroundTasks
|
||||
|
||||
from correction_models import (
|
||||
CorrectionStatus,
|
||||
AnswerEvaluation,
|
||||
CorrectionCreate,
|
||||
CorrectionUpdate,
|
||||
Correction,
|
||||
CorrectionResponse,
|
||||
AnalysisResponse,
|
||||
UPLOAD_DIR,
|
||||
)
|
||||
from correction_helpers import (
|
||||
corrections_store,
|
||||
calculate_grade,
|
||||
generate_ai_feedback,
|
||||
process_ocr,
|
||||
PDFService,
|
||||
CorrectionData,
|
||||
StudentInfo,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/corrections",
|
||||
tags=["corrections"],
|
||||
)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# API Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.post("/", response_model=CorrectionResponse)
async def create_correction(data: CorrectionCreate):
    """Create a new correction record.

    The exam scan is not part of this request; it is uploaded separately
    via the ``/upload`` endpoint.
    """
    now = datetime.utcnow()
    correction_id = str(uuid.uuid4())

    record = Correction(
        id=correction_id,
        student_id=data.student_id,
        student_name=data.student_name,
        class_name=data.class_name,
        exam_title=data.exam_title,
        subject=data.subject,
        max_points=data.max_points,
        status=CorrectionStatus.UPLOADED,
        created_at=now,
        updated_at=now
    )

    corrections_store[correction_id] = record
    logger.info(f"Created correction {correction_id} for {data.student_name}")
    return CorrectionResponse(success=True, correction=record)
|
||||
|
||||
|
||||
@router.post("/{correction_id}/upload", response_model=CorrectionResponse)
async def upload_exam(
    correction_id: str,
    background_tasks: BackgroundTasks,
    file: UploadFile = File(...)
):
    """Store an uploaded exam scan and schedule OCR in the background.

    Accepted formats: PDF, PNG, JPG, JPEG. Raises HTTP 404 for unknown
    ids and HTTP 400 for unsupported file extensions.
    """
    record = corrections_store.get(correction_id)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    allowed_extensions = {".pdf", ".png", ".jpg", ".jpeg"}
    file_ext = Path(file.filename).suffix.lower() if file.filename else ""
    if file_ext not in allowed_extensions:
        raise HTTPException(
            status_code=400,
            detail=f"Ungueltiges Dateiformat. Erlaubt: {', '.join(allowed_extensions)}"
        )

    destination = UPLOAD_DIR / f"{correction_id}{file_ext}"
    try:
        payload = await file.read()
        with open(destination, "wb") as fh:
            fh.write(payload)

        record.file_path = str(destination)
        record.updated_at = datetime.utcnow()
        corrections_store[correction_id] = record

        # OCR runs asynchronously so the upload request returns immediately.
        background_tasks.add_task(process_ocr, correction_id, str(destination))

        logger.info(f"Uploaded file for correction {correction_id}: {file.filename}")
        return CorrectionResponse(success=True, correction=record)

    except Exception as e:
        logger.error(f"Upload error: {e}")
        return CorrectionResponse(success=False, error=str(e))
|
||||
|
||||
|
||||
@router.get("/{correction_id}", response_model=CorrectionResponse)
async def get_correction(correction_id: str):
    """Fetch a single correction by id (HTTP 404 when unknown)."""
    record = corrections_store.get(correction_id)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")
    return CorrectionResponse(success=True, correction=record)
|
||||
|
||||
|
||||
@router.get("/", response_model=Dict[str, Any])
async def list_corrections(
    class_name: Optional[str] = None,
    status: Optional[CorrectionStatus] = None,
    limit: int = 50
):
    """List corrections, newest first, optionally filtered by class/status.

    ``total`` counts all matches; the payload is capped at ``limit`` items.
    """
    # Both filters are applied in one pass; falsy filter values mean "no filter".
    matches = [
        c for c in corrections_store.values()
        if (not class_name or c.class_name == class_name)
        and (not status or c.status == status)
    ]
    matches.sort(key=lambda c: c.created_at, reverse=True)

    return {
        "total": len(matches),
        "corrections": [c.dict() for c in matches[:limit]]
    }
|
||||
|
||||
|
||||
@router.post("/{correction_id}/analyze", response_model=AnalysisResponse)
async def analyze_correction(
    correction_id: str,
    expected_answers: Optional[Dict[str, str]] = None
):
    """Analyze the OCR-extracted answers and produce a graded evaluation.

    Optionally takes a model solution (``expected_answers``, keyed by the
    1-based question number as a string) for automatic scoring.

    Raises HTTP 404 for unknown ids and HTTP 400 when the correction is in
    the wrong state or has no extracted text.
    """
    correction = corrections_store.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    if correction.status not in [CorrectionStatus.OCR_COMPLETE, CorrectionStatus.ANALYZED]:
        raise HTTPException(
            status_code=400,
            detail=f"Korrektur im falschen Status: {correction.status}"
        )

    if not correction.extracted_text:
        raise HTTPException(status_code=400, detail="Kein extrahierter Text vorhanden")

    try:
        correction.status = CorrectionStatus.ANALYZING
        corrections_store[correction_id] = correction

        # Simple analysis without an LLM: paragraphs approximate tasks.
        # Bug fix: points are distributed over the parts that are actually
        # evaluated (non-trivial text, capped at 10 tasks). Previously the
        # divisor included blank segments and segments beyond the cap, so
        # the possible points never summed to max_points and the suggested
        # grade was systematically deflated.
        parts = [
            p for p in correction.extracted_text.split('\n\n')
            if len(p.strip()) >= 5
        ][:10]
        per_question = correction.max_points / len(parts) if parts else 0

        evaluations = []
        for i, part in enumerate(parts, start=1):
            expected = expected_answers.get(str(i), "") if expected_answers else ""

            # Naive substring comparison; production would use a semantic check.
            is_correct = bool(expected and expected.lower() in part.lower())

            evaluations.append(AnswerEvaluation(
                question_number=i,
                extracted_text=part[:200],  # truncated for the response payload
                points_possible=per_question,
                points_awarded=per_question if is_correct else per_question * 0.5,  # partial credit
                feedback=f"Antwort zu Aufgabe {i}" + (" korrekt." if is_correct else " mit Verbesserungsbedarf."),
                is_correct=is_correct,
                confidence=0.7  # simulated confidence
            ))

        # Overall result and suggested grade.
        total_points = sum(e.points_awarded for e in evaluations)
        percentage = (total_points / correction.max_points * 100) if correction.max_points > 0 else 0
        suggested_grade = calculate_grade(percentage)

        ai_feedback = generate_ai_feedback(
            evaluations, total_points, correction.max_points, correction.subject
        )

        # Persist the analysis on the correction record.
        correction.evaluations = evaluations
        correction.total_points = total_points
        correction.percentage = percentage
        correction.grade = suggested_grade
        correction.ai_feedback = ai_feedback
        correction.status = CorrectionStatus.ANALYZED
        correction.updated_at = datetime.utcnow()
        corrections_store[correction_id] = correction

        logger.info(f"Analysis complete for {correction_id}: {total_points}/{correction.max_points}")

        return AnalysisResponse(
            success=True,
            evaluations=evaluations,
            total_points=total_points,
            percentage=percentage,
            suggested_grade=suggested_grade,
            ai_feedback=ai_feedback
        )

    except Exception as e:
        logger.error(f"Analysis error: {e}")
        correction.status = CorrectionStatus.ERROR
        corrections_store[correction_id] = correction
        return AnalysisResponse(success=False, error=str(e))
|
||||
|
||||
|
||||
@router.put("/{correction_id}", response_model=CorrectionResponse)
async def update_correction(correction_id: str, data: CorrectionUpdate):
    """Apply manual teacher adjustments to a correction.

    Only the fields present in ``data`` are changed; the percentage is
    recomputed whenever points change. Raises HTTP 404 for unknown ids.
    """
    record = corrections_store.get(correction_id)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    def recompute_percentage(points: float) -> float:
        # Zero-point exams would otherwise divide by zero.
        return points / record.max_points * 100 if record.max_points > 0 else 0

    if data.evaluations is not None:
        record.evaluations = data.evaluations
        record.total_points = sum(e.points_awarded for e in data.evaluations)
        record.percentage = recompute_percentage(record.total_points)

    if data.total_points is not None:
        record.total_points = data.total_points
        record.percentage = recompute_percentage(data.total_points)

    if data.grade is not None:
        record.grade = data.grade
    if data.teacher_notes is not None:
        record.teacher_notes = data.teacher_notes
    if data.status is not None:
        record.status = data.status

    record.updated_at = datetime.utcnow()
    corrections_store[correction_id] = record
    return CorrectionResponse(success=True, correction=record)
|
||||
|
||||
|
||||
@router.post("/{correction_id}/complete", response_model=CorrectionResponse)
async def complete_correction(correction_id: str):
    """Mark a correction as finished by the teacher (HTTP 404 if unknown)."""
    record = corrections_store.get(correction_id)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    # Transition to the terminal state and persist the change.
    record.status = CorrectionStatus.COMPLETED
    record.updated_at = datetime.utcnow()
    corrections_store[correction_id] = record

    logger.info(f"Correction {correction_id} completed: {record.grade}")
    return CorrectionResponse(success=True, correction=record)
|
||||
|
||||
|
||||
@router.get("/{correction_id}/export-pdf")
async def export_correction_pdf(correction_id: str):
    """Export a corrected exam as a downloadable PDF.

    The document contains the original scan, the per-task evaluations,
    the feedback texts and the overall result.

    Raises HTTP 404 for unknown ids and HTTP 500 when PDF generation fails.
    """
    correction = corrections_store.get(correction_id)
    if not correction:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    try:
        pdf_service = PDFService()

        # Map the stored correction onto the PDF service's input model.
        correction_data = CorrectionData(
            student=StudentInfo(
                student_id=correction.student_id,
                name=correction.student_name,
                class_name=correction.class_name
            ),
            exam_title=correction.exam_title,
            subject=correction.subject,
            date=correction.created_at.strftime("%d.%m.%Y"),
            max_points=correction.max_points,
            achieved_points=correction.total_points,
            grade=correction.grade or "",
            percentage=correction.percentage,
            corrections=[
                {
                    "question": f"Aufgabe {e.question_number}",
                    "answer": e.extracted_text,
                    "points": f"{e.points_awarded}/{e.points_possible}",
                    "feedback": e.feedback
                }
                for e in correction.evaluations
            ],
            teacher_notes=correction.teacher_notes or "",
            ai_feedback=correction.ai_feedback or ""
        )

        pdf_bytes = pdf_service.generate_correction_pdf(correction_data)

        from fastapi.responses import Response

        # Bug fix: the filename is built from user-supplied student/exam
        # names. HTTP header values must not contain quotes, control
        # characters or (in Starlette) non-latin-1 text, so reduce the
        # name to a conservative ASCII subset before embedding it.
        raw_name = f"{correction.student_name}_{correction.exam_title}"
        safe_name = "".join(
            ch if (ch.isascii() and ch.isalnum()) or ch in "._-" else "_"
            for ch in raw_name
        )
        return Response(
            content=pdf_bytes,
            media_type="application/pdf",
            headers={
                "Content-Disposition": f'attachment; filename="korrektur_{safe_name}.pdf"'
            }
        )

    except Exception as e:
        logger.error(f"PDF export error: {e}")
        raise HTTPException(status_code=500, detail=f"PDF-Export fehlgeschlagen: {str(e)}")
|
||||
|
||||
|
||||
@router.delete("/{correction_id}")
async def delete_correction(correction_id: str):
    """Remove a correction and best-effort delete its uploaded scan."""
    record = corrections_store.pop(correction_id, None)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")

    # The scan on disk is removed opportunistically; a failure here must
    # not prevent the correction itself from being deleted.
    if record.file_path and os.path.exists(record.file_path):
        try:
            os.remove(record.file_path)
        except Exception as e:
            logger.warning(f"Could not delete file {record.file_path}: {e}")

    logger.info(f"Deleted correction {correction_id}")
    return {"status": "deleted", "id": correction_id}
|
||||
|
||||
|
||||
@router.get("/class/{class_name}/summary")
async def get_class_summary(class_name: str):
    """Aggregate statistics over all completed corrections of one class.

    Returns totals, the average percentage/points, a grade histogram and
    one row per student, sorted by student name.
    """
    completed = [
        c for c in corrections_store.values()
        if c.class_name == class_name and c.status == CorrectionStatus.COMPLETED
    ]

    if not completed:
        # Empty summary shape so clients never have to special-case None.
        return {
            "class_name": class_name,
            "total_students": 0,
            "average_percentage": 0,
            "grade_distribution": {},
            "corrections": []
        }

    count = len(completed)
    avg_pct = sum(c.percentage for c in completed) / count
    avg_points = sum(c.total_points for c in completed) / count

    # Histogram of grades; ungraded corrections are bucketed under "?".
    distribution = {}
    for c in completed:
        key = c.grade or "?"
        distribution[key] = distribution.get(key, 0) + 1

    rows = [
        {
            "id": c.id,
            "student_name": c.student_name,
            "total_points": c.total_points,
            "percentage": c.percentage,
            "grade": c.grade
        }
        for c in sorted(completed, key=lambda x: x.student_name)
    ]

    return {
        "class_name": class_name,
        "total_students": count,
        "average_percentage": round(avg_pct, 1),
        "average_points": round(avg_points, 1),
        "grade_distribution": distribution,
        "corrections": rows
    }
|
||||
|
||||
|
||||
@router.post("/{correction_id}/ocr/retry", response_model=CorrectionResponse)
async def retry_ocr(correction_id: str, background_tasks: BackgroundTasks):
    """Re-run OCR for a correction whose first pass failed.

    Raises HTTP 404 for unknown ids and HTTP 400 when no (existing) file
    is attached to the correction.
    """
    record = corrections_store.get(correction_id)
    if record is None:
        raise HTTPException(status_code=404, detail="Korrektur nicht gefunden")
    if not record.file_path:
        raise HTTPException(status_code=400, detail="Keine Datei vorhanden")
    if not os.path.exists(record.file_path):
        raise HTTPException(status_code=400, detail="Datei nicht mehr vorhanden")

    # Reset the correction to its pre-OCR state before queueing the task.
    record.status = CorrectionStatus.UPLOADED
    record.extracted_text = None
    record.updated_at = datetime.utcnow()
    corrections_store[correction_id] = record

    background_tasks.add_task(process_ocr, correction_id, record.file_path)
    return CorrectionResponse(success=True, correction=record)
|
||||
134
backend-lehrer/correction_helpers.py
Normal file
134
backend-lehrer/correction_helpers.py
Normal file
@@ -0,0 +1,134 @@
|
||||
"""
|
||||
Correction API - Helper functions for grading, feedback, and OCR processing.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import List, Dict
|
||||
|
||||
from correction_models import AnswerEvaluation, CorrectionStatus, Correction
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# FileProcessor requires OpenCV with libGL - make optional for CI
|
||||
try:
|
||||
from services.file_processor import FileProcessor, ProcessingResult
|
||||
_ocr_available = True
|
||||
except (ImportError, OSError):
|
||||
FileProcessor = None # type: ignore
|
||||
ProcessingResult = None # type: ignore
|
||||
_ocr_available = False
|
||||
|
||||
# PDF service requires WeasyPrint with system libraries - make optional for CI
|
||||
try:
|
||||
from services.pdf_service import PDFService, CorrectionData, StudentInfo
|
||||
_pdf_available = True
|
||||
except (ImportError, OSError):
|
||||
PDFService = None # type: ignore
|
||||
CorrectionData = None # type: ignore
|
||||
StudentInfo = None # type: ignore
|
||||
_pdf_available = False
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# In-Memory Storage (spaeter durch DB ersetzen)
|
||||
# ============================================================================
|
||||
|
||||
corrections_store: Dict[str, Correction] = {}
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Helper Functions
|
||||
# ============================================================================
|
||||
|
||||
def calculate_grade(percentage: float) -> str:
    """Map a percentage score to a German school grade ("1" best, "6" worst)."""
    # Threshold table replaces the if/elif cascade; first match wins.
    thresholds = ((92, "1"), (81, "2"), (67, "3"), (50, "4"), (30, "5"))
    for cutoff, grade in thresholds:
        if percentage >= cutoff:
            return grade
    return "6"
|
||||
|
||||
|
||||
def generate_ai_feedback(
    evaluations: List[AnswerEvaluation],
    total_points: float,
    max_points: float,
    subject: str
) -> str:
    """Build template-based feedback text from the evaluation results.

    No LLM involved: an intro sentence is chosen from the score band and
    enriched with up to three strengths, up to three weaknesses, and the
    overall result line.
    """
    percentage = (total_points / max_points * 100) if max_points > 0 else 0
    correct_count = sum(1 for e in evaluations if e.is_correct)
    total_count = len(evaluations)

    # Score band -> intro sentence; checked top-down, first match wins.
    bands = (
        (90, "Hervorragende Leistung!"),
        (75, "Gute Arbeit!"),
        (60, "Insgesamt eine solide Leistung."),
        (50, "Die Arbeit zeigt Grundkenntnisse, aber es gibt Verbesserungsbedarf."),
    )
    intro = "Es sind deutliche Wissensluecken erkennbar."
    for cutoff, sentence in bands:
        if percentage >= cutoff:
            intro = sentence
            break

    # Strengths are confident correct answers; weaknesses are all misses.
    strong = [e for e in evaluations if e.is_correct and e.confidence > 0.8]
    weak = [e for e in evaluations if not e.is_correct]

    pieces = [intro]
    if strong:
        pieces.append(
            f"Besonders gut geloest: Aufgabe(n) {', '.join(str(s.question_number) for s in strong[:3])}."
        )
    if weak:
        pieces.append(
            f"Uebungsbedarf bei: Aufgabe(n) {', '.join(str(w.question_number) for w in weak[:3])}."
        )
    pieces.append(
        f"Ergebnis: {correct_count} von {total_count} Aufgaben korrekt ({percentage:.1f}%)."
    )
    return " ".join(pieces)
|
||||
|
||||
|
||||
async def process_ocr(correction_id: str, file_path: str):
    """Background task: run OCR on the uploaded scan and store the text.

    Transitions the correction to PROCESSING, then to OCR_COMPLETE on
    success or ERROR on failure. Unknown correction ids are ignored.
    """
    from datetime import datetime

    correction = corrections_store.get(correction_id)
    if not correction:
        return

    # Bug fix: when the optional OCR stack is not installed, FileProcessor
    # is None and the old code crashed with a TypeError that was logged as
    # a generic OCR error. Fail fast with a clear message instead.
    if not _ocr_available:
        logger.error(f"OCR unavailable, cannot process correction {correction_id}")
        correction.status = CorrectionStatus.ERROR
        correction.updated_at = datetime.utcnow()
        corrections_store[correction_id] = correction
        return

    try:
        correction.status = CorrectionStatus.PROCESSING
        corrections_store[correction_id] = correction

        # Run OCR on the stored file.
        processor = FileProcessor()
        result = processor.process_file(file_path)

        if result.success and result.text:
            correction.extracted_text = result.text
            correction.status = CorrectionStatus.OCR_COMPLETE
        else:
            correction.status = CorrectionStatus.ERROR

        correction.updated_at = datetime.utcnow()
        corrections_store[correction_id] = correction

    except Exception as e:
        logger.error(f"OCR error for {correction_id}: {e}")
        correction.status = CorrectionStatus.ERROR
        correction.updated_at = datetime.utcnow()
        corrections_store[correction_id] = correction
|
||||
111
backend-lehrer/correction_models.py
Normal file
111
backend-lehrer/correction_models.py
Normal file
@@ -0,0 +1,111 @@
|
||||
"""
|
||||
Correction API - Pydantic models and enums.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Any, Optional
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
# Upload directory
|
||||
UPLOAD_DIR = Path("/tmp/corrections")
|
||||
UPLOAD_DIR.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Enums and Models
|
||||
# ============================================================================
|
||||
|
||||
class CorrectionStatus(str, Enum):
    """Lifecycle status of a correction."""
    UPLOADED = "uploaded"          # file uploaded
    PROCESSING = "processing"      # OCR running
    OCR_COMPLETE = "ocr_complete"  # OCR finished
    ANALYZING = "analyzing"        # analysis running
    ANALYZED = "analyzed"          # analysis finished
    REVIEWING = "reviewing"        # teacher is reviewing
    COMPLETED = "completed"        # correction finalized
    ERROR = "error"                # an error occurred
|
||||
|
||||
|
||||
class AnswerEvaluation(BaseModel):
    """Evaluation of a single answer within a corrected exam."""
    question_number: int  # 1-based task index
    extracted_text: str   # OCR text for this answer (may be truncated)
    points_possible: float
    points_awarded: float
    feedback: str         # human-readable per-answer feedback
    is_correct: bool
    confidence: float     # 0-1, how confident the OCR/analysis is
|
||||
|
||||
|
||||
class CorrectionCreate(BaseModel):
    """Request payload for creating a new correction (scan uploaded separately)."""
    student_id: str
    student_name: str
    class_name: str
    exam_title: str
    subject: str
    max_points: float = Field(default=100.0, ge=0)
    expected_answers: Optional[Dict[str, str]] = None  # model solution, keyed by question number
|
||||
|
||||
|
||||
class CorrectionUpdate(BaseModel):
    """Request payload for partially updating a correction; all fields optional."""
    evaluations: Optional[List[AnswerEvaluation]] = None
    total_points: Optional[float] = None
    grade: Optional[str] = None
    teacher_notes: Optional[str] = None
    status: Optional[CorrectionStatus] = None
|
||||
|
||||
|
||||
class Correction(BaseModel):
    """A single exam correction with its full processing state."""
    id: str
    student_id: str
    student_name: str
    class_name: str
    exam_title: str
    subject: str
    max_points: float
    total_points: float = 0.0
    percentage: float = 0.0  # total_points / max_points * 100
    grade: Optional[str] = None  # German grade "1".."6" once graded
    status: CorrectionStatus
    file_path: Optional[str] = None  # uploaded scan on disk, if any
    extracted_text: Optional[str] = None  # OCR result
    # NOTE: pydantic copies mutable defaults per instance, so the bare []
    # default is safe here (unlike in a plain class).
    evaluations: List[AnswerEvaluation] = []
    teacher_notes: Optional[str] = None
    ai_feedback: Optional[str] = None
    created_at: datetime
    updated_at: datetime
|
||||
|
||||
|
||||
class CorrectionResponse(BaseModel):
    """Generic response wrapper for a single correction; error set on failure."""
    success: bool
    correction: Optional[Correction] = None
    error: Optional[str] = None
|
||||
|
||||
|
||||
class OCRResponse(BaseModel):
    """Response carrying an OCR result."""
    success: bool
    extracted_text: Optional[str] = None
    regions: List[Dict[str, Any]] = []  # per-region OCR details (pydantic copies the default)
    confidence: float = 0.0  # 0-1 overall OCR confidence
    error: Optional[str] = None
|
||||
|
||||
|
||||
class AnalysisResponse(BaseModel):
    """Response carrying the result of the answer analysis."""
    success: bool
    evaluations: List[AnswerEvaluation] = []  # pydantic copies the default per instance
    total_points: float = 0.0
    percentage: float = 0.0
    suggested_grade: Optional[str] = None  # German grade "1".."6"
    ai_feedback: Optional[str] = None
    error: Optional[str] = None
|
||||
@@ -3,149 +3,29 @@
|
||||
# ==============================================
|
||||
# Async PostgreSQL database access for game sessions
|
||||
# and student learning state.
|
||||
#
|
||||
# Barrel re-export: all public symbols are importable from here.
|
||||
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional, List, Dict, Any
|
||||
from dataclasses import dataclass, field
|
||||
from enum import IntEnum
|
||||
from typing import Optional
|
||||
|
||||
from .database_models import (
|
||||
GAME_DB_URL,
|
||||
LearningLevel,
|
||||
StudentLearningState,
|
||||
GameSessionRecord,
|
||||
GameQuizAnswer,
|
||||
Achievement,
|
||||
ACHIEVEMENTS,
|
||||
)
|
||||
from .database_learning import LearningStateMixin
|
||||
from .database_sessions import SessionsMixin
|
||||
from .database_extras import ExtrasMixin
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Database URL from environment
|
||||
GAME_DB_URL = os.getenv(
|
||||
"DATABASE_URL",
|
||||
"postgresql://breakpilot:breakpilot123@localhost:5432/breakpilot"
|
||||
)
|
||||
|
||||
|
||||
class LearningLevel(IntEnum):
|
||||
"""Learning level enum mapping to grade ranges."""
|
||||
BEGINNER = 1 # Klasse 2-3
|
||||
ELEMENTARY = 2 # Klasse 3-4
|
||||
INTERMEDIATE = 3 # Klasse 4-5
|
||||
ADVANCED = 4 # Klasse 5-6
|
||||
EXPERT = 5 # Klasse 6+
|
||||
|
||||
|
||||
@dataclass
|
||||
class StudentLearningState:
|
||||
"""Student learning state data model."""
|
||||
id: Optional[str] = None
|
||||
student_id: str = ""
|
||||
overall_level: int = 3
|
||||
math_level: float = 3.0
|
||||
german_level: float = 3.0
|
||||
english_level: float = 3.0
|
||||
total_play_time_minutes: int = 0
|
||||
total_sessions: int = 0
|
||||
questions_answered: int = 0
|
||||
questions_correct: int = 0
|
||||
created_at: Optional[datetime] = None
|
||||
updated_at: Optional[datetime] = None
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary."""
|
||||
return {
|
||||
"id": self.id,
|
||||
"student_id": self.student_id,
|
||||
"overall_level": self.overall_level,
|
||||
"math_level": self.math_level,
|
||||
"german_level": self.german_level,
|
||||
"english_level": self.english_level,
|
||||
"total_play_time_minutes": self.total_play_time_minutes,
|
||||
"total_sessions": self.total_sessions,
|
||||
"questions_answered": self.questions_answered,
|
||||
"questions_correct": self.questions_correct,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
|
||||
|
||||
@property
|
||||
def accuracy(self) -> float:
|
||||
"""Calculate overall accuracy percentage."""
|
||||
if self.questions_answered == 0:
|
||||
return 0.0
|
||||
return self.questions_correct / self.questions_answered
|
||||
|
||||
|
||||
@dataclass
|
||||
class GameSessionRecord:
|
||||
"""Game session record for database storage."""
|
||||
id: Optional[str] = None
|
||||
student_id: str = ""
|
||||
game_mode: str = "video"
|
||||
duration_seconds: int = 0
|
||||
distance_traveled: float = 0.0
|
||||
score: int = 0
|
||||
questions_answered: int = 0
|
||||
questions_correct: int = 0
|
||||
difficulty_level: int = 3
|
||||
started_at: Optional[datetime] = None
|
||||
ended_at: Optional[datetime] = None
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class GameQuizAnswer:
|
||||
"""Individual quiz answer record."""
|
||||
id: Optional[str] = None
|
||||
session_id: Optional[str] = None
|
||||
question_id: str = ""
|
||||
subject: str = ""
|
||||
difficulty: int = 3
|
||||
is_correct: bool = False
|
||||
answer_time_ms: int = 0
|
||||
created_at: Optional[datetime] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class Achievement:
|
||||
"""Achievement definition and unlock status."""
|
||||
id: str
|
||||
name: str
|
||||
description: str
|
||||
icon: str = "star"
|
||||
category: str = "general" # general, streak, accuracy, time, score
|
||||
threshold: int = 1
|
||||
unlocked: bool = False
|
||||
unlocked_at: Optional[datetime] = None
|
||||
progress: int = 0
|
||||
|
||||
|
||||
# Achievement definitions (static, not in DB)
|
||||
ACHIEVEMENTS = [
|
||||
# Erste Schritte
|
||||
Achievement(id="first_game", name="Erste Fahrt", description="Spiele dein erstes Spiel", icon="rocket", category="general", threshold=1),
|
||||
Achievement(id="five_games", name="Regelmaessiger Fahrer", description="Spiele 5 Spiele", icon="car", category="general", threshold=5),
|
||||
Achievement(id="twenty_games", name="Erfahrener Pilot", description="Spiele 20 Spiele", icon="trophy", category="general", threshold=20),
|
||||
|
||||
# Serien
|
||||
Achievement(id="streak_3", name="Guter Start", description="3 richtige Antworten hintereinander", icon="fire", category="streak", threshold=3),
|
||||
Achievement(id="streak_5", name="Auf Feuer", description="5 richtige Antworten hintereinander", icon="fire", category="streak", threshold=5),
|
||||
Achievement(id="streak_10", name="Unaufhaltsam", description="10 richtige Antworten hintereinander", icon="fire", category="streak", threshold=10),
|
||||
|
||||
# Genauigkeit
|
||||
Achievement(id="perfect_game", name="Perfektes Spiel", description="100% richtig in einem Spiel (min. 5 Fragen)", icon="star", category="accuracy", threshold=100),
|
||||
Achievement(id="accuracy_80", name="Scharfschuetze", description="80% Gesamtgenauigkeit (min. 50 Fragen)", icon="target", category="accuracy", threshold=80),
|
||||
|
||||
# Zeit
|
||||
Achievement(id="play_30min", name="Ausdauer", description="30 Minuten Gesamtspielzeit", icon="clock", category="time", threshold=30),
|
||||
Achievement(id="play_60min", name="Marathon", description="60 Minuten Gesamtspielzeit", icon="clock", category="time", threshold=60),
|
||||
|
||||
# Score
|
||||
Achievement(id="score_5000", name="Punktejaeger", description="5.000 Punkte in einem Spiel", icon="gem", category="score", threshold=5000),
|
||||
Achievement(id="score_10000", name="Highscore Hero", description="10.000 Punkte in einem Spiel", icon="crown", category="score", threshold=10000),
|
||||
|
||||
# Level
|
||||
Achievement(id="level_up", name="Aufsteiger", description="Erreiche Level 2", icon="arrow-up", category="level", threshold=2),
|
||||
Achievement(id="master", name="Meister", description="Erreiche Level 5", icon="medal", category="level", threshold=5),
|
||||
]
|
||||
|
||||
|
||||
class GameDatabase:
|
||||
class GameDatabase(LearningStateMixin, SessionsMixin, ExtrasMixin):
|
||||
"""
|
||||
Async database access for Breakpilot Drive game data.
|
||||
|
||||
@@ -187,588 +67,6 @@ class GameDatabase:
|
||||
if not self._connected:
|
||||
await self.connect()
|
||||
|
||||
# ==============================================
|
||||
# Learning State Methods
|
||||
# ==============================================
|
||||
|
||||
async def get_learning_state(self, student_id: str) -> Optional[StudentLearningState]:
|
||||
"""Get learning state for a student."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return None
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
row = await conn.fetchrow(
|
||||
"""
|
||||
SELECT id, student_id, overall_level, math_level, german_level,
|
||||
english_level, total_play_time_minutes, total_sessions,
|
||||
questions_answered, questions_correct, created_at, updated_at
|
||||
FROM student_learning_state
|
||||
WHERE student_id = $1
|
||||
""",
|
||||
student_id
|
||||
)
|
||||
|
||||
if row:
|
||||
return StudentLearningState(
|
||||
id=str(row["id"]),
|
||||
student_id=str(row["student_id"]),
|
||||
overall_level=row["overall_level"],
|
||||
math_level=float(row["math_level"]),
|
||||
german_level=float(row["german_level"]),
|
||||
english_level=float(row["english_level"]),
|
||||
total_play_time_minutes=row["total_play_time_minutes"],
|
||||
total_sessions=row["total_sessions"],
|
||||
questions_answered=row["questions_answered"] or 0,
|
||||
questions_correct=row["questions_correct"] or 0,
|
||||
created_at=row["created_at"],
|
||||
updated_at=row["updated_at"],
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get learning state: {e}")
|
||||
|
||||
return None
|
||||
|
||||
async def create_or_update_learning_state(
|
||||
self,
|
||||
student_id: str,
|
||||
overall_level: int = 3,
|
||||
math_level: float = 3.0,
|
||||
german_level: float = 3.0,
|
||||
english_level: float = 3.0,
|
||||
) -> Optional[StudentLearningState]:
|
||||
"""Create or update learning state for a student."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return None
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
row = await conn.fetchrow(
|
||||
"""
|
||||
INSERT INTO student_learning_state (
|
||||
student_id, overall_level, math_level, german_level, english_level
|
||||
) VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT (student_id) DO UPDATE SET
|
||||
overall_level = EXCLUDED.overall_level,
|
||||
math_level = EXCLUDED.math_level,
|
||||
german_level = EXCLUDED.german_level,
|
||||
english_level = EXCLUDED.english_level,
|
||||
updated_at = NOW()
|
||||
RETURNING id, student_id, overall_level, math_level, german_level,
|
||||
english_level, total_play_time_minutes, total_sessions,
|
||||
questions_answered, questions_correct, created_at, updated_at
|
||||
""",
|
||||
student_id, overall_level, math_level, german_level, english_level
|
||||
)
|
||||
|
||||
if row:
|
||||
return StudentLearningState(
|
||||
id=str(row["id"]),
|
||||
student_id=str(row["student_id"]),
|
||||
overall_level=row["overall_level"],
|
||||
math_level=float(row["math_level"]),
|
||||
german_level=float(row["german_level"]),
|
||||
english_level=float(row["english_level"]),
|
||||
total_play_time_minutes=row["total_play_time_minutes"],
|
||||
total_sessions=row["total_sessions"],
|
||||
questions_answered=row["questions_answered"] or 0,
|
||||
questions_correct=row["questions_correct"] or 0,
|
||||
created_at=row["created_at"],
|
||||
updated_at=row["updated_at"],
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to create/update learning state: {e}")
|
||||
|
||||
return None
|
||||
|
||||
async def update_learning_stats(
|
||||
self,
|
||||
student_id: str,
|
||||
duration_minutes: int,
|
||||
questions_answered: int,
|
||||
questions_correct: int,
|
||||
new_level: Optional[int] = None,
|
||||
) -> bool:
|
||||
"""Update learning stats after a game session."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return False
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
if new_level is not None:
|
||||
await conn.execute(
|
||||
"""
|
||||
UPDATE student_learning_state SET
|
||||
total_play_time_minutes = total_play_time_minutes + $2,
|
||||
total_sessions = total_sessions + 1,
|
||||
questions_answered = COALESCE(questions_answered, 0) + $3,
|
||||
questions_correct = COALESCE(questions_correct, 0) + $4,
|
||||
overall_level = $5,
|
||||
updated_at = NOW()
|
||||
WHERE student_id = $1
|
||||
""",
|
||||
student_id, duration_minutes, questions_answered,
|
||||
questions_correct, new_level
|
||||
)
|
||||
else:
|
||||
await conn.execute(
|
||||
"""
|
||||
UPDATE student_learning_state SET
|
||||
total_play_time_minutes = total_play_time_minutes + $2,
|
||||
total_sessions = total_sessions + 1,
|
||||
questions_answered = COALESCE(questions_answered, 0) + $3,
|
||||
questions_correct = COALESCE(questions_correct, 0) + $4,
|
||||
updated_at = NOW()
|
||||
WHERE student_id = $1
|
||||
""",
|
||||
student_id, duration_minutes, questions_answered, questions_correct
|
||||
)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update learning stats: {e}")
|
||||
|
||||
return False
|
||||
|
||||
# ==============================================
|
||||
# Game Session Methods
|
||||
# ==============================================
|
||||
|
||||
async def save_game_session(
|
||||
self,
|
||||
student_id: str,
|
||||
game_mode: str,
|
||||
duration_seconds: int,
|
||||
distance_traveled: float,
|
||||
score: int,
|
||||
questions_answered: int,
|
||||
questions_correct: int,
|
||||
difficulty_level: int,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
) -> Optional[str]:
|
||||
"""Save a game session and return the session ID."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return None
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
row = await conn.fetchrow(
|
||||
"""
|
||||
INSERT INTO game_sessions (
|
||||
student_id, game_mode, duration_seconds, distance_traveled,
|
||||
score, questions_answered, questions_correct, difficulty_level,
|
||||
started_at, ended_at, metadata
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8,
|
||||
NOW() - make_interval(secs => $3), NOW(), $9)
|
||||
RETURNING id
|
||||
""",
|
||||
student_id, game_mode, duration_seconds, distance_traveled,
|
||||
score, questions_answered, questions_correct, difficulty_level,
|
||||
json.dumps(metadata) if metadata else None
|
||||
)
|
||||
|
||||
if row:
|
||||
return str(row["id"])
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save game session: {e}")
|
||||
|
||||
return None
|
||||
|
||||
async def get_user_sessions(
|
||||
self,
|
||||
student_id: str,
|
||||
limit: int = 10
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get recent game sessions for a user."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return []
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
rows = await conn.fetch(
|
||||
"""
|
||||
SELECT id, student_id, game_mode, duration_seconds, distance_traveled,
|
||||
score, questions_answered, questions_correct, difficulty_level,
|
||||
started_at, ended_at, metadata
|
||||
FROM game_sessions
|
||||
WHERE student_id = $1
|
||||
ORDER BY ended_at DESC
|
||||
LIMIT $2
|
||||
""",
|
||||
student_id, limit
|
||||
)
|
||||
|
||||
return [
|
||||
{
|
||||
"session_id": str(row["id"]),
|
||||
"user_id": str(row["student_id"]),
|
||||
"game_mode": row["game_mode"],
|
||||
"duration_seconds": row["duration_seconds"],
|
||||
"distance_traveled": float(row["distance_traveled"]) if row["distance_traveled"] else 0.0,
|
||||
"score": row["score"],
|
||||
"questions_answered": row["questions_answered"],
|
||||
"questions_correct": row["questions_correct"],
|
||||
"difficulty_level": row["difficulty_level"],
|
||||
"started_at": row["started_at"].isoformat() if row["started_at"] else None,
|
||||
"ended_at": row["ended_at"].isoformat() if row["ended_at"] else None,
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get user sessions: {e}")
|
||||
|
||||
return []
|
||||
|
||||
async def get_leaderboard(
|
||||
self,
|
||||
timeframe: str = "day",
|
||||
limit: int = 10
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get leaderboard data."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return []
|
||||
|
||||
# Timeframe filter
|
||||
timeframe_sql = {
|
||||
"day": "ended_at > NOW() - INTERVAL '1 day'",
|
||||
"week": "ended_at > NOW() - INTERVAL '7 days'",
|
||||
"month": "ended_at > NOW() - INTERVAL '30 days'",
|
||||
"all": "1=1",
|
||||
}.get(timeframe, "1=1")
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
rows = await conn.fetch(
|
||||
f"""
|
||||
SELECT student_id, SUM(score) as total_score
|
||||
FROM game_sessions
|
||||
WHERE {timeframe_sql}
|
||||
GROUP BY student_id
|
||||
ORDER BY total_score DESC
|
||||
LIMIT $1
|
||||
""",
|
||||
limit
|
||||
)
|
||||
|
||||
return [
|
||||
{
|
||||
"rank": i + 1,
|
||||
"user_id": str(row["student_id"]),
|
||||
"total_score": int(row["total_score"]),
|
||||
}
|
||||
for i, row in enumerate(rows)
|
||||
]
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get leaderboard: {e}")
|
||||
|
||||
return []
|
||||
|
||||
# ==============================================
|
||||
# Quiz Answer Methods
|
||||
# ==============================================
|
||||
|
||||
async def save_quiz_answer(
|
||||
self,
|
||||
session_id: str,
|
||||
question_id: str,
|
||||
subject: str,
|
||||
difficulty: int,
|
||||
is_correct: bool,
|
||||
answer_time_ms: int,
|
||||
) -> bool:
|
||||
"""Save an individual quiz answer."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return False
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO game_quiz_answers (
|
||||
session_id, question_id, subject, difficulty,
|
||||
is_correct, answer_time_ms
|
||||
) VALUES ($1, $2, $3, $4, $5, $6)
|
||||
""",
|
||||
session_id, question_id, subject, difficulty,
|
||||
is_correct, answer_time_ms
|
||||
)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save quiz answer: {e}")
|
||||
|
||||
return False
|
||||
|
||||
async def get_subject_stats(
|
||||
self,
|
||||
student_id: str
|
||||
) -> Dict[str, Dict[str, Any]]:
|
||||
"""Get per-subject statistics for a student."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return {}
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
rows = await conn.fetch(
|
||||
"""
|
||||
SELECT
|
||||
qa.subject,
|
||||
COUNT(*) as total,
|
||||
SUM(CASE WHEN qa.is_correct THEN 1 ELSE 0 END) as correct,
|
||||
AVG(qa.answer_time_ms) as avg_time_ms
|
||||
FROM game_quiz_answers qa
|
||||
JOIN game_sessions gs ON qa.session_id = gs.id
|
||||
WHERE gs.student_id = $1
|
||||
GROUP BY qa.subject
|
||||
""",
|
||||
student_id
|
||||
)
|
||||
|
||||
return {
|
||||
row["subject"]: {
|
||||
"total": row["total"],
|
||||
"correct": row["correct"],
|
||||
"accuracy": row["correct"] / row["total"] if row["total"] > 0 else 0.0,
|
||||
"avg_time_ms": int(row["avg_time_ms"]) if row["avg_time_ms"] else 0,
|
||||
}
|
||||
for row in rows
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get subject stats: {e}")
|
||||
|
||||
return {}
|
||||
|
||||
# ==============================================
|
||||
# Extended Leaderboard Methods
|
||||
# ==============================================
|
||||
|
||||
async def get_class_leaderboard(
|
||||
self,
|
||||
class_id: str,
|
||||
timeframe: str = "week",
|
||||
limit: int = 10
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get leaderboard filtered by class.
|
||||
|
||||
Note: Requires class_id to be stored in user metadata or
|
||||
a separate class_memberships table. For now, this is a
|
||||
placeholder that can be extended.
|
||||
"""
|
||||
# For now, fall back to regular leaderboard
|
||||
# TODO: Join with class_memberships table when available
|
||||
return await self.get_leaderboard(timeframe, limit)
|
||||
|
||||
async def get_leaderboard_with_names(
|
||||
self,
|
||||
timeframe: str = "day",
|
||||
limit: int = 10,
|
||||
anonymize: bool = True
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get leaderboard with anonymized display names."""
|
||||
leaderboard = await self.get_leaderboard(timeframe, limit)
|
||||
|
||||
# Anonymize names for privacy (e.g., "Spieler 1", "Spieler 2")
|
||||
if anonymize:
|
||||
for entry in leaderboard:
|
||||
entry["display_name"] = f"Spieler {entry['rank']}"
|
||||
else:
|
||||
# In production: Join with users table to get real names
|
||||
for entry in leaderboard:
|
||||
entry["display_name"] = f"Spieler {entry['rank']}"
|
||||
|
||||
return leaderboard
|
||||
|
||||
# ==============================================
|
||||
# Parent Dashboard Methods
|
||||
# ==============================================
|
||||
|
||||
async def get_children_stats(
|
||||
self,
|
||||
children_ids: List[str]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get stats for multiple children (parent dashboard)."""
|
||||
if not children_ids:
|
||||
return []
|
||||
|
||||
results = []
|
||||
for child_id in children_ids:
|
||||
state = await self.get_learning_state(child_id)
|
||||
sessions = await self.get_user_sessions(child_id, limit=5)
|
||||
|
||||
results.append({
|
||||
"student_id": child_id,
|
||||
"learning_state": state.to_dict() if state else None,
|
||||
"recent_sessions": sessions,
|
||||
"has_played": state is not None and state.total_sessions > 0,
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
async def get_progress_over_time(
|
||||
self,
|
||||
student_id: str,
|
||||
days: int = 30
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get learning progress over time for charts."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return []
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
rows = await conn.fetch(
|
||||
"""
|
||||
SELECT
|
||||
DATE(ended_at) as date,
|
||||
COUNT(*) as sessions,
|
||||
SUM(score) as total_score,
|
||||
SUM(questions_answered) as questions,
|
||||
SUM(questions_correct) as correct,
|
||||
AVG(difficulty_level) as avg_difficulty
|
||||
FROM game_sessions
|
||||
WHERE student_id = $1
|
||||
AND ended_at > NOW() - make_interval(days => $2)
|
||||
GROUP BY DATE(ended_at)
|
||||
ORDER BY date ASC
|
||||
""",
|
||||
student_id, days
|
||||
)
|
||||
|
||||
return [
|
||||
{
|
||||
"date": row["date"].isoformat(),
|
||||
"sessions": row["sessions"],
|
||||
"total_score": int(row["total_score"]),
|
||||
"questions": row["questions"],
|
||||
"correct": row["correct"],
|
||||
"accuracy": row["correct"] / row["questions"] if row["questions"] > 0 else 0,
|
||||
"avg_difficulty": float(row["avg_difficulty"]) if row["avg_difficulty"] else 3.0,
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get progress over time: {e}")
|
||||
|
||||
return []
|
||||
|
||||
# ==============================================
|
||||
# Achievement Methods
|
||||
# ==============================================
|
||||
|
||||
async def get_student_achievements(
|
||||
self,
|
||||
student_id: str
|
||||
) -> List[Achievement]:
|
||||
"""Get achievements with unlock status for a student."""
|
||||
await self._ensure_connected()
|
||||
|
||||
# Get student stats for progress calculation
|
||||
state = await self.get_learning_state(student_id)
|
||||
|
||||
# Calculate progress for each achievement
|
||||
achievements = []
|
||||
for a in ACHIEVEMENTS:
|
||||
achievement = Achievement(
|
||||
id=a.id,
|
||||
name=a.name,
|
||||
description=a.description,
|
||||
icon=a.icon,
|
||||
category=a.category,
|
||||
threshold=a.threshold,
|
||||
)
|
||||
|
||||
# Calculate progress based on category
|
||||
if state:
|
||||
if a.category == "general":
|
||||
achievement.progress = state.total_sessions
|
||||
achievement.unlocked = state.total_sessions >= a.threshold
|
||||
elif a.category == "time":
|
||||
achievement.progress = state.total_play_time_minutes
|
||||
achievement.unlocked = state.total_play_time_minutes >= a.threshold
|
||||
elif a.category == "level":
|
||||
achievement.progress = state.overall_level
|
||||
achievement.unlocked = state.overall_level >= a.threshold
|
||||
elif a.category == "accuracy":
|
||||
if a.id == "accuracy_80" and state.questions_answered >= 50:
|
||||
achievement.progress = int(state.accuracy * 100)
|
||||
achievement.unlocked = state.accuracy >= 0.8
|
||||
|
||||
achievements.append(achievement)
|
||||
|
||||
# Check DB for unlocked achievements (streak, score, perfect game)
|
||||
if self._pool:
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
# Check for score achievements
|
||||
max_score = await conn.fetchval(
|
||||
"SELECT MAX(score) FROM game_sessions WHERE student_id = $1",
|
||||
student_id
|
||||
)
|
||||
if max_score:
|
||||
for a in achievements:
|
||||
if a.category == "score":
|
||||
a.progress = max_score
|
||||
a.unlocked = max_score >= a.threshold
|
||||
|
||||
# Check for perfect game
|
||||
perfect = await conn.fetchval(
|
||||
"""
|
||||
SELECT COUNT(*) FROM game_sessions
|
||||
WHERE student_id = $1
|
||||
AND questions_answered >= 5
|
||||
AND questions_correct = questions_answered
|
||||
""",
|
||||
student_id
|
||||
)
|
||||
for a in achievements:
|
||||
if a.id == "perfect_game":
|
||||
a.progress = 100 if perfect and perfect > 0 else 0
|
||||
a.unlocked = perfect is not None and perfect > 0
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to check achievements: {e}")
|
||||
|
||||
return achievements
|
||||
|
||||
async def check_new_achievements(
|
||||
self,
|
||||
student_id: str,
|
||||
session_score: int,
|
||||
session_accuracy: float,
|
||||
streak: int
|
||||
) -> List[Achievement]:
|
||||
"""
|
||||
Check for newly unlocked achievements after a session.
|
||||
Returns list of newly unlocked achievements.
|
||||
"""
|
||||
all_achievements = await self.get_student_achievements(student_id)
|
||||
newly_unlocked = []
|
||||
|
||||
for a in all_achievements:
|
||||
# Check streak achievements
|
||||
if a.category == "streak" and streak >= a.threshold and not a.unlocked:
|
||||
a.unlocked = True
|
||||
newly_unlocked.append(a)
|
||||
|
||||
# Check score achievements
|
||||
if a.category == "score" and session_score >= a.threshold and not a.unlocked:
|
||||
a.unlocked = True
|
||||
newly_unlocked.append(a)
|
||||
|
||||
# Check perfect game
|
||||
if a.id == "perfect_game" and session_accuracy == 1.0:
|
||||
if not a.unlocked:
|
||||
a.unlocked = True
|
||||
newly_unlocked.append(a)
|
||||
|
||||
return newly_unlocked
|
||||
|
||||
|
||||
# Global database instance
|
||||
_game_db: Optional[GameDatabase] = None
|
||||
|
||||
236
backend-lehrer/game/database_extras.py
Normal file
236
backend-lehrer/game/database_extras.py
Normal file
@@ -0,0 +1,236 @@
|
||||
# ==============================================
|
||||
# Breakpilot Drive - Extended DB Methods
|
||||
# ==============================================
|
||||
# Leaderboard extensions, parent dashboard, achievements, progress.
|
||||
|
||||
import logging
|
||||
from typing import List, Dict, Any
|
||||
|
||||
from .database_models import Achievement, ACHIEVEMENTS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExtrasMixin:
|
||||
"""Mixin providing leaderboard, parent dashboard, and achievement methods."""
|
||||
|
||||
async def get_class_leaderboard(
|
||||
self,
|
||||
class_id: str,
|
||||
timeframe: str = "week",
|
||||
limit: int = 10
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get leaderboard filtered by class.
|
||||
|
||||
Note: Requires class_id to be stored in user metadata or
|
||||
a separate class_memberships table. For now, this is a
|
||||
placeholder that can be extended.
|
||||
"""
|
||||
# For now, fall back to regular leaderboard
|
||||
# TODO: Join with class_memberships table when available
|
||||
return await self.get_leaderboard(timeframe, limit)
|
||||
|
||||
async def get_leaderboard_with_names(
|
||||
self,
|
||||
timeframe: str = "day",
|
||||
limit: int = 10,
|
||||
anonymize: bool = True
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get leaderboard with anonymized display names."""
|
||||
leaderboard = await self.get_leaderboard(timeframe, limit)
|
||||
|
||||
# Anonymize names for privacy (e.g., "Spieler 1", "Spieler 2")
|
||||
if anonymize:
|
||||
for entry in leaderboard:
|
||||
entry["display_name"] = f"Spieler {entry['rank']}"
|
||||
else:
|
||||
# In production: Join with users table to get real names
|
||||
for entry in leaderboard:
|
||||
entry["display_name"] = f"Spieler {entry['rank']}"
|
||||
|
||||
return leaderboard
|
||||
|
||||
# ==============================================
|
||||
# Parent Dashboard Methods
|
||||
# ==============================================
|
||||
|
||||
async def get_children_stats(
|
||||
self,
|
||||
children_ids: List[str]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get stats for multiple children (parent dashboard)."""
|
||||
if not children_ids:
|
||||
return []
|
||||
|
||||
results = []
|
||||
for child_id in children_ids:
|
||||
state = await self.get_learning_state(child_id)
|
||||
sessions = await self.get_user_sessions(child_id, limit=5)
|
||||
|
||||
results.append({
|
||||
"student_id": child_id,
|
||||
"learning_state": state.to_dict() if state else None,
|
||||
"recent_sessions": sessions,
|
||||
"has_played": state is not None and state.total_sessions > 0,
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
async def get_progress_over_time(
|
||||
self,
|
||||
student_id: str,
|
||||
days: int = 30
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get learning progress over time for charts."""
|
||||
await self._ensure_connected()
|
||||
if not self._pool:
|
||||
return []
|
||||
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
rows = await conn.fetch(
|
||||
"""
|
||||
SELECT
|
||||
DATE(ended_at) as date,
|
||||
COUNT(*) as sessions,
|
||||
SUM(score) as total_score,
|
||||
SUM(questions_answered) as questions,
|
||||
SUM(questions_correct) as correct,
|
||||
AVG(difficulty_level) as avg_difficulty
|
||||
FROM game_sessions
|
||||
WHERE student_id = $1
|
||||
AND ended_at > NOW() - make_interval(days => $2)
|
||||
GROUP BY DATE(ended_at)
|
||||
ORDER BY date ASC
|
||||
""",
|
||||
student_id, days
|
||||
)
|
||||
|
||||
return [
|
||||
{
|
||||
"date": row["date"].isoformat(),
|
||||
"sessions": row["sessions"],
|
||||
"total_score": int(row["total_score"]),
|
||||
"questions": row["questions"],
|
||||
"correct": row["correct"],
|
||||
"accuracy": row["correct"] / row["questions"] if row["questions"] > 0 else 0,
|
||||
"avg_difficulty": float(row["avg_difficulty"]) if row["avg_difficulty"] else 3.0,
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get progress over time: {e}")
|
||||
|
||||
return []
|
||||
|
||||
# ==============================================
|
||||
# Achievement Methods
|
||||
# ==============================================
|
||||
|
||||
async def get_student_achievements(
|
||||
self,
|
||||
student_id: str
|
||||
) -> List[Achievement]:
|
||||
"""Get achievements with unlock status for a student."""
|
||||
await self._ensure_connected()
|
||||
|
||||
# Get student stats for progress calculation
|
||||
state = await self.get_learning_state(student_id)
|
||||
|
||||
# Calculate progress for each achievement
|
||||
achievements = []
|
||||
for a in ACHIEVEMENTS:
|
||||
achievement = Achievement(
|
||||
id=a.id,
|
||||
name=a.name,
|
||||
description=a.description,
|
||||
icon=a.icon,
|
||||
category=a.category,
|
||||
threshold=a.threshold,
|
||||
)
|
||||
|
||||
# Calculate progress based on category
|
||||
if state:
|
||||
if a.category == "general":
|
||||
achievement.progress = state.total_sessions
|
||||
achievement.unlocked = state.total_sessions >= a.threshold
|
||||
elif a.category == "time":
|
||||
achievement.progress = state.total_play_time_minutes
|
||||
achievement.unlocked = state.total_play_time_minutes >= a.threshold
|
||||
elif a.category == "level":
|
||||
achievement.progress = state.overall_level
|
||||
achievement.unlocked = state.overall_level >= a.threshold
|
||||
elif a.category == "accuracy":
|
||||
if a.id == "accuracy_80" and state.questions_answered >= 50:
|
||||
achievement.progress = int(state.accuracy * 100)
|
||||
achievement.unlocked = state.accuracy >= 0.8
|
||||
|
||||
achievements.append(achievement)
|
||||
|
||||
# Check DB for unlocked achievements (streak, score, perfect game)
|
||||
if self._pool:
|
||||
try:
|
||||
async with self._pool.acquire() as conn:
|
||||
# Check for score achievements
|
||||
max_score = await conn.fetchval(
|
||||
"SELECT MAX(score) FROM game_sessions WHERE student_id = $1",
|
||||
student_id
|
||||
)
|
||||
if max_score:
|
||||
for a in achievements:
|
||||
if a.category == "score":
|
||||
a.progress = max_score
|
||||
a.unlocked = max_score >= a.threshold
|
||||
|
||||
# Check for perfect game
|
||||
perfect = await conn.fetchval(
|
||||
"""
|
||||
SELECT COUNT(*) FROM game_sessions
|
||||
WHERE student_id = $1
|
||||
AND questions_answered >= 5
|
||||
AND questions_correct = questions_answered
|
||||
""",
|
||||
student_id
|
||||
)
|
||||
for a in achievements:
|
||||
if a.id == "perfect_game":
|
||||
a.progress = 100 if perfect and perfect > 0 else 0
|
||||
a.unlocked = perfect is not None and perfect > 0
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to check achievements: {e}")
|
||||
|
||||
return achievements
|
||||
|
||||
async def check_new_achievements(
    self,
    student_id: str,
    session_score: int,
    session_accuracy: float,
    streak: int
) -> List[Achievement]:
    """Determine which achievements the just-finished session unlocked.

    Re-evaluates streak, score, and perfect-game achievements against the
    session results, flipping their ``unlocked`` flag in place.

    Args:
        student_id: Student whose achievement list is (re)loaded.
        session_score: Final score of the session just played.
        session_accuracy: Fraction of correct answers in the session (0.0-1.0).
        streak: Correct-answer streak reached in the session.

    Returns:
        Only the achievements that transitioned from locked to unlocked here.
    """
    freshly_unlocked: List[Achievement] = []

    for achievement in await self.get_student_achievements(student_id):
        if achievement.unlocked:
            # Already earned (persisted or unlocked earlier) — never re-award.
            continue

        if achievement.category == "streak":
            earned = streak >= achievement.threshold
        elif achievement.category == "score":
            earned = session_score >= achievement.threshold
        else:
            earned = False

        # Perfect game: a flawless session unlocks it regardless of category.
        # NOTE(review): unlike the achievement description ("min. 5 Fragen"),
        # no minimum question count is enforced here — confirm intent.
        if achievement.id == "perfect_game" and session_accuracy == 1.0:
            earned = True

        if earned:
            achievement.unlocked = True
            freshly_unlocked.append(achievement)

    return freshly_unlocked
|
||||
156
backend-lehrer/game/database_learning.py
Normal file
156
backend-lehrer/game/database_learning.py
Normal file
@@ -0,0 +1,156 @@
|
||||
# ==============================================
|
||||
# Breakpilot Drive - Learning State DB Methods
|
||||
# ==============================================
|
||||
# Methods for reading/writing student learning state.
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from .database_models import StudentLearningState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LearningStateMixin:
    """Mixin providing learning state database methods for GameDatabase.

    Expects the host class to provide ``self._pool`` (asyncpg pool, or None
    when no database is configured) and ``await self._ensure_connected()``.
    All methods are best-effort: database failures are logged and a neutral
    value (None / False) is returned instead of raising.
    """

    @staticmethod
    def _state_from_row(row) -> "StudentLearningState":
        """Map a full learning-state DB row to a StudentLearningState model."""
        return StudentLearningState(
            id=str(row["id"]),
            student_id=str(row["student_id"]),
            overall_level=row["overall_level"],
            math_level=float(row["math_level"]),
            german_level=float(row["german_level"]),
            english_level=float(row["english_level"]),
            total_play_time_minutes=row["total_play_time_minutes"],
            total_sessions=row["total_sessions"],
            # Question counters may be NULL on legacy rows; normalize to 0.
            questions_answered=row["questions_answered"] or 0,
            questions_correct=row["questions_correct"] or 0,
            created_at=row["created_at"],
            updated_at=row["updated_at"],
        )

    async def get_learning_state(self, student_id: str) -> Optional[StudentLearningState]:
        """Get learning state for a student.

        Returns:
            The student's state, or None when the student has no row, the
            pool is unavailable, or the query fails.
        """
        await self._ensure_connected()
        if not self._pool:
            return None

        try:
            async with self._pool.acquire() as conn:
                row = await conn.fetchrow(
                    """
                    SELECT id, student_id, overall_level, math_level, german_level,
                           english_level, total_play_time_minutes, total_sessions,
                           questions_answered, questions_correct, created_at, updated_at
                    FROM student_learning_state
                    WHERE student_id = $1
                    """,
                    student_id
                )

                if row:
                    return self._state_from_row(row)
        except Exception as e:
            logger.error(f"Failed to get learning state: {e}")

        return None

    async def create_or_update_learning_state(
        self,
        student_id: str,
        overall_level: int = 3,
        math_level: float = 3.0,
        german_level: float = 3.0,
        english_level: float = 3.0,
    ) -> Optional[StudentLearningState]:
        """Create or update (upsert) learning state for a student.

        On conflict the level columns are overwritten and ``updated_at`` is
        refreshed; accumulated play-time/session/question counters are kept.

        Returns:
            The resulting state row, or None on failure.
        """
        await self._ensure_connected()
        if not self._pool:
            return None

        try:
            async with self._pool.acquire() as conn:
                row = await conn.fetchrow(
                    """
                    INSERT INTO student_learning_state (
                        student_id, overall_level, math_level, german_level, english_level
                    ) VALUES ($1, $2, $3, $4, $5)
                    ON CONFLICT (student_id) DO UPDATE SET
                        overall_level = EXCLUDED.overall_level,
                        math_level = EXCLUDED.math_level,
                        german_level = EXCLUDED.german_level,
                        english_level = EXCLUDED.english_level,
                        updated_at = NOW()
                    RETURNING id, student_id, overall_level, math_level, german_level,
                              english_level, total_play_time_minutes, total_sessions,
                              questions_answered, questions_correct, created_at, updated_at
                    """,
                    student_id, overall_level, math_level, german_level, english_level
                )

                if row:
                    return self._state_from_row(row)
        except Exception as e:
            logger.error(f"Failed to create/update learning state: {e}")

        return None

    async def update_learning_stats(
        self,
        student_id: str,
        duration_minutes: int,
        questions_answered: int,
        questions_correct: int,
        new_level: Optional[int] = None,
    ) -> bool:
        """Update learning stats after a game session.

        Increments play time, session count, and question counters; when
        ``new_level`` is given, also overwrites ``overall_level``.

        Returns:
            True on success, False when the pool is unavailable or the
            update fails.
        """
        await self._ensure_connected()
        if not self._pool:
            return False

        # Build the SET list once instead of duplicating the whole UPDATE
        # statement for the optional level change.
        set_clauses = [
            "total_play_time_minutes = total_play_time_minutes + $2",
            "total_sessions = total_sessions + 1",
            "questions_answered = COALESCE(questions_answered, 0) + $3",
            "questions_correct = COALESCE(questions_correct, 0) + $4",
        ]
        args = [student_id, duration_minutes, questions_answered, questions_correct]
        if new_level is not None:
            set_clauses.append("overall_level = $5")
            args.append(new_level)
        set_clauses.append("updated_at = NOW()")

        try:
            async with self._pool.acquire() as conn:
                await conn.execute(
                    f"""
                    UPDATE student_learning_state SET
                        {", ".join(set_clauses)}
                    WHERE student_id = $1
                    """,
                    *args,
                )
                return True
        except Exception as e:
            logger.error(f"Failed to update learning stats: {e}")

        return False
|
||||
143
backend-lehrer/game/database_models.py
Normal file
143
backend-lehrer/game/database_models.py
Normal file
@@ -0,0 +1,143 @@
|
||||
# ==============================================
|
||||
# Breakpilot Drive - Game Database Models
|
||||
# ==============================================
|
||||
# Data models, enums, and achievement definitions.
|
||||
|
||||
import os
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Optional, Dict, Any
|
||||
from dataclasses import dataclass
|
||||
from enum import IntEnum
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Database URL from environment.
# NOTE(review): the fallback embeds default credentials for local development
# only — ensure DATABASE_URL is set in any non-local deployment.
GAME_DB_URL = os.getenv(
    "DATABASE_URL",
    "postgresql://breakpilot:breakpilot123@localhost:5432/breakpilot"
)
|
||||
|
||||
|
||||
class LearningLevel(IntEnum):
    """Learning level enum mapping to grade ranges.

    Values are ordered difficulty tiers (1 = easiest); the comments give the
    approximate German school grade ("Klasse") each tier targets.
    """
    BEGINNER = 1      # Klasse 2-3
    ELEMENTARY = 2    # Klasse 3-4
    INTERMEDIATE = 3  # Klasse 4-5
    ADVANCED = 4      # Klasse 5-6
    EXPERT = 5        # Klasse 6+
|
||||
|
||||
|
||||
@dataclass
class StudentLearningState:
    """Student learning state data model.

    Per-student aggregate of levels and play statistics; mirrors one row of
    the ``student_learning_state`` table.
    """
    id: Optional[str] = None
    student_id: str = ""
    overall_level: int = 3
    math_level: float = 3.0
    german_level: float = 3.0
    english_level: float = 3.0
    total_play_time_minutes: int = 0
    total_sessions: int = 0
    questions_answered: int = 0
    questions_correct: int = 0
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; datetimes become ISO-8601 strings (or None)."""
        def _iso(ts: Optional[datetime]) -> Optional[str]:
            return ts.isoformat() if ts else None

        payload: Dict[str, Any] = {
            "id": self.id,
            "student_id": self.student_id,
            "overall_level": self.overall_level,
            "math_level": self.math_level,
            "german_level": self.german_level,
            "english_level": self.english_level,
            "total_play_time_minutes": self.total_play_time_minutes,
            "total_sessions": self.total_sessions,
            "questions_answered": self.questions_answered,
            "questions_correct": self.questions_correct,
            "created_at": _iso(self.created_at),
            "updated_at": _iso(self.updated_at),
        }
        return payload

    @property
    def accuracy(self) -> float:
        """Fraction of answered questions that were correct (0.0 when none)."""
        answered = self.questions_answered
        return self.questions_correct / answered if answered else 0.0
|
||||
|
||||
|
||||
@dataclass
class GameSessionRecord:
    """Game session record for database storage.

    Mirrors one row of the ``game_sessions`` table; ``id`` and the timestamp
    fields are filled in by the database layer.
    """
    id: Optional[str] = None               # DB primary key (string form)
    student_id: str = ""
    game_mode: str = "video"               # gameplay variant identifier
    duration_seconds: int = 0
    distance_traveled: float = 0.0
    score: int = 0
    questions_answered: int = 0
    questions_correct: int = 0
    difficulty_level: int = 3              # presumably the 1-5 LearningLevel scale — confirm
    started_at: Optional[datetime] = None
    ended_at: Optional[datetime] = None
    metadata: Optional[Dict[str, Any]] = None  # arbitrary extra session data (stored as JSON)
|
||||
|
||||
|
||||
@dataclass
class GameQuizAnswer:
    """Individual quiz answer record.

    Mirrors one row of the ``game_quiz_answers`` table, linked to a game
    session via ``session_id``.
    """
    id: Optional[str] = None          # DB primary key (string form)
    session_id: Optional[str] = None  # owning game_sessions row
    question_id: str = ""
    subject: str = ""                 # e.g. math/german/english — TODO confirm value set
    difficulty: int = 3               # presumably the 1-5 LearningLevel scale — confirm
    is_correct: bool = False
    answer_time_ms: int = 0           # time taken to answer, in milliseconds
    created_at: Optional[datetime] = None
|
||||
|
||||
|
||||
@dataclass
class Achievement:
    """Achievement definition and unlock status.

    Static definitions are declared in the module-level ACHIEVEMENTS list;
    ``unlocked``/``progress`` are filled in per student at query time.
    """
    id: str                    # stable identifier, e.g. "first_game"
    name: str                  # display name (German)
    description: str           # display description (German)
    icon: str = "star"         # icon key, presumably resolved by the client UI — confirm
    category: str = "general"  # general, streak, accuracy, time, score, level
    threshold: int = 1         # progress value required to unlock
    unlocked: bool = False
    unlocked_at: Optional[datetime] = None
    progress: int = 0          # current progress toward threshold
|
||||
|
||||
|
||||
# Achievement definitions (static, not in DB).
# Spec tuples: (id, name, description, icon, category, threshold).
_ACHIEVEMENT_SPECS = [
    # Erste Schritte
    ("first_game", "Erste Fahrt", "Spiele dein erstes Spiel", "rocket", "general", 1),
    ("five_games", "Regelmaessiger Fahrer", "Spiele 5 Spiele", "car", "general", 5),
    ("twenty_games", "Erfahrener Pilot", "Spiele 20 Spiele", "trophy", "general", 20),

    # Serien
    ("streak_3", "Guter Start", "3 richtige Antworten hintereinander", "fire", "streak", 3),
    ("streak_5", "Auf Feuer", "5 richtige Antworten hintereinander", "fire", "streak", 5),
    ("streak_10", "Unaufhaltsam", "10 richtige Antworten hintereinander", "fire", "streak", 10),

    # Genauigkeit
    ("perfect_game", "Perfektes Spiel", "100% richtig in einem Spiel (min. 5 Fragen)", "star", "accuracy", 100),
    ("accuracy_80", "Scharfschuetze", "80% Gesamtgenauigkeit (min. 50 Fragen)", "target", "accuracy", 80),

    # Zeit
    ("play_30min", "Ausdauer", "30 Minuten Gesamtspielzeit", "clock", "time", 30),
    ("play_60min", "Marathon", "60 Minuten Gesamtspielzeit", "clock", "time", 60),

    # Score
    ("score_5000", "Punktejaeger", "5.000 Punkte in einem Spiel", "gem", "score", 5000),
    ("score_10000", "Highscore Hero", "10.000 Punkte in einem Spiel", "crown", "score", 10000),

    # Level
    ("level_up", "Aufsteiger", "Erreiche Level 2", "arrow-up", "level", 2),
    ("master", "Meister", "Erreiche Level 5", "medal", "level", 5),
]

ACHIEVEMENTS = [
    Achievement(
        id=spec_id, name=name, description=description,
        icon=icon, category=category, threshold=threshold,
    )
    for spec_id, name, description, icon, category, threshold in _ACHIEVEMENT_SPECS
]
|
||||
218
backend-lehrer/game/database_sessions.py
Normal file
218
backend-lehrer/game/database_sessions.py
Normal file
@@ -0,0 +1,218 @@
|
||||
# ==============================================
|
||||
# Breakpilot Drive - Game Session & Quiz DB Methods
|
||||
# ==============================================
|
||||
# Methods for saving/querying game sessions, quiz answers, and basic leaderboard.
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SessionsMixin:
    """Mixin providing game session and quiz database methods for GameDatabase.

    Expects the host class to provide ``self._pool`` (asyncpg pool, or None
    when no database is configured) and ``await self._ensure_connected()``.
    All methods are best-effort: database failures are logged and a neutral
    value (None / [] / {} / False) is returned instead of raising.
    """

    async def save_game_session(
        self,
        student_id: str,
        game_mode: str,
        duration_seconds: int,
        distance_traveled: float,
        score: int,
        questions_answered: int,
        questions_correct: int,
        difficulty_level: int,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> Optional[str]:
        """Save a game session and return the session ID.

        ``started_at`` is computed server-side as NOW() minus the session
        duration and ``ended_at`` as NOW().

        Returns:
            The new row's id as a string, or None on failure / no pool.
        """
        await self._ensure_connected()
        if not self._pool:
            return None

        try:
            async with self._pool.acquire() as conn:
                row = await conn.fetchrow(
                    """
                    INSERT INTO game_sessions (
                        student_id, game_mode, duration_seconds, distance_traveled,
                        score, questions_answered, questions_correct, difficulty_level,
                        started_at, ended_at, metadata
                    ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8,
                              NOW() - make_interval(secs => $3), NOW(), $9)
                    RETURNING id
                    """,
                    student_id, game_mode, duration_seconds, distance_traveled,
                    score, questions_answered, questions_correct, difficulty_level,
                    # metadata is stored as JSON text; an empty dict saves as NULL
                    json.dumps(metadata) if metadata else None
                )

                if row:
                    return str(row["id"])
        except Exception as e:
            logger.error(f"Failed to save game session: {e}")

        return None

    async def get_user_sessions(
        self,
        student_id: str,
        limit: int = 10
    ) -> List[Dict[str, Any]]:
        """Get recent game sessions for a user, newest first.

        Returns:
            Up to ``limit`` session dicts (timestamps as ISO strings), or []
            on failure / no pool.
        """
        await self._ensure_connected()
        if not self._pool:
            return []

        try:
            async with self._pool.acquire() as conn:
                rows = await conn.fetch(
                    """
                    SELECT id, student_id, game_mode, duration_seconds, distance_traveled,
                           score, questions_answered, questions_correct, difficulty_level,
                           started_at, ended_at, metadata
                    FROM game_sessions
                    WHERE student_id = $1
                    ORDER BY ended_at DESC
                    LIMIT $2
                    """,
                    student_id, limit
                )

                # NOTE(review): metadata is selected above but not included in
                # the returned dicts — confirm whether that is intentional.
                return [
                    {
                        "session_id": str(row["id"]),
                        "user_id": str(row["student_id"]),
                        "game_mode": row["game_mode"],
                        "duration_seconds": row["duration_seconds"],
                        "distance_traveled": float(row["distance_traveled"]) if row["distance_traveled"] else 0.0,
                        "score": row["score"],
                        "questions_answered": row["questions_answered"],
                        "questions_correct": row["questions_correct"],
                        "difficulty_level": row["difficulty_level"],
                        "started_at": row["started_at"].isoformat() if row["started_at"] else None,
                        "ended_at": row["ended_at"].isoformat() if row["ended_at"] else None,
                    }
                    for row in rows
                ]
        except Exception as e:
            logger.error(f"Failed to get user sessions: {e}")

        return []

    async def get_leaderboard(
        self,
        timeframe: str = "day",
        limit: int = 10
    ) -> List[Dict[str, Any]]:
        """Get leaderboard data: total score per student within a timeframe.

        Args:
            timeframe: One of "day", "week", "month", "all"; unknown values
                fall back to "all".
            limit: Maximum number of ranked entries.

        Returns:
            Ranked entries (rank, user_id, total_score), or [] on failure.
        """
        await self._ensure_connected()
        if not self._pool:
            return []

        # Timeframe filter — fixed whitelist, so the f-string interpolation
        # below cannot inject user input into the SQL.
        timeframe_sql = {
            "day": "ended_at > NOW() - INTERVAL '1 day'",
            "week": "ended_at > NOW() - INTERVAL '7 days'",
            "month": "ended_at > NOW() - INTERVAL '30 days'",
            "all": "1=1",
        }.get(timeframe, "1=1")

        try:
            async with self._pool.acquire() as conn:
                rows = await conn.fetch(
                    f"""
                    SELECT student_id, SUM(score) as total_score
                    FROM game_sessions
                    WHERE {timeframe_sql}
                    GROUP BY student_id
                    ORDER BY total_score DESC
                    LIMIT $1
                    """,
                    limit
                )

                return [
                    {
                        "rank": i + 1,
                        "user_id": str(row["student_id"]),
                        "total_score": int(row["total_score"]),
                    }
                    for i, row in enumerate(rows)
                ]
        except Exception as e:
            logger.error(f"Failed to get leaderboard: {e}")

        return []

    async def save_quiz_answer(
        self,
        session_id: str,
        question_id: str,
        subject: str,
        difficulty: int,
        is_correct: bool,
        answer_time_ms: int,
    ) -> bool:
        """Save an individual quiz answer linked to a game session.

        Returns:
            True on success, False on failure / no pool.
        """
        await self._ensure_connected()
        if not self._pool:
            return False

        try:
            async with self._pool.acquire() as conn:
                await conn.execute(
                    """
                    INSERT INTO game_quiz_answers (
                        session_id, question_id, subject, difficulty,
                        is_correct, answer_time_ms
                    ) VALUES ($1, $2, $3, $4, $5, $6)
                    """,
                    session_id, question_id, subject, difficulty,
                    is_correct, answer_time_ms
                )
                return True
        except Exception as e:
            logger.error(f"Failed to save quiz answer: {e}")

        return False

    async def get_subject_stats(
        self,
        student_id: str
    ) -> Dict[str, Dict[str, Any]]:
        """Get per-subject statistics for a student.

        Aggregates all quiz answers across the student's sessions.

        Returns:
            Mapping of subject -> {total, correct, accuracy, avg_time_ms},
            or {} on failure / no pool.
        """
        await self._ensure_connected()
        if not self._pool:
            return {}

        try:
            async with self._pool.acquire() as conn:
                rows = await conn.fetch(
                    """
                    SELECT
                        qa.subject,
                        COUNT(*) as total,
                        SUM(CASE WHEN qa.is_correct THEN 1 ELSE 0 END) as correct,
                        AVG(qa.answer_time_ms) as avg_time_ms
                    FROM game_quiz_answers qa
                    JOIN game_sessions gs ON qa.session_id = gs.id
                    WHERE gs.student_id = $1
                    GROUP BY qa.subject
                    """,
                    student_id
                )

                return {
                    row["subject"]: {
                        "total": row["total"],
                        "correct": row["correct"],
                        "accuracy": row["correct"] / row["total"] if row["total"] > 0 else 0.0,
                        "avg_time_ms": int(row["avg_time_ms"]) if row["avg_time_ms"] else 0,
                    }
                    for row in rows
                }
        except Exception as e:
            logger.error(f"Failed to get subject stats: {e}")

        return {}
|
||||
Reference in New Issue
Block a user