Files
breakpilot-lehrer/backend-lehrer/unit_analytics_models.py
Benjamin Admin 34da9f4cda [split-required] Split 700-870 LOC files across all services
backend-lehrer (11 files):
- llm_gateway/routes/schools.py (867 → 5), recording_api.py (848 → 6)
- messenger_api.py (840 → 5), print_generator.py (824 → 5)
- unit_analytics_api.py (751 → 5), classroom/routes/context.py (726 → 4)
- llm_gateway/routes/edu_search_seeds.py (710 → 4)

klausur-service (12 files):
- ocr_labeling_api.py (845 → 4), metrics_db.py (833 → 4)
- legal_corpus_api.py (790 → 4), page_crop.py (758 → 3)
- mail/ai_service.py (747 → 4), github_crawler.py (767 → 3)
- trocr_service.py (730 → 4), full_compliance_pipeline.py (723 → 4)
- dsfa_rag_api.py (715 → 4), ocr_pipeline_auto.py (705 → 4)

website (6 pages):
- audit-checklist (867 → 8), content (806 → 6)
- screen-flow (790 → 4), scraper (789 → 5)
- zeugnisse (776 → 5), modules (745 → 4)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-25 08:01:18 +02:00

128 lines
3.2 KiB
Python

"""
Unit Analytics API - Pydantic Models.
Data models for learning gains, stop performance, misconceptions,
student progress, class comparison, and export.
"""
from typing import List, Optional, Dict, Any
from datetime import datetime
from enum import Enum
from pydantic import BaseModel, Field
class TimeRange(str, Enum):
    """Closed set of time-range options for analytics queries.

    Subclasses ``str`` so members compare equal to — and serialize as —
    their plain string values, e.g. ``TimeRange.WEEK == "week"``.
    """
    WEEK = "week"
    MONTH = "month"
    QUARTER = "quarter"
    ALL = "all"  # presumably "no time filter / full history" — confirm with query layer
class LearningGainData(BaseModel):
    """Per-student pre/post learning-gain data point for one unit."""
    student_id: str
    student_name: str
    unit_id: str
    precheck_score: float   # score before working through the unit
    postcheck_score: float  # score after completing the unit
    learning_gain: float    # presumably postcheck - precheck or a normalized gain — confirm with producer
    percentile: Optional[float] = None  # rank of this gain within the cohort; None when not computed
class LearningGainSummary(BaseModel):
    """Aggregated learning-gain statistics for a single unit."""
    unit_id: str
    unit_title: str
    total_students: int
    avg_precheck: float
    avg_postcheck: float
    avg_gain: float
    median_gain: float
    std_deviation: float          # standard deviation of the individual gains
    positive_gain_count: int      # students with gain > 0
    negative_gain_count: int      # students with gain < 0
    no_change_count: int          # students with gain == 0
    gain_distribution: Dict[str, int]  # presumably histogram bucket label -> student count; bucket scheme set by producer
    individual_gains: List[LearningGainData]  # one entry per student
class StopPerformance(BaseModel):
    """Aggregate performance metrics for a single stop within a unit."""
    stop_id: str
    stop_label: str
    attempts_total: int
    success_rate: float  # NOTE(review): fraction vs. percentage is not defined here — confirm with producer
    avg_time_seconds: float
    avg_attempts_before_success: float
    common_errors: List[str]
    difficulty_rating: float  # 1-5 based on performance
class UnitPerformanceDetail(BaseModel):
    """Detailed performance breakdown for one unit across all sessions."""
    unit_id: str
    unit_title: str
    template: str
    total_sessions: int
    completed_sessions: int
    completion_rate: float  # presumably completed_sessions / total_sessions — confirm
    avg_duration_minutes: float
    stops: List[StopPerformance]  # per-stop metrics
    bottleneck_stops: List[str]  # Stops where students struggle most (presumably stop_id values — confirm)
class MisconceptionEntry(BaseModel):
    """A single tracked misconception and where/when it was observed."""
    concept_id: str
    concept_label: str
    misconception_text: str
    frequency: int  # number of times this misconception was observed
    affected_student_ids: List[str]
    unit_id: str
    stop_id: str
    detected_via: str  # "precheck", "postcheck", "interaction"
    first_detected: datetime  # NOTE(review): naive vs. UTC-aware is not specified — confirm
    last_detected: datetime
class MisconceptionReport(BaseModel):
    """Comprehensive misconception report, optionally scoped to one class.

    ``class_id`` is None when the report spans all classes. The explicit
    ``= None`` default is required for the field to be genuinely optional
    under Pydantic v2 (an un-defaulted ``Optional[str]`` is still a
    *required* field there — it may be None but must be supplied); it also
    matches the convention used elsewhere in this module, e.g.
    ``LearningGainData.percentile: Optional[float] = None``.
    """
    class_id: Optional[str] = None  # None => report covers all classes
    time_range: str                 # time-range label (presumably a TimeRange value — confirm)
    total_misconceptions: int
    unique_concepts: int
    most_common: List[MisconceptionEntry]  # presumably sorted most-frequent-first — confirm with producer
    by_unit: Dict[str, List[MisconceptionEntry]]  # unit_id -> entries for that unit
    trending_up: List[MisconceptionEntry]  # Getting more frequent
    resolved: List[MisconceptionEntry]  # No longer appearing
class StudentProgressTimeline(BaseModel):
    """Chronological progress summary for a single student."""
    student_id: str
    student_name: str
    units_completed: int
    total_time_minutes: int
    avg_score: float
    trend: str  # "improving", "stable", "declining"
    timeline: List[Dict[str, Any]]  # List of session events; event schema is set by the producer — TODO document keys
class ClassComparisonData(BaseModel):
    """Per-class aggregate metrics for comparing classes side by side."""
    class_id: str
    class_name: str
    student_count: int
    units_assigned: int
    avg_completion_rate: float
    avg_learning_gain: float
    avg_time_per_unit: float  # unit of time unspecified here — presumably minutes; confirm with producer
class ExportFormat(str, Enum):
    """Supported analytics export formats.

    Subclasses ``str`` so members serialize as their plain string values.
    """
    JSON = "json"
    CSV = "csv"