Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 27s
CI / test-go-edu-search (push) Successful in 40s
CI / test-python-klausur (push) Failing after 2m30s
CI / test-python-agent-core (push) Successful in 28s
CI / test-nodejs-website (push) Successful in 20s
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
128 lines
3.2 KiB
Python
128 lines
3.2 KiB
Python
"""
|
|
Unit Analytics API - Pydantic Models.
|
|
|
|
Data models for learning gains, stop performance, misconceptions,
|
|
student progress, class comparison, and export.
|
|
"""
|
|
|
|
from typing import List, Optional, Dict, Any
|
|
from datetime import datetime
|
|
from enum import Enum
|
|
|
|
from pydantic import BaseModel, Field
|
|
|
|
|
|
class TimeRange(str, Enum):
    """Time window selector for analytics queries.

    Subclasses ``str`` so members compare equal to their plain string
    values and serialize cleanly in JSON payloads.
    """

    WEEK = "week"
    MONTH = "month"
    QUARTER = "quarter"
    ALL = "all"
|
|
|
|
|
|
class LearningGainData(BaseModel):
    """Single pre/post learning-gain measurement for one student on one unit.

    Holds the precheck and postcheck scores plus the computed gain.
    """

    student_id: str
    student_name: str
    unit_id: str
    precheck_score: float
    postcheck_score: float
    learning_gain: float
    # Optional cohort percentile; None when no ranking was computed upstream.
    percentile: Optional[float] = None
|
|
|
|
|
|
class LearningGainSummary(BaseModel):
    """Aggregated learning-gain statistics for a single unit.

    Combines cohort-level averages and spread with per-student detail
    (``individual_gains``) and a bucketed histogram (``gain_distribution``).
    """

    unit_id: str
    unit_title: str
    total_students: int
    avg_precheck: float
    avg_postcheck: float
    avg_gain: float
    median_gain: float
    std_deviation: float
    # Counts of students whose gain was positive / negative / zero.
    positive_gain_count: int
    negative_gain_count: int
    no_change_count: int
    # Histogram: bucket label -> number of students in that bucket.
    gain_distribution: Dict[str, int]
    # Per-student gain records backing the aggregates above.
    individual_gains: List[LearningGainData]
|
|
|
|
|
|
class StopPerformance(BaseModel):
    """Aggregate performance data for a single stop within a unit."""

    stop_id: str
    stop_label: str
    attempts_total: int
    success_rate: float
    avg_time_seconds: float
    avg_attempts_before_success: float
    # Most frequently observed error descriptions for this stop.
    common_errors: List[str]
    # 1-5 scale derived from performance data (per original comment).
    difficulty_rating: float
|
|
|
|
|
|
class UnitPerformanceDetail(BaseModel):
    """Detailed performance breakdown for one unit.

    Aggregates session counts and durations, and nests per-stop
    performance records in ``stops``.
    """

    unit_id: str
    unit_title: str
    template: str
    total_sessions: int
    completed_sessions: int
    completion_rate: float
    avg_duration_minutes: float
    # One entry per stop in the unit.
    stops: List[StopPerformance]
    # Stop IDs where students struggle most (per original comment).
    bottleneck_stops: List[str]
|
|
|
|
|
|
class MisconceptionEntry(BaseModel):
    """One tracked misconception tied to a concept, unit, and stop."""

    concept_id: str
    concept_label: str
    misconception_text: str
    # Number of times this misconception was observed.
    frequency: int
    affected_student_ids: List[str]
    unit_id: str
    stop_id: str
    # Detection channel; original comment lists
    # "precheck", "postcheck", "interaction" as expected values.
    detected_via: str
    first_detected: datetime
    last_detected: datetime
|
|
|
|
|
|
class MisconceptionReport(BaseModel):
    """Comprehensive misconception report across units for a class.

    Fix: ``class_id`` previously had the annotation ``Optional[str]``
    with no default. Pydantic v1 implicitly defaulted that to ``None``,
    but Pydantic v2 treats it as a *required* field that merely accepts
    ``None``. An explicit ``= None`` default makes the field genuinely
    optional under both versions without changing the accepted values.
    """

    # None means the report spans all classes rather than one class.
    class_id: Optional[str] = None
    time_range: str
    total_misconceptions: int
    unique_concepts: int
    # Misconceptions ordered by prevalence.
    most_common: List[MisconceptionEntry]
    # unit_id -> misconceptions detected in that unit.
    by_unit: Dict[str, List[MisconceptionEntry]]
    # Getting more frequent over the time range (per original comment).
    trending_up: List[MisconceptionEntry]
    # No longer appearing (per original comment).
    resolved: List[MisconceptionEntry]
|
|
|
|
|
|
class StudentProgressTimeline(BaseModel):
    """Progress summary and event timeline for a single student."""

    student_id: str
    student_name: str
    units_completed: int
    total_time_minutes: int
    avg_score: float
    # Original comment lists "improving", "stable", "declining"
    # as the expected trend values.
    trend: str
    # Session events as free-form dicts (schema defined by the producer).
    timeline: List[Dict[str, Any]]
|
|
|
|
|
|
class ClassComparisonData(BaseModel):
    """Per-class aggregate metrics used for cross-class comparison."""

    class_id: str
    class_name: str
    student_count: int
    units_assigned: int
    avg_completion_rate: float
    avg_learning_gain: float
    avg_time_per_unit: float
|
|
|
|
|
|
class ExportFormat(str, Enum):
    """Supported export formats.

    Subclasses ``str`` so members compare equal to their raw values
    and serialize directly in API responses.
    """

    JSON = "json"
    CSV = "csv"
|