Restructure: Move 43 files into 8 domain packages (backend-lehrer)
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 27s
CI / test-go-edu-search (push) Successful in 40s
CI / test-python-klausur (push) Failing after 2m30s
CI / test-python-agent-core (push) Successful in 28s
CI / test-nodejs-website (push) Successful in 20s

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-04-25 22:32:45 +02:00
parent 165c493d1e
commit dde45b29db
93 changed files with 9469 additions and 9290 deletions

View File

@@ -0,0 +1 @@
# units — Learning units, analytics, definitions, content generation.

View File

@@ -0,0 +1,25 @@
"""
Breakpilot Drive - Unit Analytics API — Barrel Re-export.
Erweiterte Analytics fuer Lernfortschritt:
- Pre/Post Gain Visualisierung
- Misconception-Tracking
- Stop-Level Analytics
- Aggregierte Klassen-Statistiken
- Export-Funktionen
Split into:
- unit_analytics_models.py: Pydantic models & enums
- unit_analytics_helpers.py: Database access & computation helpers
- unit_analytics_routes.py: Core analytics endpoint handlers
- unit_analytics_export.py: Export & dashboard endpoints
"""
from fastapi import APIRouter
from .analytics_routes import router as _routes_router
from .analytics_export import router as _export_router
router = APIRouter(prefix="/api/analytics", tags=["Unit Analytics"])
router.include_router(_routes_router)
router.include_router(_export_router)

View File

@@ -0,0 +1,145 @@
"""
Unit Analytics API - Export & Dashboard Routes.
Export endpoints for learning gains and misconceptions, plus dashboard overview.
"""
import logging
from datetime import datetime
from typing import Optional, Dict, Any
from fastapi import APIRouter, Query
from fastapi.responses import Response
from .analytics_models import TimeRange, ExportFormat
from .analytics_helpers import get_analytics_database
logger = logging.getLogger(__name__)
router = APIRouter(tags=["Unit Analytics"])
# ==============================================
# API Endpoints - Export
# ==============================================
@router.get("/export/learning-gains")
async def export_learning_gains(
unit_id: Optional[str] = Query(None),
class_id: Optional[str] = Query(None),
time_range: TimeRange = Query(TimeRange.ALL),
format: ExportFormat = Query(ExportFormat.JSON),
) -> Any:
"""
Export learning gain data.
"""
db = await get_analytics_database()
data = []
if db:
try:
data = await db.export_learning_gains(
unit_id=unit_id, class_id=class_id, time_range=time_range.value
)
except Exception as e:
logger.error(f"Failed to export data: {e}")
if format == ExportFormat.CSV:
if not data:
csv_content = "student_id,unit_id,precheck,postcheck,gain\n"
else:
csv_content = "student_id,unit_id,precheck,postcheck,gain\n"
for row in data:
csv_content += f"{row['student_id']},{row['unit_id']},{row.get('precheck', '')},{row.get('postcheck', '')},{row.get('gain', '')}\n"
return Response(
content=csv_content,
media_type="text/csv",
headers={"Content-Disposition": "attachment; filename=learning_gains.csv"}
)
return {
"export_date": datetime.utcnow().isoformat(),
"filters": {
"unit_id": unit_id, "class_id": class_id, "time_range": time_range.value,
},
"data": data,
}
@router.get("/export/misconceptions")
async def export_misconceptions(
class_id: Optional[str] = Query(None),
format: ExportFormat = Query(ExportFormat.JSON),
) -> Any:
"""
Export misconception data for further analysis.
"""
# Import here to avoid circular dependency
from .analytics_routes import get_misconception_report
report = await get_misconception_report(
class_id=class_id, unit_id=None,
time_range=TimeRange.MONTH, limit=100
)
if format == ExportFormat.CSV:
csv_content = "concept_id,concept_label,misconception,frequency,unit_id,stop_id\n"
for m in report.most_common:
csv_content += f'"{m.concept_id}","{m.concept_label}","{m.misconception_text}",{m.frequency},"{m.unit_id}","{m.stop_id}"\n'
return Response(
content=csv_content,
media_type="text/csv",
headers={"Content-Disposition": "attachment; filename=misconceptions.csv"}
)
return {
"export_date": datetime.utcnow().isoformat(),
"class_id": class_id,
"total_entries": len(report.most_common),
"data": [m.model_dump() for m in report.most_common],
}
# ==============================================
# API Endpoints - Dashboard Aggregates
# ==============================================
@router.get("/dashboard/overview")
async def get_analytics_overview(
time_range: TimeRange = Query(TimeRange.MONTH),
) -> Dict[str, Any]:
"""
Get high-level analytics overview for dashboard.
"""
db = await get_analytics_database()
if db:
try:
overview = await db.get_analytics_overview(time_range.value)
return overview
except Exception as e:
logger.error(f"Failed to get analytics overview: {e}")
return {
"time_range": time_range.value,
"total_sessions": 0,
"unique_students": 0,
"avg_completion_rate": 0.0,
"avg_learning_gain": 0.0,
"most_played_units": [],
"struggling_concepts": [],
"active_classes": 0,
}
@router.get("/health")
async def health_check() -> Dict[str, Any]:
"""Health check for analytics API."""
db = await get_analytics_database()
return {
"status": "healthy",
"service": "unit-analytics",
"database": "connected" if db else "disconnected",
}

View File

@@ -0,0 +1,97 @@
"""
Unit Analytics API - Helpers.
Database access, statistical computation, and utility functions.
"""
import os
import logging
from typing import List, Dict, Optional
logger = logging.getLogger(__name__)
# Feature flags
# GAME_USE_DATABASE defaults to on; any value other than "true" disables DB access.
USE_DATABASE = os.getenv("GAME_USE_DATABASE", "true").lower() == "true"
# Database singleton
# Lazily created by get_analytics_database(); stays None while unavailable.
_analytics_db = None
async def get_analytics_database():
    """Get analytics database instance.

    Lazily creates a module-level singleton on first use.  Returns ``None``
    when the GAME_USE_DATABASE flag is off or the database layer cannot be
    loaded/connected; a failed attempt leaves the singleton ``None``, so it
    is retried on the next call.
    """
    global _analytics_db
    if not USE_DATABASE:
        return None
    if _analytics_db is None:
        try:
            from unit.database import get_analytics_db
            _analytics_db = await get_analytics_db()
            logger.info("Analytics database initialized")
        except ImportError:
            # Optional dependency: deployments may ship without the DB layer.
            logger.warning("Analytics database module not available")
        except Exception as e:
            logger.warning(f"Analytics database not available: {e}")
    return _analytics_db
def calculate_gain_distribution(gains: List[float]) -> Dict[str, int]:
    """Bucket learning gains (fractional values) into labelled percent ranges."""
    # Exclusive upper bounds (in percent) paired with their bucket labels;
    # anything not below the last bound lands in the "> 20%" bucket.
    bounds = [
        (-20.0, "< -20%"),
        (-10.0, "-20% to -10%"),
        (0.0, "-10% to 0%"),
        (10.0, "0% to 10%"),
        (20.0, "10% to 20%"),
    ]
    counts = {label: 0 for _, label in bounds}
    counts["> 20%"] = 0
    for gain in gains:
        pct = gain * 100
        for bound, label in bounds:
            if pct < bound:
                counts[label] += 1
                break
        else:
            counts["> 20%"] += 1
    return counts
def calculate_trend(scores: List[float]) -> str:
    """Classify a score series as improving/declining/stable via its OLS slope."""
    n = len(scores)
    if n < 3:
        return "insufficient_data"
    # Least-squares slope of scores regressed on their indices 0..n-1.
    mean_x = (n - 1) / 2
    mean_y = sum(scores) / n
    var_x = sum((i - mean_x) ** 2 for i in range(n))
    if var_x == 0:
        # Defensive only: cannot occur for n >= 3; kept for parity.
        return "stable"
    cov_xy = sum((i - mean_x) * (y - mean_y) for i, y in enumerate(scores))
    slope = cov_xy / var_x
    # Dead zone of ±0.05 around zero counts as no real movement.
    if slope > 0.05:
        return "improving"
    if slope < -0.05:
        return "declining"
    return "stable"
def calculate_difficulty_rating(success_rate: float, avg_attempts: float) -> float:
    """Calculate difficulty rating 1-5 based on success metrics.

    Lower success rate and higher attempt counts mean higher difficulty.
    The attempt modifier is clamped to the documented 0-1 range (previously
    avg_attempts < 1, e.g. no attempts yet, produced a negative modifier and
    ratings below the promised minimum of 1).
    """
    base_difficulty = (1 - success_rate) * 3 + 1  # 1-4 range for success_rate in [0, 1]
    attempt_modifier = max(0.0, min(avg_attempts - 1, 1))  # clamp to 0-1 range
    return min(5.0, base_difficulty + attempt_modifier)

View File

@@ -0,0 +1,127 @@
"""
Unit Analytics API - Pydantic Models.
Data models for learning gains, stop performance, misconceptions,
student progress, class comparison, and export.
"""
from typing import List, Optional, Dict, Any
from datetime import datetime
from enum import Enum
from pydantic import BaseModel, Field
class TimeRange(str, Enum):
    """Time range filter for analytics queries (str-valued for query params)."""
    WEEK = "week"
    MONTH = "month"
    QUARTER = "quarter"
    ALL = "all"
class LearningGainData(BaseModel):
    """Pre/Post learning gain data point for one student in one unit."""
    student_id: str
    student_name: str
    unit_id: str
    precheck_score: float   # presumably a 0-1 fraction (gains are *100 for buckets) — confirm with DB layer
    postcheck_score: float
    learning_gain: float    # postcheck_score - precheck_score
    percentile: Optional[float] = None  # 0-100 rank among peers; filled in by the routes layer
class LearningGainSummary(BaseModel):
    """Aggregated learning gain statistics for a single unit."""
    unit_id: str
    unit_title: str
    total_students: int          # students with both pre- and postcheck scores
    avg_precheck: float
    avg_postcheck: float
    avg_gain: float
    median_gain: float
    std_deviation: float         # 0.0 when fewer than two gains exist
    positive_gain_count: int     # gains above the +0.01 dead zone
    negative_gain_count: int     # gains below the -0.01 dead zone
    no_change_count: int         # gains within ±0.01
    gain_distribution: Dict[str, int]  # bucket label -> count
    individual_gains: List[LearningGainData]  # sorted by gain, descending
class StopPerformance(BaseModel):
    """Performance data for a single stop within a unit."""
    stop_id: str
    stop_label: str
    attempts_total: int
    success_rate: float          # presumably a 0-1 fraction — confirm with DB layer
    avg_time_seconds: float
    avg_attempts_before_success: float
    common_errors: List[str]
    difficulty_rating: float  # 1-5 based on performance
class UnitPerformanceDetail(BaseModel):
    """Detailed unit performance breakdown, one entry per stop."""
    unit_id: str
    unit_title: str
    template: str                # unit template name; "unknown" when stats are missing
    total_sessions: int
    completed_sessions: int
    completion_rate: float
    avg_duration_minutes: float
    stops: List[StopPerformance]
    bottleneck_stops: List[str]  # Stops where students struggle most
class MisconceptionEntry(BaseModel):
    """Individual misconception tracking record tied to a unit/stop."""
    concept_id: str
    concept_label: str
    misconception_text: str
    frequency: int
    affected_student_ids: List[str]
    unit_id: str
    stop_id: str
    detected_via: str  # "precheck", "postcheck", "interaction"
    first_detected: datetime
    last_detected: datetime
class MisconceptionReport(BaseModel):
    """Comprehensive misconception report aggregated over a class/time range."""
    class_id: Optional[str]      # None means "all classes"
    time_range: str              # TimeRange value the report was built for
    total_misconceptions: int    # sum of entry frequencies
    unique_concepts: int
    most_common: List[MisconceptionEntry]
    by_unit: Dict[str, List[MisconceptionEntry]]
    trending_up: List[MisconceptionEntry]  # Getting more frequent
    resolved: List[MisconceptionEntry]  # No longer appearing
class StudentProgressTimeline(BaseModel):
    """Timeline of a single student's progress across sessions."""
    student_id: str
    student_name: str
    units_completed: int
    total_time_minutes: int
    avg_score: float             # mean of available postcheck scores; 0.0 when none
    trend: str  # "improving", "stable", "declining" (or "insufficient_data")
    timeline: List[Dict[str, Any]]  # List of session events
class ClassComparisonData(BaseModel):
    """Aggregate performance figures for one class, used for comparisons."""
    class_id: str
    class_name: str
    student_count: int
    units_assigned: int
    avg_completion_rate: float
    avg_learning_gain: float
    avg_time_per_unit: float     # presumably minutes — confirm with DB layer
class ExportFormat(str, Enum):
    """Export format options for the export endpoints."""
    JSON = "json"
    CSV = "csv"

View File

@@ -0,0 +1,394 @@
"""
Unit Analytics API - Routes.
All API endpoints for learning gain, stop-level, misconception,
student timeline, class comparison, export, and dashboard analytics.
"""
import logging
import statistics
from datetime import datetime
from typing import Optional, Dict, Any, List
from fastapi import APIRouter, Query
from .analytics_models import (
TimeRange,
LearningGainData,
LearningGainSummary,
StopPerformance,
UnitPerformanceDetail,
MisconceptionEntry,
MisconceptionReport,
StudentProgressTimeline,
ClassComparisonData,
)
from .analytics_helpers import (
get_analytics_database,
calculate_gain_distribution,
calculate_trend,
calculate_difficulty_rating,
)
logger = logging.getLogger(__name__)
router = APIRouter(tags=["Unit Analytics"])
# ==============================================
# API Endpoints - Learning Gain
# ==============================================
# NOTE: Static routes must come BEFORE dynamic routes like /{unit_id}
@router.get("/learning-gain/compare")
async def compare_learning_gains(
unit_ids: str = Query(..., description="Comma-separated unit IDs"),
class_id: Optional[str] = Query(None),
time_range: TimeRange = Query(TimeRange.MONTH),
) -> Dict[str, Any]:
"""
Compare learning gains across multiple units.
"""
unit_list = [u.strip() for u in unit_ids.split(",")]
comparisons = []
for unit_id in unit_list:
try:
summary = await get_learning_gain_analysis(unit_id, class_id, time_range)
comparisons.append({
"unit_id": unit_id,
"avg_gain": summary.avg_gain,
"median_gain": summary.median_gain,
"total_students": summary.total_students,
"positive_rate": summary.positive_gain_count / max(summary.total_students, 1),
})
except Exception as e:
logger.error(f"Failed to get comparison for {unit_id}: {e}")
return {
"time_range": time_range.value,
"class_id": class_id,
"comparisons": sorted(comparisons, key=lambda x: x["avg_gain"], reverse=True),
}
@router.get("/learning-gain/{unit_id}", response_model=LearningGainSummary)
async def get_learning_gain_analysis(
unit_id: str,
class_id: Optional[str] = Query(None, description="Filter by class"),
time_range: TimeRange = Query(TimeRange.MONTH, description="Time range for analysis"),
) -> LearningGainSummary:
"""
Get detailed pre/post learning gain analysis for a unit.
"""
db = await get_analytics_database()
individual_gains = []
if db:
try:
sessions = await db.get_unit_sessions_with_scores(
unit_id=unit_id,
class_id=class_id,
time_range=time_range.value
)
for session in sessions:
if session.get("precheck_score") is not None and session.get("postcheck_score") is not None:
gain = session["postcheck_score"] - session["precheck_score"]
individual_gains.append(LearningGainData(
student_id=session["student_id"],
student_name=session.get("student_name", session["student_id"][:8]),
unit_id=unit_id,
precheck_score=session["precheck_score"],
postcheck_score=session["postcheck_score"],
learning_gain=gain,
))
except Exception as e:
logger.error(f"Failed to get learning gain data: {e}")
# Calculate statistics
if not individual_gains:
return LearningGainSummary(
unit_id=unit_id,
unit_title=f"Unit {unit_id}",
total_students=0,
avg_precheck=0.0, avg_postcheck=0.0,
avg_gain=0.0, median_gain=0.0, std_deviation=0.0,
positive_gain_count=0, negative_gain_count=0, no_change_count=0,
gain_distribution={}, individual_gains=[],
)
gains = [g.learning_gain for g in individual_gains]
prechecks = [g.precheck_score for g in individual_gains]
postchecks = [g.postcheck_score for g in individual_gains]
avg_gain = statistics.mean(gains)
median_gain = statistics.median(gains)
std_dev = statistics.stdev(gains) if len(gains) > 1 else 0.0
# Calculate percentiles
sorted_gains = sorted(gains)
for data in individual_gains:
rank = sorted_gains.index(data.learning_gain) + 1
data.percentile = rank / len(sorted_gains) * 100
return LearningGainSummary(
unit_id=unit_id,
unit_title=f"Unit {unit_id}",
total_students=len(individual_gains),
avg_precheck=statistics.mean(prechecks),
avg_postcheck=statistics.mean(postchecks),
avg_gain=avg_gain,
median_gain=median_gain,
std_deviation=std_dev,
positive_gain_count=sum(1 for g in gains if g > 0.01),
negative_gain_count=sum(1 for g in gains if g < -0.01),
no_change_count=sum(1 for g in gains if -0.01 <= g <= 0.01),
gain_distribution=calculate_gain_distribution(gains),
individual_gains=sorted(individual_gains, key=lambda x: x.learning_gain, reverse=True),
)
# ==============================================
# API Endpoints - Stop-Level Analytics
# ==============================================
@router.get("/unit/{unit_id}/stops", response_model=UnitPerformanceDetail)
async def get_unit_stop_analytics(
unit_id: str,
class_id: Optional[str] = Query(None),
time_range: TimeRange = Query(TimeRange.MONTH),
) -> UnitPerformanceDetail:
"""
Get detailed stop-level performance analytics.
"""
db = await get_analytics_database()
stops_data = []
if db:
try:
stop_stats = await db.get_stop_performance(
unit_id=unit_id, class_id=class_id, time_range=time_range.value
)
for stop in stop_stats:
difficulty = calculate_difficulty_rating(
stop.get("success_rate", 0.5),
stop.get("avg_attempts", 1.0)
)
stops_data.append(StopPerformance(
stop_id=stop["stop_id"],
stop_label=stop.get("stop_label", stop["stop_id"]),
attempts_total=stop.get("total_attempts", 0),
success_rate=stop.get("success_rate", 0.0),
avg_time_seconds=stop.get("avg_time_seconds", 0.0),
avg_attempts_before_success=stop.get("avg_attempts", 1.0),
common_errors=stop.get("common_errors", []),
difficulty_rating=difficulty,
))
unit_stats = await db.get_unit_overall_stats(unit_id, class_id, time_range.value)
except Exception as e:
logger.error(f"Failed to get stop analytics: {e}")
unit_stats = {}
else:
unit_stats = {}
# Identify bottleneck stops
bottlenecks = [
s.stop_id for s in stops_data
if s.difficulty_rating > 3.5 or s.success_rate < 0.6
]
return UnitPerformanceDetail(
unit_id=unit_id,
unit_title=f"Unit {unit_id}",
template=unit_stats.get("template", "unknown"),
total_sessions=unit_stats.get("total_sessions", 0),
completed_sessions=unit_stats.get("completed_sessions", 0),
completion_rate=unit_stats.get("completion_rate", 0.0),
avg_duration_minutes=unit_stats.get("avg_duration_minutes", 0.0),
stops=stops_data,
bottleneck_stops=bottlenecks,
)
# ==============================================
# API Endpoints - Misconception Tracking
# ==============================================
@router.get("/misconceptions", response_model=MisconceptionReport)
async def get_misconception_report(
class_id: Optional[str] = Query(None),
unit_id: Optional[str] = Query(None),
time_range: TimeRange = Query(TimeRange.MONTH),
limit: int = Query(20, ge=1, le=100),
) -> MisconceptionReport:
"""
Get comprehensive misconception report.
"""
db = await get_analytics_database()
misconceptions = []
if db:
try:
raw_misconceptions = await db.get_misconceptions(
class_id=class_id, unit_id=unit_id,
time_range=time_range.value, limit=limit
)
for m in raw_misconceptions:
misconceptions.append(MisconceptionEntry(
concept_id=m["concept_id"],
concept_label=m["concept_label"],
misconception_text=m["misconception_text"],
frequency=m["frequency"],
affected_student_ids=m.get("student_ids", []),
unit_id=m["unit_id"],
stop_id=m["stop_id"],
detected_via=m.get("detected_via", "unknown"),
first_detected=m.get("first_detected", datetime.utcnow()),
last_detected=m.get("last_detected", datetime.utcnow()),
))
except Exception as e:
logger.error(f"Failed to get misconceptions: {e}")
# Group by unit
by_unit = {}
for m in misconceptions:
if m.unit_id not in by_unit:
by_unit[m.unit_id] = []
by_unit[m.unit_id].append(m)
trending_up = misconceptions[:3] if misconceptions else []
resolved = []
return MisconceptionReport(
class_id=class_id,
time_range=time_range.value,
total_misconceptions=sum(m.frequency for m in misconceptions),
unique_concepts=len(set(m.concept_id for m in misconceptions)),
most_common=sorted(misconceptions, key=lambda x: x.frequency, reverse=True)[:10],
by_unit=by_unit,
trending_up=trending_up,
resolved=resolved,
)
@router.get("/misconceptions/student/{student_id}")
async def get_student_misconceptions(
student_id: str,
time_range: TimeRange = Query(TimeRange.ALL),
) -> Dict[str, Any]:
"""
Get misconceptions for a specific student.
"""
db = await get_analytics_database()
if db:
try:
misconceptions = await db.get_student_misconceptions(
student_id=student_id, time_range=time_range.value
)
return {
"student_id": student_id,
"misconceptions": misconceptions,
"recommended_remediation": [
{"concept": m["concept_label"], "activity": f"Review {m['unit_id']}/{m['stop_id']}"}
for m in misconceptions[:5]
]
}
except Exception as e:
logger.error(f"Failed to get student misconceptions: {e}")
return {
"student_id": student_id,
"misconceptions": [],
"recommended_remediation": [],
}
# ==============================================
# API Endpoints - Student Progress Timeline
# ==============================================
@router.get("/student/{student_id}/timeline", response_model=StudentProgressTimeline)
async def get_student_timeline(
student_id: str,
time_range: TimeRange = Query(TimeRange.ALL),
) -> StudentProgressTimeline:
"""
Get detailed progress timeline for a student.
"""
db = await get_analytics_database()
timeline = []
scores = []
if db:
try:
sessions = await db.get_student_sessions(
student_id=student_id, time_range=time_range.value
)
for session in sessions:
timeline.append({
"date": session.get("started_at"),
"unit_id": session.get("unit_id"),
"completed": session.get("completed_at") is not None,
"precheck": session.get("precheck_score"),
"postcheck": session.get("postcheck_score"),
"duration_minutes": session.get("duration_seconds", 0) // 60,
})
if session.get("postcheck_score") is not None:
scores.append(session["postcheck_score"])
except Exception as e:
logger.error(f"Failed to get student timeline: {e}")
trend = calculate_trend(scores) if scores else "insufficient_data"
return StudentProgressTimeline(
student_id=student_id,
student_name=f"Student {student_id[:8]}",
units_completed=sum(1 for t in timeline if t["completed"]),
total_time_minutes=sum(t["duration_minutes"] for t in timeline),
avg_score=statistics.mean(scores) if scores else 0.0,
trend=trend,
timeline=timeline,
)
# ==============================================
# API Endpoints - Class Comparison
# ==============================================
@router.get("/compare/classes", response_model=List[ClassComparisonData])
async def compare_classes(
class_ids: str = Query(..., description="Comma-separated class IDs"),
time_range: TimeRange = Query(TimeRange.MONTH),
) -> List[ClassComparisonData]:
"""
Compare performance across multiple classes.
"""
class_list = [c.strip() for c in class_ids.split(",")]
comparisons = []
db = await get_analytics_database()
if db:
for class_id in class_list:
try:
stats = await db.get_class_aggregate_stats(class_id, time_range.value)
comparisons.append(ClassComparisonData(
class_id=class_id,
class_name=stats.get("class_name", f"Klasse {class_id[:8]}"),
student_count=stats.get("student_count", 0),
units_assigned=stats.get("units_assigned", 0),
avg_completion_rate=stats.get("avg_completion_rate", 0.0),
avg_learning_gain=stats.get("avg_learning_gain", 0.0),
avg_time_per_unit=stats.get("avg_time_per_unit", 0.0),
))
except Exception as e:
logger.error(f"Failed to get stats for class {class_id}: {e}")
return sorted(comparisons, key=lambda x: x.avg_learning_gain, reverse=True)

View File

@@ -0,0 +1,57 @@
# ==============================================
# Breakpilot Drive - Unit API (barrel re-export)
# ==============================================
# This package was split into:
#   - models.py            (Pydantic models)
#   - helpers.py           (Auth, DB, token, validation helpers)
#   - routes.py            (session & core unit routes)
#   - definition_routes.py (unit definition CRUD routes)
#   - content_routes.py    (H5P, worksheet, PDF routes)
#
# The `router` object is assembled here by including all sub-routers.
# Importers that did `from unit_api import router` continue to work.
from fastapi import APIRouter
from .routes import router as _routes_router
from .definition_routes import router as _definition_router
from .content_routes import router as _content_router
# Re-export models for any direct importers
from .models import ( # noqa: F401
    UnitDefinitionResponse,
    CreateSessionRequest,
    SessionResponse,
    TelemetryEvent,
    TelemetryPayload,
    TelemetryResponse,
    PostcheckAnswer,
    CompleteSessionRequest,
    SessionSummaryResponse,
    UnitListItem,
    RecommendedUnit,
    CreateUnitRequest,
    UpdateUnitRequest,
    ValidationError,
    ValidationResult,
)
# Re-export helpers for any direct importers
from .helpers import ( # noqa: F401
    get_optional_current_user,
    get_unit_database,
    create_session_token,
    verify_session_token,
    get_session_from_token,
    validate_unit_definition,
    USE_DATABASE,
    REQUIRE_AUTH,
    SECRET_KEY,
)
# Assemble the combined router.
# The included sub-routers already carry the "/api/units" prefix themselves,
# so this plain router adds no extra prefix of its own.
router = APIRouter()
router.include_router(_routes_router)
router.include_router(_definition_router)
router.include_router(_content_router)

View File

@@ -0,0 +1,160 @@
# ==============================================
# Breakpilot Drive - Unit Content Generation Routes
# ==============================================
# API endpoints for H5P content, worksheets, and PDF generation.
# Extracted from unit_api.py for file-size compliance.
from fastapi import APIRouter, HTTPException, Query, Depends
from typing import Optional, Dict, Any
import logging
from .models import UnitDefinitionResponse
from .helpers import get_optional_current_user, get_unit_database
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/units", tags=["Breakpilot Units"])
@router.get("/content/{unit_id}/h5p")
async def generate_h5p_content(
unit_id: str,
locale: str = Query("de-DE", description="Target locale"),
user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> Dict[str, Any]:
"""
Generate H5P content items for a unit.
Returns H5P-compatible content structures for:
- Drag and Drop (vocabulary matching)
- Fill in the Blanks (concept texts)
- Multiple Choice (misconception targeting)
"""
from content_generators import generate_h5p_for_unit, H5PGenerator, generate_h5p_manifest
# Get unit definition
db = await get_unit_database()
unit_def = None
if db:
try:
unit = await db.get_unit_definition(unit_id)
if unit:
unit_def = unit.get("definition", {})
except Exception as e:
logger.error(f"Failed to get unit for H5P generation: {e}")
if not unit_def:
raise HTTPException(status_code=404, detail=f"Unit not found: {unit_id}")
try:
generator = H5PGenerator(locale=locale)
contents = generator.generate_from_unit(unit_def)
manifest = generate_h5p_manifest(contents, unit_id)
return {
"unit_id": unit_id,
"locale": locale,
"generated_count": len(contents),
"manifest": manifest,
"contents": [c.to_h5p_structure() for c in contents]
}
except Exception as e:
logger.error(f"H5P generation failed: {e}")
raise HTTPException(status_code=500, detail=f"H5P generation failed: {str(e)}")
@router.get("/content/{unit_id}/worksheet")
async def generate_worksheet_html(
unit_id: str,
locale: str = Query("de-DE", description="Target locale"),
user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> Dict[str, Any]:
"""
Generate worksheet HTML for a unit.
Returns HTML that can be:
- Displayed in browser
- Converted to PDF using weasyprint
- Printed directly
"""
from content_generators import PDFGenerator
# Get unit definition
db = await get_unit_database()
unit_def = None
if db:
try:
unit = await db.get_unit_definition(unit_id)
if unit:
unit_def = unit.get("definition", {})
except Exception as e:
logger.error(f"Failed to get unit for worksheet generation: {e}")
if not unit_def:
raise HTTPException(status_code=404, detail=f"Unit not found: {unit_id}")
try:
generator = PDFGenerator(locale=locale)
worksheet = generator.generate_from_unit(unit_def)
return {
"unit_id": unit_id,
"locale": locale,
"title": worksheet.title,
"sections": len(worksheet.sections),
"html": worksheet.to_html()
}
except Exception as e:
logger.error(f"Worksheet generation failed: {e}")
raise HTTPException(status_code=500, detail=f"Worksheet generation failed: {str(e)}")
@router.get("/content/{unit_id}/worksheet.pdf")
async def download_worksheet_pdf(
unit_id: str,
locale: str = Query("de-DE", description="Target locale"),
user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
):
"""
Generate and download worksheet as PDF.
Requires weasyprint to be installed on the server.
"""
from fastapi.responses import Response
# Get unit definition
db = await get_unit_database()
unit_def = None
if db:
try:
unit = await db.get_unit_definition(unit_id)
if unit:
unit_def = unit.get("definition", {})
except Exception as e:
logger.error(f"Failed to get unit for PDF generation: {e}")
if not unit_def:
raise HTTPException(status_code=404, detail=f"Unit not found: {unit_id}")
try:
from content_generators import generate_worksheet_pdf
pdf_bytes = generate_worksheet_pdf(unit_def, locale)
return Response(
content=pdf_bytes,
media_type="application/pdf",
headers={
"Content-Disposition": f'attachment; filename="{unit_id}_worksheet.pdf"'
}
)
except ImportError:
raise HTTPException(
status_code=501,
detail="PDF generation not available. Install weasyprint: pip install weasyprint"
)
except Exception as e:
logger.error(f"PDF generation failed: {e}")
raise HTTPException(status_code=500, detail=f"PDF generation failed: {str(e)}")

View File

@@ -0,0 +1,301 @@
# ==============================================
# Breakpilot Drive - Unit Definition CRUD Routes
# ==============================================
# Endpoints for creating, updating, deleting, and validating
# unit definitions. Extracted from unit_routes.py for file-size compliance.
from fastapi import APIRouter, HTTPException, Query, Depends
from typing import Optional, Dict, Any
from datetime import datetime
import logging
from .models import (
UnitDefinitionResponse,
CreateUnitRequest,
UpdateUnitRequest,
ValidationResult,
)
from .helpers import (
get_optional_current_user,
get_unit_database,
validate_unit_definition,
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/units", tags=["Breakpilot Units"])
@router.post("/definitions", response_model=UnitDefinitionResponse)
async def create_unit_definition(
request_data: CreateUnitRequest,
user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> UnitDefinitionResponse:
"""
Create a new unit definition.
- Validates unit structure
- Saves to database or JSON file
- Returns created unit
"""
import json
from pathlib import Path
# Build full definition
definition = {
"unit_id": request_data.unit_id,
"template": request_data.template,
"version": request_data.version,
"locale": request_data.locale,
"grade_band": request_data.grade_band,
"duration_minutes": request_data.duration_minutes,
"difficulty": request_data.difficulty,
"subject": request_data.subject,
"topic": request_data.topic,
"learning_objectives": request_data.learning_objectives,
"stops": request_data.stops,
"precheck": request_data.precheck or {
"question_set_id": f"{request_data.unit_id}_precheck",
"required": True,
"time_limit_seconds": 120
},
"postcheck": request_data.postcheck or {
"question_set_id": f"{request_data.unit_id}_postcheck",
"required": True,
"time_limit_seconds": 180
},
"teacher_controls": request_data.teacher_controls or {
"allow_skip": True,
"allow_replay": True,
"max_time_per_stop_sec": 90,
"show_hints": True,
"require_precheck": True,
"require_postcheck": True
},
"assets": request_data.assets or {},
"metadata": request_data.metadata or {
"author": user.get("email", "Unknown") if user else "Unknown",
"created": datetime.utcnow().isoformat(),
"curriculum_reference": ""
}
}
# Validate
validation = validate_unit_definition(definition)
if not validation.valid:
error_msgs = [f"{e.field}: {e.message}" for e in validation.errors]
raise HTTPException(status_code=400, detail=f"Validierung fehlgeschlagen: {'; '.join(error_msgs)}")
# Check if unit_id already exists
db = await get_unit_database()
if db:
try:
existing = await db.get_unit_definition(request_data.unit_id)
if existing:
raise HTTPException(status_code=409, detail=f"Unit existiert bereits: {request_data.unit_id}")
# Save to database
await db.create_unit_definition(
unit_id=request_data.unit_id,
template=request_data.template,
version=request_data.version,
locale=request_data.locale,
grade_band=request_data.grade_band,
duration_minutes=request_data.duration_minutes,
difficulty=request_data.difficulty,
definition=definition,
status=request_data.status
)
logger.info(f"Unit created in database: {request_data.unit_id}")
except HTTPException:
raise
except Exception as e:
logger.warning(f"Database save failed, using JSON fallback: {e}")
# Fallback to JSON
units_dir = Path(__file__).parent / "data" / "units"
units_dir.mkdir(parents=True, exist_ok=True)
json_path = units_dir / f"{request_data.unit_id}.json"
if json_path.exists():
raise HTTPException(status_code=409, detail=f"Unit existiert bereits: {request_data.unit_id}")
with open(json_path, "w", encoding="utf-8") as f:
json.dump(definition, f, ensure_ascii=False, indent=2)
logger.info(f"Unit created as JSON: {json_path}")
else:
# JSON only mode
units_dir = Path(__file__).parent / "data" / "units"
units_dir.mkdir(parents=True, exist_ok=True)
json_path = units_dir / f"{request_data.unit_id}.json"
if json_path.exists():
raise HTTPException(status_code=409, detail=f"Unit existiert bereits: {request_data.unit_id}")
with open(json_path, "w", encoding="utf-8") as f:
json.dump(definition, f, ensure_ascii=False, indent=2)
logger.info(f"Unit created as JSON: {json_path}")
return UnitDefinitionResponse(
unit_id=request_data.unit_id,
template=request_data.template,
version=request_data.version,
locale=request_data.locale,
grade_band=request_data.grade_band,
duration_minutes=request_data.duration_minutes,
difficulty=request_data.difficulty,
definition=definition
)
@router.put("/definitions/{unit_id}", response_model=UnitDefinitionResponse)
async def update_unit_definition(
    unit_id: str,
    request_data: UpdateUnitRequest,
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> UnitDefinitionResponse:
    """
    Update an existing unit definition.

    - Merges updates with existing definition
    - Re-validates
    - Saves updated version

    Lookup and persistence both prefer the database and fall back to the
    JSON files under ``data/units/`` when the DB is unavailable.
    Raises 404 when the unit exists in neither store and 400 when the
    merged definition fails validation.
    """
    import json
    from pathlib import Path
    # Get existing unit
    db = await get_unit_database()
    existing = None
    if db:
        try:
            existing = await db.get_unit_definition(unit_id)
        except Exception as e:
            logger.warning(f"Database read failed: {e}")
    if not existing:
        # Try JSON file
        json_path = Path(__file__).parent / "data" / "units" / f"{unit_id}.json"
        if json_path.exists():
            with open(json_path, "r", encoding="utf-8") as f:
                file_data = json.load(f)
            # Normalize the flat JSON file into the same record shape the DB returns.
            existing = {
                "unit_id": file_data.get("unit_id"),
                "template": file_data.get("template"),
                "version": file_data.get("version", "1.0.0"),
                "locale": file_data.get("locale", ["de-DE"]),
                "grade_band": file_data.get("grade_band", []),
                "duration_minutes": file_data.get("duration_minutes", 8),
                "difficulty": file_data.get("difficulty", "base"),
                "definition": file_data
            }
    if not existing:
        raise HTTPException(status_code=404, detail=f"Unit nicht gefunden: {unit_id}")
    # Merge updates into existing definition
    definition = existing.get("definition", {})
    # exclude_unset: only fields the client actually sent are considered;
    # explicit None values are additionally skipped in the merge below.
    update_dict = request_data.model_dump(exclude_unset=True)
    for key, value in update_dict.items():
        if value is not None:
            definition[key] = value
    # Validate updated definition
    validation = validate_unit_definition(definition)
    if not validation.valid:
        error_msgs = [f"{e.field}: {e.message}" for e in validation.errors]
        raise HTTPException(status_code=400, detail=f"Validierung fehlgeschlagen: {'; '.join(error_msgs)}")
    # Save
    if db:
        try:
            await db.update_unit_definition(
                unit_id=unit_id,
                version=definition.get("version"),
                locale=definition.get("locale"),
                grade_band=definition.get("grade_band"),
                duration_minutes=definition.get("duration_minutes"),
                difficulty=definition.get("difficulty"),
                definition=definition,
                status=update_dict.get("status")
            )
            logger.info(f"Unit updated in database: {unit_id}")
        except Exception as e:
            # DB write failed: persist to the JSON fallback so the edit is not lost.
            logger.warning(f"Database update failed, using JSON: {e}")
            json_path = Path(__file__).parent / "data" / "units" / f"{unit_id}.json"
            with open(json_path, "w", encoding="utf-8") as f:
                json.dump(definition, f, ensure_ascii=False, indent=2)
    else:
        json_path = Path(__file__).parent / "data" / "units" / f"{unit_id}.json"
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump(definition, f, ensure_ascii=False, indent=2)
        logger.info(f"Unit updated as JSON: {json_path}")
    # Response echoes the merged definition; stored values are the fallback
    # for any top-level field the update did not touch.
    return UnitDefinitionResponse(
        unit_id=unit_id,
        template=definition.get("template", existing.get("template")),
        version=definition.get("version", existing.get("version", "1.0.0")),
        locale=definition.get("locale", existing.get("locale", ["de-DE"])),
        grade_band=definition.get("grade_band", existing.get("grade_band", [])),
        duration_minutes=definition.get("duration_minutes", existing.get("duration_minutes", 8)),
        difficulty=definition.get("difficulty", existing.get("difficulty", "base")),
        definition=definition
    )
@router.delete("/definitions/{unit_id}")
async def delete_unit_definition(
    unit_id: str,
    force: bool = Query(False, description="Force delete even if published"),
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> Dict[str, Any]:
    """
    Delete a unit definition.

    Drafts are deletable by default; published units require ``force=true``.
    Removes the DB record (when available) and any JSON fallback file.
    Raises 404 when the unit exists in neither store.
    """
    from pathlib import Path
    removed = False
    db = await get_unit_database()
    if db:
        try:
            record = await db.get_unit_definition(unit_id)
            if record:
                # Guard: published units are protected unless explicitly forced.
                if record.get("status", "draft") == "published" and not force:
                    raise HTTPException(
                        status_code=400,
                        detail="Veroeffentlichte Units koennen nicht geloescht werden. Verwende force=true."
                    )
                await db.delete_unit_definition(unit_id)
                removed = True
                logger.info(f"Unit deleted from database: {unit_id}")
        except HTTPException:
            raise
        except Exception as e:
            logger.warning(f"Database delete failed: {e}")
    # A JSON fallback file may exist alongside (or instead of) the DB row.
    json_path = Path(__file__).parent / "data" / "units" / f"{unit_id}.json"
    if json_path.exists():
        json_path.unlink()
        removed = True
        logger.info(f"Unit JSON deleted: {json_path}")
    if not removed:
        raise HTTPException(status_code=404, detail=f"Unit nicht gefunden: {unit_id}")
    return {"success": True, "unit_id": unit_id, "message": "Unit geloescht"}
@router.post("/definitions/validate", response_model=ValidationResult)
async def validate_unit(
    unit_data: Dict[str, Any],
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> ValidationResult:
    """
    Dry-run validation of a unit definition.

    Nothing is persisted; the caller receives the full list of
    errors and warnings for the submitted payload.
    """
    return validate_unit_definition(unit_data)

View File

@@ -0,0 +1,204 @@
# ==============================================
# Breakpilot Drive - Unit API Helpers
# ==============================================
# Auth, database, token, and validation helpers for the Unit API.
# Extracted from unit_api.py for file-size compliance.
import logging
import os
from datetime import datetime, timedelta, timezone
from typing import Optional, Dict, Any, List

import jwt
from fastapi import HTTPException, Request

from .models import ValidationError, ValidationResult
logger = logging.getLogger(__name__)
# Feature flags
USE_DATABASE = os.getenv("GAME_USE_DATABASE", "true").lower() == "true"
REQUIRE_AUTH = os.getenv("GAME_REQUIRE_AUTH", "false").lower() == "true"
SECRET_KEY = os.getenv("JWT_SECRET_KEY", "dev-secret-key-change-in-production")
# ==============================================
# Auth Dependency (reuse from game_api)
# ==============================================
async def get_optional_current_user(request: Request) -> Optional[Dict[str, Any]]:
    """Resolve the current user when auth is enforced, else return ``None``.

    Propagates HTTPExceptions from the underlying auth dependency and maps
    any other failure to HTTP 401.
    """
    if not REQUIRE_AUTH:
        return None
    try:
        from auth import get_current_user
        return await get_current_user(request)
    except HTTPException:
        raise
    except ImportError:
        logger.warning("Auth module not available")
        return None
    except Exception as e:
        logger.error(f"Auth error: {e}")
        raise HTTPException(status_code=401, detail="Authentication failed")
# ==============================================
# Database Integration
# ==============================================
_unit_db = None
async def get_unit_database():
    """Lazily create and cache the unit database handle.

    Returns ``None`` when the database feature flag is off or the backend
    cannot be initialized; callers then fall back to JSON files.
    """
    global _unit_db
    if not USE_DATABASE:
        return None
    if _unit_db is not None:
        return _unit_db
    try:
        from unit.database import get_unit_db
        _unit_db = await get_unit_db()
        logger.info("Unit database initialized")
    except ImportError:
        logger.warning("Unit database module not available")
    except Exception as e:
        logger.warning(f"Unit database not available: {e}")
    return _unit_db
# ==============================================
# Token Helpers
# ==============================================
def create_session_token(session_id: str, student_id: str, expires_hours: int = 4) -> str:
    """Create a signed JWT session token for telemetry authentication.

    Args:
        session_id: Unit session identifier embedded in the token.
        student_id: Student identifier embedded in the token.
        expires_hours: Token lifetime in hours (default 4).

    Returns:
        HS256-signed JWT string carrying session_id, student_id, exp and iat.
    """
    # Single timezone-aware timestamp: datetime.utcnow() is deprecated (naive),
    # and calling it twice let iat/exp drift by microseconds.
    now = datetime.now(timezone.utc)
    payload = {
        "session_id": session_id,
        "student_id": student_id,
        "exp": now + timedelta(hours=expires_hours),
        "iat": now,
    }
    return jwt.encode(payload, SECRET_KEY, algorithm="HS256")
def verify_session_token(token: str) -> Optional[Dict[str, Any]]:
    """Decode a session token; return its payload, or ``None`` when invalid or expired."""
    try:
        return jwt.decode(token, SECRET_KEY, algorithms=["HS256"])
    except jwt.InvalidTokenError:
        # ExpiredSignatureError subclasses InvalidTokenError, so one handler
        # covers both the expired and the malformed case.
        return None
async def get_session_from_token(request: Request) -> Optional[Dict[str, Any]]:
    """Extract the bearer token from the Authorization header and verify it."""
    header_value = request.headers.get("Authorization", "")
    prefix = "Bearer "
    if not header_value.startswith(prefix):
        return None
    return verify_session_token(header_value[len(prefix):])
# ==============================================
# Validation
# ==============================================
def validate_unit_definition(unit_data: Dict[str, Any]) -> ValidationResult:
    """
    Validate a unit definition structure.

    Checks required fields, template/interaction vocabulary, stop-id
    uniqueness, duration bounds and difficulty values. Hard failures are
    collected in ``errors``; advisory findings in ``warnings``.
    """
    errors: List[ValidationError] = []
    warnings: List[ValidationError] = []

    template = unit_data.get("template")
    if not unit_data.get("unit_id"):
        errors.append(ValidationError(field="unit_id", message="unit_id ist erforderlich"))
    if not template:
        errors.append(ValidationError(field="template", message="template ist erforderlich"))
    elif template not in ("flight_path", "station_loop"):
        errors.append(ValidationError(
            field="template",
            message="template muss 'flight_path' oder 'station_loop' sein"
        ))

    stops = unit_data.get("stops", [])
    if not stops:
        errors.append(ValidationError(field="stops", message="Mindestens 1 Stop erforderlich"))
    else:
        # flight_path units are expected to have a minimal path length.
        if template == "flight_path" and len(stops) < 3:
            warnings.append(ValidationError(
                field="stops",
                message="FlightPath sollte mindestens 3 Stops haben",
                severity="warning"
            ))
        known_interactions = {
            "aim_and_pass", "slider_adjust", "slider_equivalence",
            "sequence_arrange", "toggle_switch", "drag_match",
            "error_find", "transfer_apply",
        }
        seen_ids = set()
        for idx, stop in enumerate(stops):
            sid = stop.get("stop_id")
            if not sid:
                errors.append(ValidationError(
                    field=f"stops[{idx}].stop_id",
                    message=f"Stop {idx}: stop_id fehlt"
                ))
            else:
                if sid in seen_ids:
                    errors.append(ValidationError(
                        field=f"stops[{idx}].stop_id",
                        message=f"Stop {idx}: Doppelte stop_id '{sid}'"
                    ))
                seen_ids.add(sid)
            # Interaction type: missing is an error, unknown only a warning.
            interaction_type = stop.get("interaction", {}).get("type")
            if not interaction_type:
                errors.append(ValidationError(
                    field=f"stops[{idx}].interaction.type",
                    message=f"Stop {stop.get('stop_id', idx)}: Interaktionstyp fehlt"
                ))
            elif interaction_type not in known_interactions:
                warnings.append(ValidationError(
                    field=f"stops[{idx}].interaction.type",
                    message=f"Stop {stop.get('stop_id', idx)}: Unbekannter Interaktionstyp '{interaction_type}'",
                    severity="warning"
                ))
            if not stop.get("label"):
                warnings.append(ValidationError(
                    field=f"stops[{idx}].label",
                    message=f"Stop {stop.get('stop_id', idx)}: Label fehlt",
                    severity="warning"
                ))

    if not 3 <= unit_data.get("duration_minutes", 0) <= 20:
        warnings.append(ValidationError(
            field="duration_minutes",
            message="Dauer sollte zwischen 3 und 20 Minuten liegen",
            severity="warning"
        ))
    difficulty = unit_data.get("difficulty")
    if difficulty and difficulty not in ("base", "advanced"):
        warnings.append(ValidationError(
            field="difficulty",
            message="difficulty sollte 'base' oder 'advanced' sein",
            severity="warning"
        ))

    return ValidationResult(valid=not errors, errors=errors, warnings=warnings)

View File

@@ -0,0 +1,178 @@
from __future__ import annotations
from pydantic import BaseModel, Field
from typing import List, Dict, Optional
from pathlib import Path
from datetime import datetime
import uuid
import json
import threading
# Basisverzeichnis für Arbeitsblätter & Lerneinheiten
BASE_DIR = Path.home() / "Arbeitsblaetter"
LEARNING_UNITS_DIR = BASE_DIR / "Lerneinheiten"
LEARNING_UNITS_FILE = LEARNING_UNITS_DIR / "learning_units.json"
# Thread-Lock, damit Dateizugriffe sicher bleiben
_lock = threading.Lock()
class LearningUnitBase(BaseModel):
    """Shared learning-unit fields (no id/timestamps; see LearningUnit)."""
    title: str = Field(..., description="Titel der Lerneinheit, z.B. 'Das Auge Klasse 7'")
    description: Optional[str] = Field(None, description="Freitext-Beschreibung")
    topic: Optional[str] = Field(None, description="Kurz-Thema, z.B. 'Auge'")
    grade_level: Optional[str] = Field(None, description="Klassenstufe, z.B. '7'")
    language: Optional[str] = Field("de", description="Hauptsprache der Lerneinheit (z.B. 'de', 'tr')")
    worksheet_files: List[str] = Field(
        default_factory=list,
        description="Liste der zugeordneten Arbeitsblatt-Dateien (Basenames oder Pfade)"
    )
    status: str = Field(
        "raw",
        description="Pipeline-Status: raw, cleaned, qa_generated, mc_generated, cloze_generated"
    )
class LearningUnitCreate(LearningUnitBase):
    """Creation payload — identical to the base fields; id/timestamps are assigned on save."""
class LearningUnitUpdate(BaseModel):
    """Partial update for a learning unit; only fields that are set get applied."""
    title: Optional[str] = None
    description: Optional[str] = None
    topic: Optional[str] = None
    grade_level: Optional[str] = None
    language: Optional[str] = None
    worksheet_files: Optional[List[str]] = None
    status: Optional[str] = None
class LearningUnit(LearningUnitBase):
    """A persisted learning unit with identity and timestamps."""
    id: str
    created_at: datetime
    updated_at: datetime

    @classmethod
    def from_dict(cls, data: Dict) -> "LearningUnit":
        """Build a unit from a stored dict, parsing ISO timestamp strings."""
        payload = dict(data)
        for ts_field in ("created_at", "updated_at"):
            raw = payload.get(ts_field)
            if isinstance(raw, str):
                payload[ts_field] = datetime.fromisoformat(raw)
        return cls(**payload)

    def to_dict(self) -> Dict:
        """Serialize to a JSON-safe dict with ISO-formatted timestamps."""
        serialized = self.dict()
        serialized["created_at"] = self.created_at.isoformat()
        serialized["updated_at"] = self.updated_at.isoformat()
        return serialized
def _ensure_storage():
    """Make sure the units directory and the JSON store file exist."""
    LEARNING_UNITS_DIR.mkdir(parents=True, exist_ok=True)
    if not LEARNING_UNITS_FILE.exists():
        # Seed with an empty JSON object so readers always find valid JSON.
        LEARNING_UNITS_FILE.write_text("{}", encoding="utf-8")
def _load_all_units() -> Dict[str, Dict]:
    """Read the whole unit store; returns ``{}`` on corrupt or non-dict content."""
    _ensure_storage()
    try:
        raw = json.loads(LEARNING_UNITS_FILE.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        return {}
    return raw if isinstance(raw, dict) else {}
def _save_all_units(raw: Dict[str, Dict]) -> None:
    """Overwrite the unit store file with *raw* as pretty-printed JSON."""
    _ensure_storage()
    LEARNING_UNITS_FILE.write_text(
        json.dumps(raw, ensure_ascii=False, indent=2), encoding="utf-8"
    )
def list_learning_units() -> List[LearningUnit]:
    """Return every stored learning unit."""
    with _lock:
        stored = _load_all_units()
    return [LearningUnit.from_dict(entry) for entry in stored.values()]
def get_learning_unit(unit_id: str) -> Optional[LearningUnit]:
    """Look up a single unit by id; ``None`` when absent."""
    with _lock:
        entry = _load_all_units().get(unit_id)
    return LearningUnit.from_dict(entry) if entry else None
def create_learning_unit(payload: LearningUnitCreate) -> LearningUnit:
    """Persist a new unit with a fresh UUID and creation timestamps."""
    now = datetime.utcnow()
    unit = LearningUnit(
        id=str(uuid.uuid4()),
        created_at=now,
        updated_at=now,
        **payload.dict(),
    )
    with _lock:
        stored = _load_all_units()
        stored[unit.id] = unit.to_dict()
        _save_all_units(stored)
    return unit
def update_learning_unit(unit_id: str, payload: LearningUnitUpdate) -> Optional[LearningUnit]:
    """Apply the explicitly-set fields of *payload* to a stored unit.

    Returns the updated unit, or ``None`` when the id is unknown.
    """
    with _lock:
        stored = _load_all_units()
        entry = stored.get(unit_id)
        if not entry:
            return None
        unit = LearningUnit.from_dict(entry)
        # exclude_unset: untouched fields keep their stored values.
        for name, value in payload.dict(exclude_unset=True).items():
            setattr(unit, name, value)
        unit.updated_at = datetime.utcnow()
        stored[unit.id] = unit.to_dict()
        _save_all_units(stored)
        return unit
def delete_learning_unit(unit_id: str) -> bool:
    """Remove a unit from the store; ``True`` when something was deleted."""
    with _lock:
        stored = _load_all_units()
        try:
            del stored[unit_id]
        except KeyError:
            return False
        _save_all_units(stored)
        return True
def attach_worksheets(unit_id: str, worksheet_files: List[str]) -> Optional[LearningUnit]:
    """
    Attach worksheet files to an existing unit, de-duplicating entries.

    The stored list is kept sorted. Returns ``None`` when the unit is unknown.
    """
    with _lock:
        stored = _load_all_units()
        entry = stored.get(unit_id)
        if not entry:
            return None
        unit = LearningUnit.from_dict(entry)
        merged = set(unit.worksheet_files)
        merged.update(worksheet_files)
        unit.worksheet_files = sorted(merged)
        unit.updated_at = datetime.utcnow()
        stored[unit.id] = unit.to_dict()
        _save_all_units(stored)
        return unit

View File

@@ -0,0 +1,376 @@
from typing import List, Dict, Any, Optional
from datetime import datetime
from pathlib import Path
import json
import os
import logging
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from .learning import (
LearningUnit,
LearningUnitCreate,
LearningUnitUpdate,
list_learning_units,
get_learning_unit,
create_learning_unit,
update_learning_unit,
delete_learning_unit,
)
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/learning-units",
tags=["learning-units"],
)
# ---------- Payload-Modelle für das Frontend ----------
class LearningUnitCreatePayload(BaseModel):
    """
    Payload exactly as sent by the frontend:
    {
        "student": "...",
        "subject": "...",
        "title": "...",
        "grade": "7a"
    }
    At least one of student/subject/title must be set (enforced in the route).
    """
    student: Optional[str] = None
    subject: Optional[str] = None
    title: Optional[str] = None
    grade: Optional[str] = None
class AttachWorksheetsPayload(BaseModel):
    """Worksheet file names to attach to a unit."""
    worksheet_files: List[str]
class RemoveWorksheetPayload(BaseModel):
    """A single worksheet file name to detach from a unit."""
    worksheet_file: str
class GenerateFromAnalysisPayload(BaseModel):
    """Analysis JSON plus the number of questions to generate."""
    analysis_data: Dict[str, Any]
    num_questions: int = 8
# ---------- Hilfsfunktion: Backend-Modell -> Frontend-Objekt ----------
def unit_to_frontend_dict(lu: LearningUnit) -> Dict[str, Any]:
    """
    Convert a LearningUnit into the shape the frontend expects.

    Key fields: id, label (display name), meta (subtitle line) and
    worksheet_files (list of file names).
    """
    # Subtitle, e.g. "Thema: Auge · Klasse: 7a · angelegt am 10.12.2025"
    subtitle_bits: List[str] = []
    if lu.topic:
        subtitle_bits.append(f"Thema: {lu.topic}")
    if lu.grade_level:
        subtitle_bits.append(f"Klasse: {lu.grade_level}")
    subtitle_bits.append(f"angelegt am {lu.created_at.strftime('%d.%m.%Y')}")
    return {
        "id": lu.id,
        "label": lu.title or "Lerneinheit",
        "meta": " · ".join(subtitle_bits),
        "title": lu.title,
        "topic": lu.topic,
        "grade_level": lu.grade_level,
        "language": lu.language,
        "status": lu.status,
        "worksheet_files": lu.worksheet_files,
        "created_at": lu.created_at.isoformat(),
        "updated_at": lu.updated_at.isoformat(),
    }
# ---------- Endpunkte ----------
@router.get("/", response_model=List[Dict[str, Any]])
def api_list_learning_units():
    """List every learning unit in frontend shape."""
    return [unit_to_frontend_dict(unit) for unit in list_learning_units()]
@router.post("/", response_model=Dict[str, Any])
def api_create_learning_unit(payload: LearningUnitCreatePayload):
    """
    Create a new learning unit.

    Maps the frontend payload (student/subject/title/grade) onto the
    generic LearningUnit model. At least one of student, subject or
    title must be provided.
    """
    if not (payload.student or payload.subject or payload.title):
        raise HTTPException(
            status_code=400,
            detail="Bitte mindestens Schüler/in, Fach oder Thema angeben.",
        )
    # Display title: prefer the explicit topic/title, else "subject student".
    if payload.title:
        title = payload.title
    else:
        pieces = [part for part in (payload.subject, payload.student) if part]
        title = " ".join(pieces) if pieces else "Lerneinheit"
    unit = create_learning_unit(LearningUnitCreate(
        title=title,
        description=None,
        topic=payload.title or payload.subject or None,
        grade_level=payload.grade or None,
        language="de",
        worksheet_files=[],
        status="raw",
    ))
    return unit_to_frontend_dict(unit)
@router.post("/{unit_id}/attach-worksheets", response_model=Dict[str, Any])
def api_attach_worksheets(unit_id: str, payload: AttachWorksheetsPayload):
    """Append one or more worksheets to a unit, skipping duplicates."""
    lu = get_learning_unit(unit_id)
    if not lu:
        raise HTTPException(status_code=404, detail="Lerneinheit nicht gefunden.")
    additions = [f for f in payload.worksheet_files if f not in lu.worksheet_files]
    if additions:
        lu = update_learning_unit(
            unit_id, LearningUnitUpdate(worksheet_files=lu.worksheet_files + additions)
        )
        if not lu:
            raise HTTPException(status_code=500, detail="Lerneinheit konnte nicht aktualisiert werden.")
    return unit_to_frontend_dict(lu)
@router.post("/{unit_id}/remove-worksheet", response_model=Dict[str, Any])
def api_remove_worksheet(unit_id: str, payload: RemoveWorksheetPayload):
    """Remove exactly one worksheet from a unit (no-op when it is not attached)."""
    lu = get_learning_unit(unit_id)
    if not lu:
        raise HTTPException(status_code=404, detail="Lerneinheit nicht gefunden.")
    if payload.worksheet_file in lu.worksheet_files:
        remaining = [f for f in lu.worksheet_files if f != payload.worksheet_file]
        lu = update_learning_unit(unit_id, LearningUnitUpdate(worksheet_files=remaining))
        if not lu:
            raise HTTPException(status_code=500, detail="Lerneinheit konnte nicht aktualisiert werden.")
    return unit_to_frontend_dict(lu)
@router.delete("/{unit_id}")
def api_delete_learning_unit(unit_id: str):
    """Delete a learning unit entirely (not yet used by the frontend)."""
    if not delete_learning_unit(unit_id):
        raise HTTPException(status_code=404, detail="Lerneinheit nicht gefunden.")
    return {"status": "deleted", "id": unit_id}
# ---------- Generator-Endpunkte ----------
# Directory where per-unit generated artifacts live:
# <unit_id>_analyse.json, _qa.json, _mc.json, _cloze.json
LERNEINHEITEN_DIR = os.path.expanduser("~/Arbeitsblaetter/Lerneinheiten")
def _save_analysis_and_get_path(unit_id: str, analysis_data: Dict[str, Any]) -> Path:
    """Save analysis_data to disk and return the path."""
    os.makedirs(LERNEINHEITEN_DIR, exist_ok=True)
    path = Path(LERNEINHEITEN_DIR) / f"{unit_id}_analyse.json"
    with open(path, "w", encoding="utf-8") as f:
        json.dump(analysis_data, f, ensure_ascii=False, indent=2)
    return path
@router.post("/{unit_id}/generate-qa")
def api_generate_qa(unit_id: str, payload: GenerateFromAnalysisPayload):
    """Generate Q&A items with Leitner fields from analysis data."""
    if not get_learning_unit(unit_id):
        raise HTTPException(status_code=404, detail="Lerneinheit nicht gefunden.")
    analysis_path = _save_analysis_and_get_path(unit_id, payload.analysis_data)
    try:
        from ai_processing.qa_generator import generate_qa_from_analysis
        qa_path = generate_qa_from_analysis(analysis_path, num_questions=payload.num_questions)
        qa_data = json.loads(Path(qa_path).read_text(encoding="utf-8"))
        # Advance the unit's pipeline status once generation succeeded.
        update_learning_unit(unit_id, LearningUnitUpdate(status="qa_generated"))
        logger.info(f"Generated QA for unit {unit_id}: {len(qa_data.get('qa_items', []))} items")
        return qa_data
    except Exception as e:
        logger.error(f"QA generation failed for {unit_id}: {e}")
        raise HTTPException(status_code=500, detail=f"QA-Generierung fehlgeschlagen: {e}")
@router.post("/{unit_id}/generate-mc")
def api_generate_mc(unit_id: str, payload: GenerateFromAnalysisPayload):
    """Generate multiple choice questions from analysis data."""
    if not get_learning_unit(unit_id):
        raise HTTPException(status_code=404, detail="Lerneinheit nicht gefunden.")
    analysis_path = _save_analysis_and_get_path(unit_id, payload.analysis_data)
    try:
        from ai_processing.mc_generator import generate_mc_from_analysis
        mc_path = generate_mc_from_analysis(analysis_path, num_questions=payload.num_questions)
        mc_data = json.loads(Path(mc_path).read_text(encoding="utf-8"))
        # Advance the unit's pipeline status once generation succeeded.
        update_learning_unit(unit_id, LearningUnitUpdate(status="mc_generated"))
        logger.info(f"Generated MC for unit {unit_id}: {len(mc_data.get('questions', []))} questions")
        return mc_data
    except Exception as e:
        logger.error(f"MC generation failed for {unit_id}: {e}")
        raise HTTPException(status_code=500, detail=f"MC-Generierung fehlgeschlagen: {e}")
@router.post("/{unit_id}/generate-cloze")
def api_generate_cloze(unit_id: str, payload: GenerateFromAnalysisPayload):
    """Generate cloze (fill-in-the-blank) items from analysis data."""
    if not get_learning_unit(unit_id):
        raise HTTPException(status_code=404, detail="Lerneinheit nicht gefunden.")
    analysis_path = _save_analysis_and_get_path(unit_id, payload.analysis_data)
    try:
        from ai_processing.cloze_generator import generate_cloze_from_analysis
        cloze_path = generate_cloze_from_analysis(analysis_path)
        cloze_data = json.loads(Path(cloze_path).read_text(encoding="utf-8"))
        # Advance the unit's pipeline status once generation succeeded.
        update_learning_unit(unit_id, LearningUnitUpdate(status="cloze_generated"))
        logger.info(f"Generated Cloze for unit {unit_id}: {len(cloze_data.get('cloze_items', []))} items")
        return cloze_data
    except Exception as e:
        logger.error(f"Cloze generation failed for {unit_id}: {e}")
        raise HTTPException(status_code=500, detail=f"Cloze-Generierung fehlgeschlagen: {e}")
@router.get("/{unit_id}/qa")
def api_get_qa(unit_id: str):
    """Get generated QA items for a unit."""
    qa_path = Path(LERNEINHEITEN_DIR) / f"{unit_id}_qa.json"
    if not qa_path.exists():
        raise HTTPException(status_code=404, detail="Keine QA-Daten gefunden.")
    return json.loads(qa_path.read_text(encoding="utf-8"))
@router.get("/{unit_id}/mc")
def api_get_mc(unit_id: str):
    """Get generated MC questions for a unit."""
    mc_path = Path(LERNEINHEITEN_DIR) / f"{unit_id}_mc.json"
    if not mc_path.exists():
        raise HTTPException(status_code=404, detail="Keine MC-Daten gefunden.")
    return json.loads(mc_path.read_text(encoding="utf-8"))
@router.get("/{unit_id}/cloze")
def api_get_cloze(unit_id: str):
    """Get generated cloze items for a unit."""
    cloze_path = Path(LERNEINHEITEN_DIR) / f"{unit_id}_cloze.json"
    if not cloze_path.exists():
        raise HTTPException(status_code=404, detail="Keine Cloze-Daten gefunden.")
    return json.loads(cloze_path.read_text(encoding="utf-8"))
@router.post("/{unit_id}/leitner/update")
def api_update_leitner(unit_id: str, item_id: str, correct: bool):
    """Update Leitner progress for a QA item."""
    qa_path = Path(LERNEINHEITEN_DIR) / f"{unit_id}_qa.json"
    if not qa_path.exists():
        raise HTTPException(status_code=404, detail="Keine QA-Daten gefunden.")
    try:
        from ai_processing.qa_generator import update_leitner_progress
        return update_leitner_progress(qa_path, item_id, correct)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{unit_id}/leitner/next")
def api_get_next_review(unit_id: str, limit: int = 5):
    """Get next Leitner review items."""
    qa_path = Path(LERNEINHEITEN_DIR) / f"{unit_id}_qa.json"
    if not qa_path.exists():
        raise HTTPException(status_code=404, detail="Keine QA-Daten gefunden.")
    try:
        from ai_processing.qa_generator import get_next_review_items
        review_items = get_next_review_items(qa_path, limit=limit)
        return {"items": review_items, "count": len(review_items)}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
class StoryGeneratePayload(BaseModel):
    """Request body for generating a story from a vocabulary list."""
    # Each vocabulary entry is a dict (schema defined by the frontend /
    # story_generator — TODO confirm exact keys).
    vocabulary: List[Dict[str, Any]]
    language: str = "en"
    grade_level: str = "5-8"
@router.post("/{unit_id}/generate-story")
def api_generate_story(unit_id: str, payload: StoryGeneratePayload):
    """Generate a short story using vocabulary words."""
    if not get_learning_unit(unit_id):
        raise HTTPException(status_code=404, detail="Lerneinheit nicht gefunden.")
    try:
        from story_generator import generate_story
        return generate_story(
            vocabulary=payload.vocabulary,
            language=payload.language,
            grade_level=payload.grade_level,
        )
    except Exception as e:
        logger.error(f"Story generation failed for {unit_id}: {e}")
        raise HTTPException(status_code=500, detail=f"Story-Generierung fehlgeschlagen: {e}")

View File

@@ -0,0 +1,149 @@
# ==============================================
# Breakpilot Drive - Unit API Models
# ==============================================
# Pydantic models for the Unit API.
# Extracted from unit_api.py for file-size compliance.
from pydantic import BaseModel, Field
from typing import List, Optional, Dict, Any
from datetime import datetime
class UnitDefinitionResponse(BaseModel):
    """Unit definition response: top-level metadata plus the full definition payload."""
    unit_id: str
    template: str
    version: str
    locale: List[str]
    grade_band: List[str]
    duration_minutes: int
    difficulty: str
    # Complete unit configuration (stops, checks, assets, ...) as stored.
    definition: Dict[str, Any]
class CreateSessionRequest(BaseModel):
    """Request to create a unit session for one student."""
    unit_id: str
    student_id: str
    # Locale tag (e.g. "de-DE") used to select localized unit content.
    locale: str = "de-DE"
    difficulty: str = "base"
class SessionResponse(BaseModel):
    """Response after creating a session."""
    session_id: str
    # URL the client fetches the unit definition from.
    unit_definition_url: str
    # JWT used to authenticate subsequent telemetry posts.
    session_token: str
    telemetry_endpoint: str
    expires_at: datetime
class TelemetryEvent(BaseModel):
    """Single telemetry event sent by the unit player."""
    # Client-supplied event timestamp (presumably an ISO string — TODO confirm).
    ts: Optional[str] = None
    # NOTE(review): alias equals the field name, so the alias is redundant here.
    type: str = Field(..., alias="type")
    stop_id: Optional[str] = None
    metrics: Optional[Dict[str, Any]] = None
    class Config:
        # Accept population by field name as well as by alias.
        populate_by_name = True
class TelemetryPayload(BaseModel):
    """Batch telemetry payload: a session id plus its buffered events."""
    session_id: str
    events: List[TelemetryEvent]
class TelemetryResponse(BaseModel):
    """Response after receiving telemetry."""
    # Number of events accepted from the batch.
    accepted: int
class PostcheckAnswer(BaseModel):
    """Single postcheck answer keyed by question id."""
    question_id: str
    answer: str
class CompleteSessionRequest(BaseModel):
    """Request to complete a session; postcheck answers are optional."""
    postcheck_answers: Optional[List[PostcheckAnswer]] = None
class SessionSummaryResponse(BaseModel):
    """Response with session summary plus follow-up recommendations."""
    summary: Dict[str, Any]
    next_recommendations: Dict[str, Any]
class UnitListItem(BaseModel):
    """Compact unit metadata for list views (no full definition payload)."""
    unit_id: str
    template: str
    difficulty: str
    duration_minutes: int
    locale: List[str]
    grade_band: List[str]
class RecommendedUnit(BaseModel):
    """Recommended unit plus a human-readable reason for the recommendation."""
    unit_id: str
    template: str
    difficulty: str
    reason: str
class CreateUnitRequest(BaseModel):
    """Request to create a new unit definition.

    Only ``unit_id`` and ``template`` are mandatory; all other fields default
    to a short base-difficulty unit.
    """
    unit_id: str = Field(..., description="Unique unit identifier")
    template: str = Field(..., description="Template type: flight_path or station_loop")
    version: str = Field(default="1.0.0", description="Version string")
    # default_factory is the idiomatic pydantic way to declare list defaults;
    # it guarantees each instance gets its own fresh list object.
    locale: List[str] = Field(default_factory=lambda: ["de-DE"], description="Supported locales")
    grade_band: List[str] = Field(default_factory=lambda: ["5", "6", "7"], description="Target grade levels")
    duration_minutes: int = Field(default=8, ge=3, le=20, description="Expected duration")
    difficulty: str = Field(default="base", description="Difficulty level: base or advanced")
    subject: Optional[str] = Field(default=None, description="Subject area")
    topic: Optional[str] = Field(default=None, description="Topic within subject")
    learning_objectives: List[str] = Field(default_factory=list, description="Learning objectives")
    stops: List[Dict[str, Any]] = Field(default_factory=list, description="Unit stops/stations")
    precheck: Optional[Dict[str, Any]] = Field(default=None, description="Pre-check configuration")
    postcheck: Optional[Dict[str, Any]] = Field(default=None, description="Post-check configuration")
    teacher_controls: Optional[Dict[str, Any]] = Field(default=None, description="Teacher control settings")
    assets: Optional[Dict[str, Any]] = Field(default=None, description="Asset configuration")
    metadata: Optional[Dict[str, Any]] = Field(default=None, description="Additional metadata")
    status: str = Field(default="draft", description="Publication status: draft or published")
class UpdateUnitRequest(BaseModel):
    """Request to update an existing unit definition.

    All fields are optional; only fields actually sent by the client
    (``exclude_unset``) are merged into the stored definition.
    """
    version: Optional[str] = None
    locale: Optional[List[str]] = None
    grade_band: Optional[List[str]] = None
    duration_minutes: Optional[int] = Field(default=None, ge=3, le=20)
    difficulty: Optional[str] = None
    subject: Optional[str] = None
    topic: Optional[str] = None
    learning_objectives: Optional[List[str]] = None
    stops: Optional[List[Dict[str, Any]]] = None
    precheck: Optional[Dict[str, Any]] = None
    postcheck: Optional[Dict[str, Any]] = None
    teacher_controls: Optional[Dict[str, Any]] = None
    assets: Optional[Dict[str, Any]] = None
    metadata: Optional[Dict[str, Any]] = None
    status: Optional[str] = None
class ValidationError(BaseModel):
    """Single validation finding for one field of a unit definition."""
    field: str
    message: str
    severity: str = "error"  # error or warning
class ValidationResult(BaseModel):
    """Result of unit validation: overall verdict plus errors and warnings."""
    valid: bool
    # default_factory gives each instance its own list (idiomatic pydantic).
    errors: List[ValidationError] = Field(default_factory=list)
    warnings: List[ValidationError] = Field(default_factory=list)

View File

@@ -0,0 +1,494 @@
# ==============================================
# Breakpilot Drive - Unit API Routes
# ==============================================
# Endpoints for listing/getting definitions, sessions, telemetry,
# recommendations, and analytics.
# CRUD definition routes are in unit_definition_routes.py.
# Extracted from unit_api.py for file-size compliance.
from fastapi import APIRouter, HTTPException, Query, Depends, Request
from typing import List, Optional, Dict, Any
from datetime import datetime, timedelta
import uuid
import logging
from .models import (
UnitDefinitionResponse,
CreateSessionRequest,
SessionResponse,
TelemetryPayload,
TelemetryResponse,
CompleteSessionRequest,
SessionSummaryResponse,
UnitListItem,
RecommendedUnit,
)
from .helpers import (
get_optional_current_user,
get_unit_database,
create_session_token,
get_session_from_token,
REQUIRE_AUTH,
)
# Module-level logger named after the module for standard log filtering.
logger = logging.getLogger(__name__)
# All endpoints below are mounted under /api/units.
router = APIRouter(prefix="/api/units", tags=["Breakpilot Units"])
# ==============================================
# Definition List/Get Endpoints
# ==============================================
@router.get("/definitions", response_model=List[UnitListItem])
async def list_unit_definitions(
    template: Optional[str] = Query(None, description="Filter by template: flight_path, station_loop"),
    grade: Optional[str] = Query(None, description="Filter by grade level"),
    locale: str = Query("de-DE", description="Filter by locale"),
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> List[UnitListItem]:
    """
    List available unit definitions.

    Queries the database for published units matching the filter criteria;
    when no database is available (or the query fails), a single demo unit
    is returned so clients always have something to play.
    """
    db = await get_unit_database()
    if db:
        try:
            rows = await db.list_units(
                template=template,
                grade=grade,
                locale=locale,
                published_only=True
            )
            items: List[UnitListItem] = []
            for row in rows:
                items.append(UnitListItem(
                    unit_id=row["unit_id"],
                    template=row["template"],
                    difficulty=row["difficulty"],
                    duration_minutes=row["duration_minutes"],
                    locale=row["locale"],
                    grade_band=row["grade_band"],
                ))
            return items
        except Exception as e:
            logger.error(f"Failed to list units: {e}")
    # Fallback: return demo unit
    demo = UnitListItem(
        unit_id="demo_unit_v1",
        template="flight_path",
        difficulty="base",
        duration_minutes=5,
        locale=["de-DE"],
        grade_band=["5", "6", "7"],
    )
    return [demo]
@router.get("/definitions/{unit_id}", response_model=UnitDefinitionResponse)
async def get_unit_definition(
    unit_id: str,
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> UnitDefinitionResponse:
    """
    Get a specific unit definition.

    Returns the full unit configuration including stops, interactions, etc.
    Falls back to a built-in demo unit for ``demo_unit_v1``; any other
    unknown id yields a 404.
    """
    db = await get_unit_database()
    if db:
        try:
            record = await db.get_unit_definition(unit_id)
            if record:
                return UnitDefinitionResponse(
                    unit_id=record["unit_id"],
                    template=record["template"],
                    version=record["version"],
                    locale=record["locale"],
                    grade_band=record["grade_band"],
                    duration_minutes=record["duration_minutes"],
                    difficulty=record["difficulty"],
                    definition=record["definition"],
                )
        except Exception as e:
            logger.error(f"Failed to get unit definition: {e}")
    # Demo unit fallback (also used when the database is unreachable)
    if unit_id == "demo_unit_v1":
        demo_stops = [
            {"stop_id": "stop_1", "label": {"de-DE": "Start"}, "interaction": {"type": "aim_and_pass"}},
            {"stop_id": "stop_2", "label": {"de-DE": "Mitte"}, "interaction": {"type": "aim_and_pass"}},
            {"stop_id": "stop_3", "label": {"de-DE": "Ende"}, "interaction": {"type": "aim_and_pass"}},
        ]
        return UnitDefinitionResponse(
            unit_id="demo_unit_v1",
            template="flight_path",
            version="1.0.0",
            locale=["de-DE"],
            grade_band=["5", "6", "7"],
            duration_minutes=5,
            difficulty="base",
            definition={
                "unit_id": "demo_unit_v1",
                "template": "flight_path",
                "version": "1.0.0",
                "learning_objectives": ["Demo: Grundfunktion testen"],
                "stops": demo_stops,
                "teacher_controls": {"allow_skip": True, "allow_replay": True},
            },
        )
    raise HTTPException(status_code=404, detail=f"Unit not found: {unit_id}")
# ==============================================
# Session Endpoints
# ==============================================
@router.post("/sessions", response_model=SessionResponse)
async def create_unit_session(
    request_data: CreateSessionRequest,
    request: Request,
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> SessionResponse:
    """
    Create a new unit session.

    - Validates the unit exists (404 if the database knows it is missing)
    - Persists a session record when a database is available
    - Returns a session token used for subsequent telemetry calls
    """
    new_session_id = str(uuid.uuid4())
    # Token lifetime: 4 hours from creation.
    token_expiry = datetime.utcnow() + timedelta(hours=4)
    # Validate unit exists and persist the session if possible.
    db = await get_unit_database()
    if db:
        try:
            unit = await db.get_unit_definition(request_data.unit_id)
            if not unit:
                raise HTTPException(status_code=404, detail=f"Unit not found: {request_data.unit_id}")
            stop_count = len(unit.get("definition", {}).get("stops", []))
            await db.create_session(
                session_id=new_session_id,
                unit_id=request_data.unit_id,
                student_id=request_data.student_id,
                locale=request_data.locale,
                difficulty=request_data.difficulty,
                total_stops=stop_count,
            )
        except HTTPException:
            raise
        except Exception as e:
            # Continue with in-memory fallback on database failure.
            logger.error(f"Failed to create session: {e}")
    # Create session token and absolute definition URL.
    token = create_session_token(new_session_id, request_data.student_id)
    root = str(request.base_url).rstrip("/")
    return SessionResponse(
        session_id=new_session_id,
        unit_definition_url=f"{root}/api/units/definitions/{request_data.unit_id}",
        session_token=token,
        telemetry_endpoint="/api/units/telemetry",
        expires_at=token_expiry,
    )
@router.post("/telemetry", response_model=TelemetryResponse)
async def receive_telemetry(
    payload: TelemetryPayload,
    request: Request,
) -> TelemetryResponse:
    """
    Receive batched telemetry events from Unity client.

    - Validates the session token (401 when auth is required and it fails)
    - Stores each event in the database when one is available
    - Returns the count of accepted events
    """
    # Verify session token.
    session_data = await get_session_from_token(request)
    if session_data is None:
        if REQUIRE_AUTH:
            raise HTTPException(status_code=401, detail="Invalid or expired session token")
        # Dev mode: proceed without a valid token.
        logger.warning("Telemetry received without valid token (dev mode)")
    elif session_data.get("session_id") != payload.session_id:
        # Token is valid but belongs to a different session.
        raise HTTPException(status_code=403, detail="Session ID mismatch")
    db = await get_unit_database()
    accepted = 0
    for event in payload.events:
        try:
            # Fall back to server time when the client omitted a timestamp.
            event_ts = event.ts or datetime.utcnow().isoformat()
            if db:
                await db.store_telemetry_event(
                    session_id=payload.session_id,
                    event_type=event.type,
                    stop_id=event.stop_id,
                    timestamp=event_ts,
                    metrics=event.metrics,
                )
            accepted += 1
            logger.debug(f"Telemetry: {event.type} for session {payload.session_id}")
        except Exception as e:
            logger.error(f"Failed to store telemetry event: {e}")
    return TelemetryResponse(accepted=accepted)
@router.post("/sessions/{session_id}/complete", response_model=SessionSummaryResponse)
async def complete_session(
    session_id: str,
    request_data: CompleteSessionRequest,
    request: Request,
) -> SessionSummaryResponse:
    """
    Complete a unit session.

    - Processes postcheck answers if provided
    - Calculates learning gain (postcheck minus precheck score)
    - Returns summary and follow-up recommendations

    Args:
        session_id: Identifier of the session to finalize.
        request_data: May carry the student's postcheck answers.
        request: Incoming request; the session token is read from its headers.

    Raises:
        HTTPException: 401 if auth is required and the token is invalid;
            404 if the session does not exist in the database.
    """
    # Verify session token
    session_data = await get_session_from_token(request)
    if REQUIRE_AUTH and session_data is None:
        raise HTTPException(status_code=401, detail="Invalid or expired session token")
    db = await get_unit_database()
    summary = {}
    recommendations = {}
    if db:
        try:
            # Get session data
            session = await db.get_session(session_id)
            if not session:
                raise HTTPException(status_code=404, detail="Session not found")
            # Calculate postcheck score if answers provided
            postcheck_score = None
            if request_data.postcheck_answers:
                # Simple scoring: count correct answers
                # In production, would validate against question bank
                postcheck_score = len(request_data.postcheck_answers) * 0.2  # Placeholder
                postcheck_score = min(postcheck_score, 1.0)
            # Complete session in database
            await db.complete_session(
                session_id=session_id,
                postcheck_score=postcheck_score,
            )
            # Re-read so fields computed by complete_session (duration,
            # completion rate, stored scores) are reflected in the summary.
            session = await db.get_session(session_id)
            # Calculate learning gain; only defined when both scores exist.
            pre_score = session.get("precheck_score")
            post_score = session.get("postcheck_score")
            learning_gain = None
            if pre_score is not None and post_score is not None:
                learning_gain = post_score - pre_score
            summary = {
                "session_id": session_id,
                "unit_id": session.get("unit_id"),
                "duration_seconds": session.get("duration_seconds"),
                "completion_rate": session.get("completion_rate"),
                "precheck_score": pre_score,
                "postcheck_score": post_score,
                "pre_to_post_gain": learning_gain,
                "stops_completed": session.get("stops_completed"),
                "total_stops": session.get("total_stops"),
            }
            # Get recommendations for what the student should do next.
            recommendations = await db.get_recommendations(
                student_id=session.get("student_id"),
                completed_unit_id=session.get("unit_id"),
            )
        except HTTPException:
            raise
        except Exception as e:
            # Best-effort: surface the failure in the summary instead of 500.
            logger.error(f"Failed to complete session: {e}")
            summary = {"session_id": session_id, "error": str(e)}
    else:
        # Fallback summary when no database is available.
        summary = {
            "session_id": session_id,
            "duration_seconds": 0,
            "completion_rate": 1.0,
            "message": "Database not available",
        }
    return SessionSummaryResponse(
        summary=summary,
        next_recommendations=recommendations or {
            "h5p_activity_ids": [],
            "worksheet_pdf_url": None,
        },
    )
@router.get("/sessions/{session_id}")
async def get_session(
    session_id: str,
    request: Request,
) -> Dict[str, Any]:
    """
    Get session details.

    Returns the current state of a session including progress; 404 when
    the session is unknown or no database is available.
    """
    # Verify session token.
    token_data = await get_session_from_token(request)
    if REQUIRE_AUTH and token_data is None:
        raise HTTPException(status_code=401, detail="Invalid or expired session token")
    db = await get_unit_database()
    if db:
        try:
            record = await db.get_session(session_id)
            if record:
                return record
        except Exception as e:
            logger.error(f"Failed to get session: {e}")
    raise HTTPException(status_code=404, detail="Session not found")
# ==============================================
# Recommendations & Analytics
# ==============================================
@router.get("/recommendations/{student_id}", response_model=List[RecommendedUnit])
async def get_recommendations(
    student_id: str,
    grade: Optional[str] = Query(None, description="Grade level filter"),
    locale: str = Query("de-DE", description="Locale filter"),
    limit: int = Query(5, ge=1, le=20),
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> List[RecommendedUnit]:
    """
    Get recommended units for a student.

    Based on completion status and performance; falls back to recommending
    the demo unit when the database is unreachable.
    """
    db = await get_unit_database()
    if db:
        try:
            rows = await db.get_student_recommendations(
                student_id=student_id,
                grade=grade,
                locale=locale,
                limit=limit,
            )
            results: List[RecommendedUnit] = []
            for row in rows:
                results.append(RecommendedUnit(
                    unit_id=row["unit_id"],
                    template=row["template"],
                    difficulty=row["difficulty"],
                    reason=row["reason"],
                ))
            return results
        except Exception as e:
            logger.error(f"Failed to get recommendations: {e}")
    # Fallback: recommend demo unit
    return [
        RecommendedUnit(
            unit_id="demo_unit_v1",
            template="flight_path",
            difficulty="base",
            reason="Neu: Noch nicht gespielt",
        )
    ]
@router.get("/analytics/student/{student_id}")
async def get_student_analytics(
    student_id: str,
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> Dict[str, Any]:
    """
    Get unit analytics for a student.

    Includes completion rates, learning gains, time spent. Returns a
    zeroed-out baseline when no data source is available.
    """
    db = await get_unit_database()
    if db:
        try:
            return await db.get_student_unit_analytics(student_id)
        except Exception as e:
            logger.error(f"Failed to get analytics: {e}")
    return {
        "student_id": student_id,
        "units_attempted": 0,
        "units_completed": 0,
        "avg_completion_rate": 0.0,
        "avg_learning_gain": None,
        "total_minutes": 0,
    }
@router.get("/analytics/unit/{unit_id}")
async def get_unit_analytics(
    unit_id: str,
    user: Optional[Dict[str, Any]] = Depends(get_optional_current_user)
) -> Dict[str, Any]:
    """
    Get analytics for a specific unit.

    Shows aggregate performance across all students; returns a zeroed-out
    baseline when no data source is available.
    """
    db = await get_unit_database()
    if db:
        try:
            return await db.get_unit_performance(unit_id)
        except Exception as e:
            logger.error(f"Failed to get unit analytics: {e}")
    return {
        "unit_id": unit_id,
        "total_sessions": 0,
        "completed_sessions": 0,
        "completion_percent": 0.0,
        "avg_duration_minutes": 0,
        "avg_learning_gain": None,
    }
@router.get("/health")
async def health_check() -> Dict[str, Any]:
    """Health check for unit API: reports database connectivity and auth mode."""
    db = await get_unit_database()
    return {
        "status": "healthy",
        "service": "breakpilot-units",
        "database": "connected" if db else "disconnected",
        "auth_required": REQUIRE_AUTH,
    }