A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
636 lines
22 KiB
Python
"""
|
|
Unit Tests for Unit Analytics API
|
|
Tests for learning gain, misconception tracking, and export endpoints
|
|
"""
|
|
import pytest
|
|
from unittest.mock import patch, AsyncMock, MagicMock
|
|
from fastapi.testclient import TestClient
|
|
from datetime import datetime, timedelta, timezone
|
|
import uuid
|
|
import sys
|
|
|
|
sys.path.insert(0, '..')
|
|
|
|
from unit_analytics_api import (
|
|
router,
|
|
TimeRange,
|
|
ExportFormat,
|
|
LearningGainData,
|
|
LearningGainSummary,
|
|
StopPerformance,
|
|
UnitPerformanceDetail,
|
|
MisconceptionEntry,
|
|
MisconceptionReport,
|
|
StudentProgressTimeline,
|
|
ClassComparisonData,
|
|
calculate_gain_distribution,
|
|
calculate_trend,
|
|
calculate_difficulty_rating,
|
|
)
|
|
from fastapi import FastAPI
|
|
|
|
# Create test app: mount the analytics router on a bare FastAPI instance so
# endpoints can be exercised in-process via TestClient (no running server).
app = FastAPI()
app.include_router(router)
client = TestClient(app)
|
|
|
|
|
|
class TestHelperFunctions:
    """Unit tests for the pure helper functions of the analytics module."""

    def test_calculate_gain_distribution_empty(self):
        """An empty gains list yields an all-zero distribution."""
        dist = calculate_gain_distribution([])
        assert all(count == 0 for count in dist.values())

    def test_calculate_gain_distribution_positive_gains(self):
        """Positive gains land in the correct upper buckets."""
        dist = calculate_gain_distribution([0.05, 0.15, 0.25, 0.35])
        assert dist["0% to 10%"] == 1
        assert dist["10% to 20%"] == 1
        assert dist["> 20%"] == 2

    def test_calculate_gain_distribution_negative_gains(self):
        """Negative gains land in the correct lower buckets."""
        dist = calculate_gain_distribution([-0.25, -0.15, -0.05])
        assert dist["< -20%"] == 1
        assert dist["-20% to -10%"] == 1
        assert dist["-10% to 0%"] == 1

    def test_calculate_gain_distribution_mixed(self):
        """Mixed positive and negative gains spread one per bucket."""
        dist = calculate_gain_distribution([-0.30, -0.05, 0.05, 0.15, 0.30])
        for bucket in ("< -20%", "-10% to 0%", "0% to 10%", "10% to 20%", "> 20%"):
            assert dist[bucket] == 1

    def test_calculate_trend_insufficient_data(self):
        """Too few scores are reported as insufficient data."""
        assert calculate_trend([0.5, 0.6]) == "insufficient_data"

    def test_calculate_trend_improving(self):
        """Monotonically rising scores are classified as improving."""
        assert calculate_trend([0.4, 0.5, 0.6, 0.7, 0.8]) == "improving"

    def test_calculate_trend_declining(self):
        """Monotonically falling scores are classified as declining."""
        assert calculate_trend([0.8, 0.7, 0.6, 0.5, 0.4]) == "declining"

    def test_calculate_trend_stable(self):
        """Scores hovering around a constant level are classified as stable."""
        assert calculate_trend([0.5, 0.51, 0.49, 0.5, 0.5]) == "stable"

    def test_calculate_difficulty_rating_easy(self):
        """High success rate with a single attempt maps to a low rating."""
        assert calculate_difficulty_rating(success_rate=0.95, avg_attempts=1.0) < 2.0

    def test_calculate_difficulty_rating_hard(self):
        """Low success rate with many attempts maps to a high rating."""
        assert calculate_difficulty_rating(success_rate=0.3, avg_attempts=3.0) > 3.0

    def test_calculate_difficulty_rating_bounds(self):
        """Ratings never leave the inclusive 1.0..5.0 scale."""
        # Worst possible inputs must not exceed the upper bound...
        assert calculate_difficulty_rating(success_rate=0.0, avg_attempts=5.0) <= 5.0
        # ...and best possible inputs must not dip below the lower bound.
        assert calculate_difficulty_rating(success_rate=1.0, avg_attempts=1.0) >= 1.0
|
|
|
|
|
|
class TestLearningGainEndpoints:
    """HTTP tests for the learning-gain analysis endpoints."""

    def test_get_learning_gain_empty(self):
        """A unit with no data returns an empty summary, not an error."""
        resp = client.get("/api/analytics/learning-gain/test_unit")
        assert resp.status_code == 200
        body = resp.json()
        assert body["unit_id"] == "test_unit"
        assert body["total_students"] == 0
        assert body["avg_gain"] == 0.0

    def test_get_learning_gain_structure(self):
        """The summary payload exposes every documented field."""
        resp = client.get("/api/analytics/learning-gain/demo_unit_v1")
        assert resp.status_code == 200
        body = resp.json()
        expected_fields = (
            "unit_id", "unit_title", "total_students", "avg_precheck",
            "avg_postcheck", "avg_gain", "median_gain", "std_deviation",
            "positive_gain_count", "negative_gain_count", "no_change_count",
            "gain_distribution", "individual_gains",
        )
        for field in expected_fields:
            assert field in body

    def test_get_learning_gain_with_class_filter(self):
        """The class_id query filter is accepted."""
        resp = client.get("/api/analytics/learning-gain/demo_unit_v1?class_id=class-5a")
        assert resp.status_code == 200
        assert "unit_id" in resp.json()

    def test_get_learning_gain_with_time_range(self):
        """Every supported time_range value is accepted."""
        for rng in ("week", "month", "quarter", "all"):
            resp = client.get(
                f"/api/analytics/learning-gain/demo_unit_v1?time_range={rng}"
            )
            assert resp.status_code == 200

    def test_compare_learning_gains(self):
        """Comparing several units returns a list of comparison entries."""
        resp = client.get(
            "/api/analytics/learning-gain/compare?unit_ids=unit_a,unit_b,unit_c"
        )
        assert resp.status_code == 200
        body = resp.json()
        assert "comparisons" in body
        assert "time_range" in body
        assert isinstance(body["comparisons"], list)

    def test_compare_learning_gains_with_class(self):
        """The class filter is echoed back in the comparison payload."""
        resp = client.get(
            "/api/analytics/learning-gain/compare?unit_ids=unit_a,unit_b&class_id=class-5a"
        )
        assert resp.status_code == 200
        assert resp.json()["class_id"] == "class-5a"
|
|
|
|
|
|
class TestStopAnalyticsEndpoints:
    """HTTP tests for the per-stop analytics endpoint."""

    def test_get_stop_analytics(self):
        """The stop analytics payload exposes unit metadata and stop lists."""
        resp = client.get("/api/analytics/unit/demo_unit_v1/stops")
        assert resp.status_code == 200
        body = resp.json()
        for field in (
            "unit_id", "unit_title", "template", "total_sessions",
            "completed_sessions", "completion_rate", "avg_duration_minutes",
            "stops", "bottleneck_stops",
        ):
            assert field in body
        assert isinstance(body["stops"], list)

    def test_get_stop_analytics_with_filters(self):
        """Class and time-range filters are accepted together."""
        resp = client.get(
            "/api/analytics/unit/demo_unit_v1/stops?class_id=class-5a&time_range=month"
        )
        assert resp.status_code == 200
|
|
|
|
|
|
class TestMisconceptionEndpoints:
    """HTTP tests for the misconception tracking endpoints."""

    def test_get_misconception_report(self):
        """The aggregate report exposes every documented field."""
        resp = client.get("/api/analytics/misconceptions")
        assert resp.status_code == 200
        body = resp.json()
        for field in (
            "time_range", "total_misconceptions", "unique_concepts",
            "most_common", "by_unit", "trending_up", "resolved",
        ):
            assert field in body

    def test_get_misconception_report_with_filters(self):
        """class_id, unit_id, and limit filters are accepted together."""
        resp = client.get(
            "/api/analytics/misconceptions?class_id=class-5a&unit_id=demo_unit_v1&limit=10"
        )
        assert resp.status_code == 200

    def test_get_misconception_report_limit(self):
        """The most_common list never exceeds its hard cap."""
        resp = client.get("/api/analytics/misconceptions?limit=5")
        assert resp.status_code == 200
        # most_common is capped at 10 server-side regardless of the limit param
        assert len(resp.json()["most_common"]) <= 10

    def test_get_student_misconceptions(self):
        """A per-student report echoes the student id and remediation hints."""
        sid = str(uuid.uuid4())
        resp = client.get(f"/api/analytics/misconceptions/student/{sid}")
        assert resp.status_code == 200
        body = resp.json()
        assert body["student_id"] == sid
        assert "misconceptions" in body
        assert "recommended_remediation" in body

    def test_get_student_misconceptions_with_time_range(self):
        """The time_range filter is accepted on the student report."""
        sid = str(uuid.uuid4())
        resp = client.get(
            f"/api/analytics/misconceptions/student/{sid}?time_range=all"
        )
        assert resp.status_code == 200
|
|
|
|
|
|
class TestStudentTimelineEndpoints:
    """HTTP tests for the student progress timeline endpoint."""

    def test_get_student_timeline(self):
        """The timeline payload echoes the student id and summary fields."""
        sid = str(uuid.uuid4())
        resp = client.get(f"/api/analytics/student/{sid}/timeline")
        assert resp.status_code == 200
        body = resp.json()
        assert body["student_id"] == sid
        for field in (
            "student_name", "units_completed", "total_time_minutes",
            "avg_score", "trend", "timeline",
        ):
            assert field in body
        assert isinstance(body["timeline"], list)

    def test_get_student_timeline_with_time_range(self):
        """Every supported time_range value is accepted."""
        sid = str(uuid.uuid4())
        for rng in ("week", "month", "quarter", "all"):
            resp = client.get(
                f"/api/analytics/student/{sid}/timeline?time_range={rng}"
            )
            assert resp.status_code == 200
|
|
|
|
|
|
class TestClassComparisonEndpoints:
    """HTTP tests for the class comparison endpoint."""

    def test_compare_classes(self):
        """Comparing several classes returns a JSON list."""
        resp = client.get(
            "/api/analytics/compare/classes?class_ids=class-5a,class-5b,class-6a"
        )
        assert resp.status_code == 200
        assert isinstance(resp.json(), list)

    def test_compare_classes_with_time_range(self):
        """The time_range filter is accepted on class comparison."""
        resp = client.get(
            "/api/analytics/compare/classes?class_ids=class-5a,class-5b&time_range=quarter"
        )
        assert resp.status_code == 200
|
|
|
|
|
|
class TestExportEndpoints:
    """HTTP tests for the CSV/JSON export endpoints."""

    def test_export_learning_gains_json(self):
        """JSON export wraps the data with export metadata."""
        resp = client.get("/api/analytics/export/learning-gains?format=json")
        assert resp.status_code == 200
        body = resp.json()
        assert "export_date" in body
        assert "filters" in body
        assert "data" in body

    def test_export_learning_gains_csv(self):
        """CSV export sets download headers and includes a header row."""
        resp = client.get("/api/analytics/export/learning-gains?format=csv")
        assert resp.status_code == 200
        assert resp.headers["content-type"] == "text/csv; charset=utf-8"
        assert "attachment" in resp.headers.get("content-disposition", "")
        assert "student_id,unit_id,precheck,postcheck,gain" in resp.text

    def test_export_learning_gains_with_filters(self):
        """Unit and class filters are echoed back in the export metadata."""
        resp = client.get(
            "/api/analytics/export/learning-gains"
            "?unit_id=demo_unit_v1&class_id=class-5a&format=json"
        )
        assert resp.status_code == 200
        filters = resp.json()["filters"]
        assert filters["unit_id"] == "demo_unit_v1"
        assert filters["class_id"] == "class-5a"

    def test_export_misconceptions_json(self):
        """JSON misconception export wraps the data with an export date."""
        resp = client.get("/api/analytics/export/misconceptions?format=json")
        assert resp.status_code == 200
        body = resp.json()
        assert "export_date" in body
        assert "data" in body

    def test_export_misconceptions_csv(self):
        """CSV misconception export sets the CSV content type and header."""
        resp = client.get("/api/analytics/export/misconceptions?format=csv")
        assert resp.status_code == 200
        assert resp.headers["content-type"] == "text/csv; charset=utf-8"
        assert "concept_id" in resp.text

    def test_export_misconceptions_with_class(self):
        """A class filter is echoed back in the misconception export."""
        resp = client.get(
            "/api/analytics/export/misconceptions?class_id=class-5a&format=json"
        )
        assert resp.status_code == 200
        assert resp.json()["class_id"] == "class-5a"
|
|
|
|
|
|
class TestDashboardEndpoints:
    """HTTP tests for the dashboard overview endpoint."""

    def test_get_dashboard_overview(self):
        """The overview payload exposes every documented field."""
        resp = client.get("/api/analytics/dashboard/overview")
        assert resp.status_code == 200
        body = resp.json()
        for field in (
            "time_range", "total_sessions", "unique_students",
            "avg_completion_rate", "avg_learning_gain", "most_played_units",
            "struggling_concepts", "active_classes",
        ):
            assert field in body

    def test_get_dashboard_overview_with_time_range(self):
        """Each supported time_range is accepted and echoed back."""
        for rng in ("week", "month", "quarter", "all"):
            resp = client.get(
                f"/api/analytics/dashboard/overview?time_range={rng}"
            )
            assert resp.status_code == 200
            assert resp.json()["time_range"] == rng
|
|
|
|
|
|
class TestHealthEndpoint:
    """HTTP test for the service health endpoint."""

    def test_health_check(self):
        """Health check reports a healthy service and a database field."""
        resp = client.get("/api/analytics/health")
        assert resp.status_code == 200
        body = resp.json()
        assert body["status"] == "healthy"
        assert body["service"] == "unit-analytics"
        assert "database" in body
|
|
|
|
|
|
class TestPydanticModels:
    """Validation tests for the Pydantic models and enums.

    Constructs each model with representative values and asserts the fields
    are stored verbatim and defaults apply.
    """

    def test_time_range_enum(self):
        """TimeRange members compare equal to their string values."""
        assert TimeRange.WEEK == "week"
        assert TimeRange.MONTH == "month"
        assert TimeRange.QUARTER == "quarter"
        assert TimeRange.ALL == "all"

    def test_export_format_enum(self):
        """ExportFormat members compare equal to their string values."""
        assert ExportFormat.JSON == "json"
        assert ExportFormat.CSV == "csv"

    def test_learning_gain_data_model(self):
        """LearningGainData stores core fields; percentile defaults to None."""
        data = LearningGainData(
            student_id="test-student",
            student_name="Max Mustermann",
            unit_id="test_unit",
            precheck_score=0.5,
            postcheck_score=0.8,
            learning_gain=0.3,
        )
        assert data.student_id == "test-student"
        assert data.learning_gain == 0.3
        assert data.percentile is None

    def test_learning_gain_data_with_percentile(self):
        """An explicitly supplied percentile is stored verbatim."""
        data = LearningGainData(
            student_id="test-student",
            student_name="Max",
            unit_id="test_unit",
            precheck_score=0.5,
            postcheck_score=0.8,
            learning_gain=0.3,
            percentile=75.0,
        )
        assert data.percentile == 75.0

    def test_stop_performance_model(self):
        """StopPerformance stores per-stop metrics and error lists."""
        stop = StopPerformance(
            stop_id="lens",
            stop_label="Linse",
            attempts_total=100,
            success_rate=0.85,
            avg_time_seconds=45.0,
            avg_attempts_before_success=1.2,
            common_errors=["wrong_direction", "timeout"],
            difficulty_rating=2.5,
        )
        assert stop.stop_id == "lens"
        assert stop.success_rate == 0.85
        assert len(stop.common_errors) == 2

    def test_misconception_entry_model(self):
        """MisconceptionEntry stores concept data and detection timestamps."""
        # Fix: datetime.utcnow() is deprecated since Python 3.12; use a
        # single timezone-aware timestamp for both detection fields.
        now = datetime.now(timezone.utc)
        entry = MisconceptionEntry(
            concept_id="pupil_focus",
            concept_label="Pupillenfokus",
            misconception_text="Die Pupille macht scharf",
            frequency=15,
            affected_student_ids=["s1", "s2", "s3"],
            unit_id="bio_eye_v1",
            stop_id="iris",
            detected_via="precheck",
            first_detected=now,
            last_detected=now,
        )
        assert entry.concept_id == "pupil_focus"
        assert entry.frequency == 15
        assert len(entry.affected_student_ids) == 3

    def test_student_progress_timeline_model(self):
        """StudentProgressTimeline stores summary stats and the event list."""
        timeline = StudentProgressTimeline(
            student_id="test-student",
            student_name="Max Mustermann",
            units_completed=5,
            total_time_minutes=45,
            avg_score=0.78,
            trend="improving",
            timeline=[
                {"date": "2026-01-01", "unit_id": "unit_1", "score": 0.7},
                {"date": "2026-01-05", "unit_id": "unit_2", "score": 0.8},
            ],
        )
        assert timeline.units_completed == 5
        assert timeline.trend == "improving"
        assert len(timeline.timeline) == 2

    def test_class_comparison_data_model(self):
        """ClassComparisonData stores per-class aggregate metrics."""
        data = ClassComparisonData(
            class_id="class-5a",
            class_name="Klasse 5a",
            student_count=25,
            units_assigned=10,
            avg_completion_rate=0.85,
            avg_learning_gain=0.15,
            avg_time_per_unit=8.5,
        )
        assert data.class_id == "class-5a"
        assert data.student_count == 25
|
|
|
|
|
|
class TestEdgeCases:
    """Edge-case and error-handling tests across the API surface."""

    def test_learning_gain_nonexistent_unit(self):
        """Unknown units yield an empty summary instead of a 404."""
        resp = client.get("/api/analytics/learning-gain/nonexistent_unit_xyz")
        assert resp.status_code == 200
        assert resp.json()["total_students"] == 0

    def test_compare_single_unit(self):
        """A comparison with a single unit still returns a list."""
        resp = client.get("/api/analytics/learning-gain/compare?unit_ids=single_unit")
        assert resp.status_code == 200
        assert isinstance(resp.json()["comparisons"], list)

    def test_compare_empty_units(self):
        """An empty unit_ids value is handled gracefully (200 or 422)."""
        resp = client.get("/api/analytics/learning-gain/compare?unit_ids=")
        assert resp.status_code in (200, 422)

    def test_invalid_time_range(self):
        """An out-of-enum time_range is rejected by FastAPI with 422."""
        resp = client.get("/api/analytics/learning-gain/demo_unit_v1?time_range=invalid")
        assert resp.status_code == 422

    def test_invalid_export_format(self):
        """An out-of-enum export format is rejected by FastAPI with 422."""
        resp = client.get("/api/analytics/export/learning-gains?format=xml")
        assert resp.status_code == 422

    def test_misconceptions_limit_bounds(self):
        """The limit query param enforces its inclusive 1..100 bounds."""
        # Out-of-range values are rejected...
        assert client.get("/api/analytics/misconceptions?limit=0").status_code == 422
        assert client.get("/api/analytics/misconceptions?limit=200").status_code == 422
        # ...while the boundary values themselves are accepted.
        assert client.get("/api/analytics/misconceptions?limit=1").status_code == 200
        assert client.get("/api/analytics/misconceptions?limit=100").status_code == 200

    def test_compare_classes_empty(self):
        """An empty class_ids value yields an empty JSON list."""
        resp = client.get("/api/analytics/compare/classes?class_ids=")
        assert resp.status_code == 200
        assert isinstance(resp.json(), list)

    def test_student_timeline_new_student(self):
        """A student with no history gets a zeroed-out timeline."""
        sid = str(uuid.uuid4())
        resp = client.get(f"/api/analytics/student/{sid}/timeline")
        assert resp.status_code == 200
        body = resp.json()
        assert body["units_completed"] == 0
        assert body["trend"] == "insufficient_data"
        assert body["timeline"] == []

    def test_special_characters_in_ids(self):
        """URL-encoded slashes in path params may 404 after decoding."""
        # Starlette decodes %2F before routing, so the decoded path may not
        # match any route — expected behavior; use URL-safe IDs in practice.
        resp = client.get("/api/analytics/learning-gain/unit%2Fwith%2Fslashes")
        assert resp.status_code in (200, 404)
|
|
|