fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
9
backend/api/__init__.py
Normal file
9
backend/api/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
"""
Backend API module.

Collects all API routers for the FastAPI application.
"""

from . import classroom

__all__ = ["classroom"]
70
backend/api/classroom/__init__.py
Normal file
70
backend/api/classroom/__init__.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""
Classroom API - modular router.

This router aggregates all classroom-related endpoints from the submodules.
For backwards compatibility, the old classroom_api.py path can still be used.

Structure:
- sessions.py: session CRUD, timer, phases, history
- templates.py: lesson templates
- homework.py: homework tracking
- materials.py: teaching materials
- analytics.py: analytics & reflections
- feedback.py: teacher feedback
- settings.py: teacher settings
- utility.py: health, phases, export
- context.py: teacher context (v1 API)
"""

from fastapi import APIRouter

from .sessions import router as sessions_router
from .templates import router as templates_router
from .homework import router as homework_router
from .materials import router as materials_router
from .analytics import router as analytics_router
from .feedback import router as feedback_router
from .settings import router as settings_router
from .utility import router as utility_router
from .context import router as context_router

# Main router with the shared URL prefix; all sub-routers are mounted below it.
router = APIRouter(prefix="/api/classroom", tags=["Classroom"])

# Mount sub-routers
router.include_router(sessions_router)
router.include_router(templates_router)
router.include_router(homework_router)
router.include_router(materials_router)
router.include_router(analytics_router)
router.include_router(feedback_router)
router.include_router(settings_router)
router.include_router(utility_router)
router.include_router(context_router)

# Re-exports for convenient importing from this package.
from .models import (
    CreateSessionRequest,
    SessionResponse,
    TimerStatus,
    SuggestionsResponse,
)
from .shared import (
    ws_manager,
    get_session_or_404,
    start_timer_broadcast,
    stop_timer_broadcast,
)

__all__ = [
    "router",
    "ws_manager",
    "get_session_or_404",
    "start_timer_broadcast",
    "stop_timer_broadcast",
    "CreateSessionRequest",
    "SessionResponse",
    "TimerStatus",
    "SuggestionsResponse",
]
343
backend/api/classroom/analytics.py
Normal file
343
backend/api/classroom/analytics.py
Normal file
@@ -0,0 +1,343 @@
|
||||
"""
|
||||
Classroom API - Analytics & Reflections Endpoints.
|
||||
|
||||
Endpoints fuer Session-Analytics und Post-Lesson Reflections (Phase 5).
|
||||
"""
|
||||
|
||||
from uuid import uuid4
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime, timedelta
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from .shared import init_db_if_needed, DB_ENABLED, logger
|
||||
|
||||
try:
|
||||
from classroom_engine.database import SessionLocal
|
||||
from classroom_engine.repository import AnalyticsRepository, ReflectionRepository
|
||||
from classroom_engine.analytics import LessonReflection
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
router = APIRouter(tags=["Analytics"])
|
||||
|
||||
|
||||
# === Pydantic Models ===


class SessionSummaryResponse(BaseModel):
    """Analytics summary of a single classroom session.

    Mirrors the dict produced by ``AnalyticsRepository.get_session_summary``;
    durations are reported both in raw seconds and as pre-formatted strings.
    """
    session_id: str
    teacher_id: str
    class_id: str
    subject: str
    topic: Optional[str]
    date: Optional[str]
    date_formatted: str
    total_duration_seconds: int
    total_duration_formatted: str
    planned_duration_seconds: int
    planned_duration_formatted: str
    phases_completed: int
    total_phases: int
    completion_percentage: int
    phase_statistics: List[Dict[str, Any]]
    total_overtime_seconds: int
    total_overtime_formatted: str
    phases_with_overtime: int
    total_pause_count: int
    total_pause_seconds: int
    # Optional post-lesson reflection data (empty defaults when absent).
    # Mutable defaults are safe here: pydantic copies field defaults per instance.
    reflection_notes: str = ""
    reflection_rating: Optional[int] = None
    key_learnings: List[str] = []


class TeacherAnalyticsResponse(BaseModel):
    """Aggregated analytics for one teacher over a time period."""
    teacher_id: str
    period_start: Optional[str]
    period_end: Optional[str]
    total_sessions: int
    completed_sessions: int
    total_teaching_minutes: int
    total_teaching_hours: float
    avg_phase_durations: Dict[str, int]
    sessions_with_overtime: int
    overtime_percentage: int
    avg_overtime_seconds: int
    avg_overtime_formatted: str
    most_overtime_phase: Optional[str]
    avg_pause_count: float
    avg_pause_duration_seconds: int
    subjects_taught: Dict[str, int]
    classes_taught: Dict[str, int]


class ReflectionCreate(BaseModel):
    """Request body for creating a post-lesson reflection."""
    session_id: str
    teacher_id: str
    notes: str = ""
    # Rating is restricted to the 1..5 range.
    overall_rating: Optional[int] = Field(None, ge=1, le=5)
    what_worked: List[str] = []
    improvements: List[str] = []
    notes_for_next_lesson: str = ""


class ReflectionUpdate(BaseModel):
    """Request body for a partial reflection update (None = leave unchanged)."""
    notes: Optional[str] = None
    overall_rating: Optional[int] = Field(None, ge=1, le=5)
    what_worked: Optional[List[str]] = None
    improvements: Optional[List[str]] = None
    notes_for_next_lesson: Optional[str] = None


class ReflectionResponse(BaseModel):
    """Response for a single reflection."""
    reflection_id: str
    session_id: str
    teacher_id: str
    notes: str
    overall_rating: Optional[int]
    what_worked: List[str]
    improvements: List[str]
    notes_for_next_lesson: str
    created_at: Optional[str]
    updated_at: Optional[str]


class ReflectionListResponse(BaseModel):
    """Response for a list of reflections with the teacher's total count."""
    reflections: List[ReflectionResponse]
    total: int
|
||||
# === Analytics Endpoints ===

def _ensure_db_ready() -> None:
    """Fail fast with 503 when the DB layer is disabled, else lazily initialise it."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Database not available")
    init_db_if_needed()


# Valid lesson-phase identifiers accepted by the trend endpoint.
_KNOWN_PHASES = frozenset({"einstieg", "erarbeitung", "sicherung", "transfer", "reflexion"})


@router.get("/analytics/session/{session_id}")
async def get_session_summary(session_id: str) -> SessionSummaryResponse:
    """Return the analytics summary of a single session (404 if unknown)."""
    _ensure_db_ready()

    with SessionLocal() as db:
        summary = AnalyticsRepository(db).get_session_summary(session_id)

        if not summary:
            raise HTTPException(status_code=404, detail=f"Session {session_id} not found")

        return SessionSummaryResponse(**summary.to_dict())


@router.get("/analytics/teacher/{teacher_id}")
async def get_teacher_analytics(
    teacher_id: str,
    days: int = Query(30, ge=1, le=365)
) -> TeacherAnalyticsResponse:
    """Return aggregated analytics for a teacher over the last ``days`` days."""
    _ensure_db_ready()

    window_end = datetime.utcnow()
    window_start = window_end - timedelta(days=days)

    with SessionLocal() as db:
        stats = AnalyticsRepository(db).get_teacher_analytics(teacher_id, window_start, window_end)
        return TeacherAnalyticsResponse(**stats.to_dict())


@router.get("/analytics/phase-trends/{teacher_id}/{phase}")
async def get_phase_trends(
    teacher_id: str,
    phase: str,
    limit: int = Query(20, ge=1, le=100)
) -> Dict[str, Any]:
    """Return the duration-trend data points for one lesson phase."""
    # Validate the path parameter before touching the database.
    if phase not in _KNOWN_PHASES:
        raise HTTPException(status_code=400, detail=f"Invalid phase: {phase}")

    _ensure_db_ready()

    with SessionLocal() as db:
        points = AnalyticsRepository(db).get_phase_duration_trends(teacher_id, phase, limit)

    return {
        "teacher_id": teacher_id,
        "phase": phase,
        "data_points": points,
        "count": len(points)
    }


@router.get("/analytics/overtime/{teacher_id}")
async def get_overtime_analysis(
    teacher_id: str,
    limit: int = Query(30, ge=1, le=100)
) -> Dict[str, Any]:
    """Analyse overtime patterns per phase over the most recent sessions."""
    _ensure_db_ready()

    with SessionLocal() as db:
        per_phase = AnalyticsRepository(db).get_overtime_analysis(teacher_id, limit)

    return {
        "teacher_id": teacher_id,
        "sessions_analyzed": limit,
        "phases": per_phase
    }
|
||||
|
||||
# === Reflection Endpoints ===

@router.post("/reflections", status_code=201)
async def create_reflection(data: ReflectionCreate) -> ReflectionResponse:
    """Create a post-lesson reflection.

    Raises 409 if the session already has a reflection, 503 when the
    database layer is unavailable.
    """
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Database not available")

    init_db_if_needed()

    with SessionLocal() as db:
        repo = ReflectionRepository(db)

        # Enforce at most one reflection per session.
        existing = repo.get_by_session(data.session_id)
        if existing:
            raise HTTPException(
                status_code=409,
                detail=f"Reflection for session {data.session_id} already exists"
            )

        reflection = LessonReflection(
            reflection_id=str(uuid4()),
            session_id=data.session_id,
            teacher_id=data.teacher_id,
            notes=data.notes,
            overall_rating=data.overall_rating,
            what_worked=data.what_worked,
            improvements=data.improvements,
            notes_for_next_lesson=data.notes_for_next_lesson,
            created_at=datetime.utcnow(),
        )

        db_reflection = repo.create(reflection)
        result = repo.to_dataclass(db_reflection)

        return ReflectionResponse(**result.to_dict())


@router.get("/reflections/session/{session_id}")
async def get_reflection_by_session(session_id: str) -> ReflectionResponse:
    """Fetch the reflection of a session (404 when none exists)."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Database not available")

    init_db_if_needed()

    with SessionLocal() as db:
        repo = ReflectionRepository(db)
        db_reflection = repo.get_by_session(session_id)

        if not db_reflection:
            raise HTTPException(status_code=404, detail=f"No reflection for session {session_id}")

        result = repo.to_dataclass(db_reflection)
        return ReflectionResponse(**result.to_dict())


@router.get("/reflections/teacher/{teacher_id}")
async def get_reflections_by_teacher(
    teacher_id: str,
    limit: int = Query(20, ge=1, le=100),
    offset: int = Query(0, ge=0)
) -> ReflectionListResponse:
    """Fetch all reflections of a teacher, paginated via limit/offset."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Database not available")

    init_db_if_needed()

    with SessionLocal() as db:
        repo = ReflectionRepository(db)
        db_reflections = repo.get_by_teacher(teacher_id, limit, offset)

        reflections = []
        for db_ref in db_reflections:
            result = repo.to_dataclass(db_ref)
            reflections.append(ReflectionResponse(**result.to_dict()))

        # Total count (ignores pagination) for client-side paging UI.
        total = repo.count_by_teacher(teacher_id)

        return ReflectionListResponse(reflections=reflections, total=total)


@router.put("/reflections/{reflection_id}")
async def update_reflection(reflection_id: str, data: ReflectionUpdate) -> ReflectionResponse:
    """Partially update a reflection; only non-None fields are applied."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Database not available")

    init_db_if_needed()

    with SessionLocal() as db:
        repo = ReflectionRepository(db)
        db_reflection = repo.get_by_id(reflection_id)

        if not db_reflection:
            raise HTTPException(status_code=404, detail=f"Reflection {reflection_id} not found")

        reflection = repo.to_dataclass(db_reflection)

        # Apply only the fields explicitly provided in the request body.
        if data.notes is not None:
            reflection.notes = data.notes
        if data.overall_rating is not None:
            reflection.overall_rating = data.overall_rating
        if data.what_worked is not None:
            reflection.what_worked = data.what_worked
        if data.improvements is not None:
            reflection.improvements = data.improvements
        if data.notes_for_next_lesson is not None:
            reflection.notes_for_next_lesson = data.notes_for_next_lesson

        reflection.updated_at = datetime.utcnow()

        db_reflection = repo.update(reflection)
        result = repo.to_dataclass(db_reflection)

        return ReflectionResponse(**result.to_dict())


@router.delete("/reflections/{reflection_id}")
async def delete_reflection(reflection_id: str) -> Dict[str, str]:
    """Delete a reflection (404 when it does not exist)."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Database not available")

    init_db_if_needed()

    with SessionLocal() as db:
        repo = ReflectionRepository(db)
        db_reflection = repo.get_by_id(reflection_id)

        if not db_reflection:
            raise HTTPException(status_code=404, detail=f"Reflection {reflection_id} not found")

        repo.delete(reflection_id)

        return {"status": "deleted", "reflection_id": reflection_id}
687
backend/api/classroom/context.py
Normal file
687
backend/api/classroom/context.py
Normal file
@@ -0,0 +1,687 @@
|
||||
"""
|
||||
Classroom API - Teacher Context Endpoints (v1 API).
|
||||
|
||||
Endpoints fuer Teacher Context, Events, Routines und Antizipations-Engine.
|
||||
"""
|
||||
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query, Depends
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy.orm import Session as DBSession
|
||||
|
||||
from .shared import init_db_if_needed, DB_ENABLED, logger
|
||||
|
||||
try:
|
||||
from classroom_engine.database import get_db, SessionLocal
|
||||
from classroom_engine.repository import (
|
||||
TeacherContextRepository, SchoolyearEventRepository, RecurringRoutineRepository
|
||||
)
|
||||
from classroom_engine.context_models import (
|
||||
MacroPhaseEnum, EventTypeEnum, EventStatusEnum,
|
||||
RoutineTypeEnum, RecurrencePatternEnum,
|
||||
FEDERAL_STATES, SCHOOL_TYPES
|
||||
)
|
||||
from classroom_engine.antizipation import SuggestionGenerator
|
||||
except ImportError:
|
||||
FEDERAL_STATES = {}
|
||||
SCHOOL_TYPES = {}
|
||||
|
||||
router = APIRouter(prefix="/v1", tags=["Teacher Context"])
|
||||
|
||||
|
||||
# === Pydantic Models ===

class SchoolInfo(BaseModel):
    """School identification: federal state and school type (code + display name)."""
    federal_state: str
    federal_state_name: str
    school_type: str
    school_type_name: str


class SchoolYearInfo(BaseModel):
    """Current school year identifier, start date and week number."""
    id: str
    start: Optional[str]
    current_week: int


class MacroPhaseInfo(BaseModel):
    """Macro phase of the school year with a confidence score."""
    id: str
    label: str
    confidence: float


class CoreCounts(BaseModel):
    """Headline counters for the teacher's dashboard."""
    classes: int = 0
    exams_scheduled: int = 0
    corrections_pending: int = 0


class ContextFlags(BaseModel):
    """Boolean state flags derived from the teacher's context."""
    onboarding_completed: bool = False
    has_classes: bool = False
    has_schedule: bool = False
    is_exam_period: bool = False
    is_before_holidays: bool = False


class TeacherContextResponse(BaseModel):
    """Aggregated macro context of a teacher (v1 response schema)."""
    schema_version: str = "1.0"
    teacher_id: str
    school: SchoolInfo
    school_year: SchoolYearInfo
    macro_phase: MacroPhaseInfo
    core_counts: CoreCounts
    flags: ContextFlags


class UpdateContextRequest(BaseModel):
    """Partial context update; None fields are left unchanged."""
    federal_state: Optional[str] = None
    school_type: Optional[str] = None
    schoolyear: Optional[str] = None
    schoolyear_start: Optional[str] = None
    macro_phase: Optional[str] = None
    current_week: Optional[int] = None


class CreateEventRequest(BaseModel):
    """Request body for creating a school-year event."""
    title: str = Field(..., max_length=300)
    event_type: str = "other"
    # ISO-8601 date strings; a trailing 'Z' is normalised by the endpoint.
    start_date: str
    end_date: Optional[str] = None
    class_id: Optional[str] = None
    subject: Optional[str] = None
    description: str = ""
    needs_preparation: bool = False
    reminder_days_before: int = 3


class EventResponse(BaseModel):
    """Serialized school-year event."""
    id: str
    teacher_id: str
    event_type: str
    title: str
    description: str
    start_date: str
    end_date: Optional[str]
    class_id: Optional[str]
    subject: Optional[str]
    status: str
    needs_preparation: bool
    preparation_done: bool
    reminder_days_before: int


class CreateRoutineRequest(BaseModel):
    """Request body for creating a recurring routine."""
    title: str
    routine_type: str = "other"
    recurrence_pattern: str = "weekly"
    day_of_week: Optional[int] = None
    day_of_month: Optional[int] = None
    time_of_day: Optional[str] = None
    duration_minutes: int = 60
    description: str = ""


class RoutineResponse(BaseModel):
    """Serialized recurring routine."""
    id: str
    teacher_id: str
    routine_type: str
    title: str
    description: str
    recurrence_pattern: str
    day_of_week: Optional[int]
    day_of_month: Optional[int]
    time_of_day: Optional[str]
    duration_minutes: int
    is_active: bool
|
||||
# === Helper Functions ===
|
||||
|
||||
# Display labels for the school-year macro phases (German UI strings).
_MACRO_PHASE_LABELS = {
    "onboarding": "Einrichtung",
    "schuljahresstart": "Schuljahresstart",
    "unterrichtsaufbau": "Unterrichtsaufbau",
    "leistungsphase_1": "Leistungsphase 1",
    "halbjahresabschluss": "Halbjahresabschluss",
    "leistungsphase_2": "Leistungsphase 2",
    "jahresabschluss": "Jahresabschluss",
}


def get_macro_phase_label(phase) -> str:
    """Return the display name of a macro phase.

    Accepts either an enum member (its ``.value`` is used) or a plain
    string; unknown phases fall back to the raw value itself.
    """
    key = phase.value if hasattr(phase, "value") else str(phase)
    return _MACRO_PHASE_LABELS.get(key, key)
||||
|
||||
|
||||
def get_default_context_response(teacher_id: str) -> TeacherContextResponse:
    """Return a fallback context used when the database is unavailable.

    Defaults to Bavaria / Gymnasium in the onboarding phase with zeroed
    counters and all flags unset.
    """
    return TeacherContextResponse(
        teacher_id=teacher_id,
        school=SchoolInfo(
            federal_state="BY",
            federal_state_name="Bayern",
            school_type="gymnasium",
            school_type_name="Gymnasium",
        ),
        school_year=SchoolYearInfo(id="2024-2025", start=None, current_week=1),
        macro_phase=MacroPhaseInfo(id="onboarding", label="Einrichtung", confidence=1.0),
        core_counts=CoreCounts(),
        flags=ContextFlags(),
    )
||||
|
||||
|
||||
# === Context Endpoints ===

@router.get("/context", response_model=TeacherContextResponse)
async def get_teacher_context(teacher_id: str = Query(...)):
    """Return the current macro context of a teacher.

    Falls back to a static default when the database layer is disabled.
    Raises 500 on any persistence error.
    """
    if not DB_ENABLED:
        return get_default_context_response(teacher_id)

    try:
        # Context manager guarantees the session is closed on every path;
        # the previous explicit db.close() leaked sessions on exceptions.
        with SessionLocal() as db:
            repo = TeacherContextRepository(db)
            context = repo.get_or_create(teacher_id)

            event_repo = SchoolyearEventRepository(db)
            upcoming_exams = event_repo.get_upcoming(teacher_id, days=30)
            exams_count = len([e for e in upcoming_exams if e.event_type.value == "exam"])

            return TeacherContextResponse(
                teacher_id=teacher_id,
                school=SchoolInfo(
                    federal_state=context.federal_state or "BY",
                    federal_state_name=FEDERAL_STATES.get(context.federal_state, ""),
                    school_type=context.school_type or "gymnasium",
                    school_type_name=SCHOOL_TYPES.get(context.school_type, ""),
                ),
                school_year=SchoolYearInfo(
                    id=context.schoolyear or "2024-2025",
                    start=context.schoolyear_start.isoformat() if context.schoolyear_start else None,
                    current_week=context.current_week or 1,
                ),
                macro_phase=MacroPhaseInfo(
                    id=context.macro_phase.value,
                    label=get_macro_phase_label(context.macro_phase),
                    confidence=1.0,
                ),
                core_counts=CoreCounts(
                    classes=1 if context.has_classes else 0,
                    exams_scheduled=exams_count,
                ),
                flags=ContextFlags(
                    onboarding_completed=context.onboarding_completed,
                    has_classes=context.has_classes,
                    has_schedule=context.has_schedule,
                    is_exam_period=context.is_exam_period,
                    is_before_holidays=context.is_before_holidays,
                ),
            )
    except Exception as e:
        logger.error(f"Failed to get teacher context: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler beim Laden des Kontexts: {e}")


@router.put("/context", response_model=TeacherContextResponse)
async def update_teacher_context(teacher_id: str, request: UpdateContextRequest):
    """Update a teacher's context and return the refreshed context.

    Validates federal state and school type codes (400 on unknown values);
    503 when the database layer is disabled.
    """
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    try:
        with SessionLocal() as db:
            repo = TeacherContextRepository(db)

            if request.federal_state and request.federal_state not in FEDERAL_STATES:
                raise HTTPException(status_code=400, detail=f"Ungueltiges Bundesland: {request.federal_state}")
            if request.school_type and request.school_type not in SCHOOL_TYPES:
                raise HTTPException(status_code=400, detail=f"Ungueltige Schulart: {request.school_type}")

            schoolyear_start = None
            if request.schoolyear_start:
                # Accept a trailing 'Z' (UTC) by normalising to a +00:00 offset.
                schoolyear_start = datetime.fromisoformat(request.schoolyear_start.replace('Z', '+00:00'))

            repo.update_context(
                teacher_id=teacher_id,
                federal_state=request.federal_state,
                school_type=request.school_type,
                schoolyear=request.schoolyear,
                schoolyear_start=schoolyear_start,
                macro_phase=request.macro_phase,
                current_week=request.current_week,
            )

        # Re-read after the session is closed so the response reflects the stored state.
        return await get_teacher_context(teacher_id)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to update teacher context: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler beim Aktualisieren: {e}")


@router.post("/context/complete-onboarding")
async def complete_onboarding(teacher_id: str = Query(...)):
    """Mark onboarding as completed and advance the macro phase."""
    if not DB_ENABLED:
        return {"success": True, "macro_phase": "schuljahresstart", "note": "DB not available"}

    try:
        with SessionLocal() as db:
            repo = TeacherContextRepository(db)
            context = repo.complete_onboarding(teacher_id)
            # Read the phase while the session is open to avoid detached access.
            phase_value = context.macro_phase.value
        return {"success": True, "macro_phase": phase_value, "teacher_id": teacher_id}
    except Exception as e:
        logger.error(f"Failed to complete onboarding: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")


@router.post("/context/reset-onboarding")
async def reset_onboarding(teacher_id: str = Query(...)):
    """Reset the onboarding state (intended for tests)."""
    if not DB_ENABLED:
        return {"success": True, "macro_phase": "onboarding", "note": "DB not available"}

    try:
        with SessionLocal() as db:
            repo = TeacherContextRepository(db)
            context = repo.get_or_create(teacher_id)
            context.onboarding_completed = False
            context.macro_phase = MacroPhaseEnum.ONBOARDING
            db.commit()
        return {"success": True, "macro_phase": "onboarding", "teacher_id": teacher_id}
    except Exception as e:
        logger.error(f"Failed to reset onboarding: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")
|
||||
|
||||
# === Events Endpoints ===

@router.get("/events")
async def get_events(
    teacher_id: str = Query(...),
    status: Optional[str] = None,
    event_type: Optional[str] = None,
    limit: int = 50
):
    """List a teacher's events, optionally filtered by status and event type."""
    if not DB_ENABLED:
        return {"events": [], "count": 0}

    try:
        # `with` closes the session on all paths (previously leaked on exceptions).
        with SessionLocal() as db:
            repo = SchoolyearEventRepository(db)
            events = repo.get_by_teacher(teacher_id, status=status, event_type=event_type, limit=limit)
            # Serialize while the session is open so lazy attributes resolve.
            return {"events": [repo.to_dict(e) for e in events], "count": len(events)}
    except Exception as e:
        logger.error(f"Failed to get events: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")


@router.get("/events/upcoming")
async def get_upcoming_events(teacher_id: str = Query(...), days: int = 30, limit: int = 10):
    """List events occurring within the next ``days`` days."""
    if not DB_ENABLED:
        return {"events": [], "count": 0}

    try:
        with SessionLocal() as db:
            repo = SchoolyearEventRepository(db)
            events = repo.get_upcoming(teacher_id, days=days, limit=limit)
            return {"events": [repo.to_dict(e) for e in events], "count": len(events)}
    except Exception as e:
        logger.error(f"Failed to get upcoming events: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")


@router.post("/events", response_model=EventResponse)
async def create_event(teacher_id: str, request: CreateEventRequest):
    """Create a new school-year event for the teacher."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    try:
        with SessionLocal() as db:
            repo = SchoolyearEventRepository(db)
            # Accept a trailing 'Z' (UTC) by normalising to a +00:00 offset.
            start_date = datetime.fromisoformat(request.start_date.replace('Z', '+00:00'))
            end_date = datetime.fromisoformat(request.end_date.replace('Z', '+00:00')) if request.end_date else None

            event = repo.create(
                teacher_id=teacher_id,
                title=request.title,
                event_type=request.event_type,
                start_date=start_date,
                end_date=end_date,
                class_id=request.class_id,
                subject=request.subject,
                description=request.description,
                needs_preparation=request.needs_preparation,
                reminder_days_before=request.reminder_days_before,
            )

            # Build the response inside the session so ORM attributes are loaded.
            return EventResponse(
                id=event.id,
                teacher_id=event.teacher_id,
                event_type=event.event_type.value,
                title=event.title,
                description=event.description,
                start_date=event.start_date.isoformat(),
                end_date=event.end_date.isoformat() if event.end_date else None,
                class_id=event.class_id,
                subject=event.subject,
                status=event.status.value,
                needs_preparation=event.needs_preparation,
                preparation_done=event.preparation_done,
                reminder_days_before=event.reminder_days_before,
            )
    except Exception as e:
        logger.error(f"Failed to create event: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")


@router.delete("/events/{event_id}")
async def delete_event(event_id: str):
    """Delete an event; 404 when it does not exist."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    try:
        with SessionLocal() as db:
            repo = SchoolyearEventRepository(db)
            deleted = repo.delete(event_id)
        if deleted:
            return {"success": True, "deleted_id": event_id}
        raise HTTPException(status_code=404, detail="Event nicht gefunden")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to delete event: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")
|
||||
|
||||
# === Routines Endpoints ===

@router.get("/routines")
async def get_routines(
    teacher_id: str = Query(...),
    is_active: bool = True,
    routine_type: Optional[str] = None
):
    """List a teacher's recurring routines, filtered by active state and type."""
    if not DB_ENABLED:
        return {"routines": [], "count": 0}

    try:
        # `with` closes the session on all paths (previously leaked on exceptions).
        with SessionLocal() as db:
            repo = RecurringRoutineRepository(db)
            routines = repo.get_by_teacher(teacher_id, is_active=is_active, routine_type=routine_type)
            return {"routines": [repo.to_dict(r) for r in routines], "count": len(routines)}
    except Exception as e:
        logger.error(f"Failed to get routines: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")


@router.get("/routines/today")
async def get_today_routines(teacher_id: str = Query(...)):
    """List the routines scheduled for today."""
    if not DB_ENABLED:
        return {"routines": [], "count": 0}

    try:
        with SessionLocal() as db:
            repo = RecurringRoutineRepository(db)
            routines = repo.get_today(teacher_id)
            return {"routines": [repo.to_dict(r) for r in routines], "count": len(routines)}
    except Exception as e:
        logger.error(f"Failed to get today's routines: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")


@router.post("/routines", response_model=RoutineResponse)
async def create_routine(teacher_id: str, request: CreateRoutineRequest):
    """Create a new recurring routine for the teacher."""
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    try:
        with SessionLocal() as db:
            repo = RecurringRoutineRepository(db)
            routine = repo.create(
                teacher_id=teacher_id,
                title=request.title,
                routine_type=request.routine_type,
                recurrence_pattern=request.recurrence_pattern,
                day_of_week=request.day_of_week,
                day_of_month=request.day_of_month,
                time_of_day=request.time_of_day,
                duration_minutes=request.duration_minutes,
                description=request.description,
            )

            # Build the response inside the session so ORM attributes are loaded.
            return RoutineResponse(
                id=routine.id,
                teacher_id=routine.teacher_id,
                routine_type=routine.routine_type.value,
                title=routine.title,
                description=routine.description,
                recurrence_pattern=routine.recurrence_pattern.value,
                day_of_week=routine.day_of_week,
                day_of_month=routine.day_of_month,
                time_of_day=routine.time_of_day.isoformat() if routine.time_of_day else None,
                duration_minutes=routine.duration_minutes,
                is_active=routine.is_active,
            )
    except Exception as e:
        logger.error(f"Failed to create routine: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")
|
||||
|
||||
@router.delete("/routines/{routine_id}")
async def delete_routine(routine_id: str):
    """Delete a routine by id.

    Raises HTTP 404 if the routine does not exist, HTTP 503 when the
    database is disabled, HTTP 500 on repository failures.
    """
    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    db = None
    try:
        db = SessionLocal()
        repo = RecurringRoutineRepository(db)
        if repo.delete(routine_id):
            return {"success": True, "deleted_id": routine_id}
        raise HTTPException(status_code=404, detail="Routine nicht gefunden")
    except HTTPException:
        # Re-raise 404 untouched instead of wrapping it as a 500.
        raise
    except Exception as e:
        logger.error(f"Failed to delete routine: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")
    finally:
        # BUG FIX: the session previously leaked on the 404 and error paths.
        if db is not None:
            db.close()
|
||||
|
||||
|
||||
# === Static Data Endpoints ===

@router.get("/federal-states")
async def get_federal_states_list():
    """Return all German federal states as a list of id/name pairs."""
    states = []
    for state_id, state_name in FEDERAL_STATES.items():
        states.append({"id": state_id, "name": state_name})
    return {"federal_states": states}
|
||||
|
||||
|
||||
@router.get("/school-types")
async def get_school_types_list():
    """Return all school types as a list of id/name pairs."""
    types = []
    for type_id, type_name in SCHOOL_TYPES.items():
        types.append({"id": type_id, "name": type_name})
    return {"school_types": types}
|
||||
|
||||
|
||||
@router.get("/macro-phases")
async def get_macro_phases_list():
    """Return the ordered macro phases of a school year."""
    # (id, label) pairs in chronological order; `order` is derived from position.
    phase_labels = [
        ("onboarding", "Einrichtung"),
        ("schuljahresstart", "Schuljahresstart"),
        ("unterrichtsaufbau", "Unterrichtsaufbau"),
        ("leistungsphase_1", "Leistungsphase 1"),
        ("halbjahresabschluss", "Halbjahresabschluss"),
        ("leistungsphase_2", "Leistungsphase 2"),
        ("jahresabschluss", "Jahresabschluss"),
    ]
    return {
        "macro_phases": [
            {"id": phase_id, "label": label, "order": position}
            for position, (phase_id, label) in enumerate(phase_labels, start=1)
        ]
    }
|
||||
|
||||
|
||||
@router.get("/event-types")
async def get_event_types_list():
    """Return all supported schoolyear event types."""
    type_labels = [
        ("exam", "Klassenarbeit/Klausur"),
        ("parent_evening", "Elternabend"),
        ("trip", "Klassenfahrt/Ausflug"),
        ("project", "Projektwoche"),
        ("other", "Sonstiges"),
    ]
    return {
        "event_types": [
            {"id": type_id, "label": label} for type_id, label in type_labels
        ]
    }
|
||||
|
||||
|
||||
@router.get("/routine-types")
async def get_routine_types_list():
    """Return all supported recurring-routine types."""
    type_labels = [
        ("teacher_conference", "Lehrerkonferenz"),
        ("subject_conference", "Fachkonferenz"),
        ("office_hours", "Sprechstunde"),
        ("correction_time", "Korrekturzeit"),
        ("other", "Sonstiges"),
    ]
    return {
        "routine_types": [
            {"id": type_id, "label": label} for type_id, label in type_labels
        ]
    }
|
||||
|
||||
|
||||
# === Anticipation Engine ===

@router.get("/suggestions")
async def get_suggestions(teacher_id: str = Query(...), limit: int = Query(5, ge=1, le=20)):
    """Generate context-based suggestions for a teacher.

    Returns a static empty result when the database is disabled; HTTP 500
    on generator failures.
    """
    if not DB_ENABLED:
        return {
            "active_contexts": [],
            "suggestions": [],
            "signals_summary": {"macro_phase": "onboarding"},
            "total_suggestions": 0,
        }

    db = None
    try:
        db = SessionLocal()
        generator = SuggestionGenerator(db)
        return generator.generate(teacher_id, limit=limit)
    except Exception as e:
        logger.error(f"Failed to generate suggestions: {e}")
        raise HTTPException(status_code=500, detail=f"Fehler: {e}")
    finally:
        # BUG FIX: close the session even when generation raises.
        if db is not None:
            db.close()
|
||||
|
||||
|
||||
# === Sidebar ===

@router.get("/sidebar")
async def get_sidebar(teacher_id: str = Query(...), mode: str = Query("companion")):
    """Build the dynamic sidebar model for the requested UI mode."""
    if mode != "companion":
        # Classic mode: a single navigation tree.
        return {
            "mode": "classic",
            "sections": [
                {
                    "id": "NAVIGATION",
                    "type": "tree",
                    "items": [
                        {"id": "dashboard", "label": "Dashboard", "icon": "dashboard"},
                        {"id": "lesson", "label": "Stundenmodus", "icon": "timer"},
                        {"id": "classes", "label": "Klassen", "icon": "groups"},
                    ],
                }
            ],
        }

    # Companion mode: search bar, a contextual list, and a collapsed module folder.
    module_items = [
        {"id": "lesson", "label": "Stundenmodus", "icon": "timer"},
        {"id": "classes", "label": "Klassen", "icon": "groups"},
        {"id": "exams", "label": "Klausuren", "icon": "quiz"},
    ]
    return {
        "mode": "companion",
        "sections": [
            {"id": "SEARCH", "type": "search_bar", "placeholder": "Suchen..."},
            {"id": "NOW_RELEVANT", "type": "list", "title": "Jetzt relevant", "items": []},
            {
                "id": "ALL_MODULES",
                "type": "folder",
                "label": "Alle Module",
                "collapsed": True,
                "items": module_items,
            },
        ],
    }
|
||||
|
||||
|
||||
# === Schoolyear Path ===

@router.get("/path")
async def get_schoolyear_path(teacher_id: str = Query(...)):
    """Generate the schoolyear path with milestones for a teacher.

    Best-effort: falls back to the "onboarding" phase when the database is
    disabled or the context lookup fails.
    """
    current_phase = "onboarding"
    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherContextRepository(db)
            context = repo.get_or_create(teacher_id)
            current_phase = context.macro_phase.value
        except Exception as e:
            logger.warning(f"Failed to get context for path: {e}")
        finally:
            # BUG FIX: the session previously leaked when the lookup raised.
            if db is not None:
                db.close()

    phase_order = [
        "onboarding", "schuljahresstart", "unterrichtsaufbau",
        "leistungsphase_1", "halbjahresabschluss", "leistungsphase_2", "jahresabschluss",
    ]
    current_index = phase_order.index(current_phase) if current_phase in phase_order else 0

    # One milestone per phase, in the same order as phase_order.
    milestones = [
        {"id": "MS_START", "label": "Start", "phase": "onboarding"},
        {"id": "MS_SETUP", "label": "Einrichtung", "phase": "schuljahresstart"},
        {"id": "MS_ROUTINE", "label": "Routinen", "phase": "unterrichtsaufbau"},
        {"id": "MS_EXAM_1", "label": "Klausuren", "phase": "leistungsphase_1"},
        {"id": "MS_HALFYEAR", "label": "Halbjahr", "phase": "halbjahresabschluss"},
        {"id": "MS_EXAM_2", "label": "Pruefungen", "phase": "leistungsphase_2"},
        {"id": "MS_END", "label": "Abschluss", "phase": "jahresabschluss"},
    ]

    for milestone in milestones:  # index was unused; iterate directly
        phase_index = phase_order.index(milestone["phase"])
        if phase_index < current_index:
            milestone["status"] = "done"
        elif phase_index == current_index:
            milestone["status"] = "current"
        else:
            milestone["status"] = "upcoming"

    return {
        "milestones": milestones,
        "current_milestone_id": milestones[current_index]["id"],
        "progress_percent": int((current_index / (len(phase_order) - 1)) * 100),
    }
|
||||
271
backend/api/classroom/feedback.py
Normal file
271
backend/api/classroom/feedback.py
Normal file
@@ -0,0 +1,271 @@
|
||||
"""
|
||||
Classroom API - Feedback Endpoints.
|
||||
|
||||
Endpoints fuer Lehrer-Feedback (Feature Request Tracking).
|
||||
"""
|
||||
|
||||
from uuid import uuid4
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from .shared import init_db_if_needed, DB_ENABLED, logger
|
||||
|
||||
try:
|
||||
from classroom_engine.database import SessionLocal
|
||||
from classroom_engine.repository import TeacherFeedbackRepository
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
router = APIRouter(tags=["Feedback"])
|
||||
|
||||
# In-Memory Storage (Fallback)
|
||||
_feedback: Dict[str, dict] = {}
|
||||
|
||||
|
||||
# === Pydantic Models ===
|
||||
|
||||
class CreateFeedbackRequest(BaseModel):
    """Request body for creating a new piece of teacher feedback."""
    # Owning teacher; no authorization check happens in this module.
    teacher_id: str
    # Optional classroom session the feedback refers to.
    session_id: Optional[str] = None
    category: str = Field(..., description="bug, feature, usability, content, other")
    title: str = Field(..., min_length=1, max_length=200)
    description: str = Field(..., min_length=10, max_length=5000)
    priority: str = Field("medium", description="low, medium, high, critical")
    # Arbitrary client-side context (e.g. UI state); schema not enforced here.
    context_data: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class FeedbackResponse(BaseModel):
    """API representation of a single feedback entry."""
    feedback_id: str
    teacher_id: str
    session_id: Optional[str]
    category: str
    title: str
    description: str
    priority: str
    # Lifecycle state: open, in_progress, resolved, closed, wont_fix.
    status: str
    context_data: Optional[Dict[str, Any]]
    # Free-text notes set by administrators; None until triaged.
    admin_notes: Optional[str]
    # ISO-8601 timestamps (naive UTC as produced by datetime.utcnow()).
    created_at: str
    updated_at: Optional[str]
|
||||
|
||||
|
||||
class FeedbackListResponse(BaseModel):
    """Paged list of feedback entries; total_count is before limit/offset."""
    feedback: List[FeedbackResponse]
    total_count: int
|
||||
|
||||
|
||||
class FeedbackStatsResponse(BaseModel):
    """Aggregated feedback counters, grouped by category/status/priority."""
    total: int
    by_category: Dict[str, int]
    by_status: Dict[str, int]
    by_priority: Dict[str, int]
|
||||
|
||||
|
||||
# === Endpoints ===
|
||||
|
||||
@router.post("/feedback", response_model=FeedbackResponse, status_code=201)
async def create_feedback(request: CreateFeedbackRequest) -> FeedbackResponse:
    """Create a new feedback entry.

    Validates category and priority, best-effort persists to the database,
    and always mirrors the entry into the in-memory fallback store.
    Raises HTTP 400 on invalid category/priority.
    """
    init_db_if_needed()

    valid_categories = ["bug", "feature", "usability", "content", "other"]
    if request.category not in valid_categories:
        raise HTTPException(status_code=400, detail=f"Invalid category. Must be one of: {valid_categories}")

    valid_priorities = ["low", "medium", "high", "critical"]
    if request.priority not in valid_priorities:
        raise HTTPException(status_code=400, detail=f"Invalid priority. Must be one of: {valid_priorities}")

    feedback_id = str(uuid4())
    now = datetime.utcnow()

    feedback_data = {
        "feedback_id": feedback_id,
        "teacher_id": request.teacher_id,
        "session_id": request.session_id,
        "category": request.category,
        "title": request.title,
        "description": request.description,
        "priority": request.priority,
        "status": "open",
        "context_data": request.context_data,
        "admin_notes": None,
        "created_at": now.isoformat(),
        "updated_at": None,
    }

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherFeedbackRepository(db)
            repo.create(feedback_data)
        except Exception as e:
            # Best-effort persistence: memory store below still gets the entry.
            logger.warning(f"DB persist failed for feedback: {e}")
        finally:
            # BUG FIX: close the session even when repo.create raises.
            if db is not None:
                db.close()

    _feedback[feedback_id] = feedback_data
    return FeedbackResponse(**feedback_data)
|
||||
|
||||
|
||||
@router.get("/feedback", response_model=FeedbackListResponse)
async def list_feedback(
    teacher_id: Optional[str] = Query(None),
    category: Optional[str] = Query(None),
    status: Optional[str] = Query(None),
    priority: Optional[str] = Query(None),
    limit: int = Query(50, ge=1, le=100),
    offset: int = Query(0, ge=0)
) -> FeedbackListResponse:
    """List feedback entries, optionally filtered.

    Prefers the database; falls back to the in-memory store on DB failure.
    """
    init_db_if_needed()

    feedback_list = []

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherFeedbackRepository(db)
            db_feedback = repo.get_all(
                teacher_id=teacher_id,
                category=category,
                status=status,
                priority=priority,
                limit=limit,
                offset=offset
            )

            for fb in db_feedback:
                feedback_list.append(FeedbackResponse(**fb))

            # NOTE(review): count() is not given the priority filter, so
            # total_count may exceed the filtered result set — confirm the
            # repository signature before adding it.
            total = repo.count(teacher_id=teacher_id, category=category, status=status)
            return FeedbackListResponse(feedback=feedback_list, total_count=total)
        except Exception as e:
            logger.warning(f"DB read failed for feedback: {e}")
            # BUG FIX: discard partial DB rows so the memory fallback does
            # not produce duplicates.
            feedback_list = []
        finally:
            # BUG FIX: close the session even when the read raises.
            if db is not None:
                db.close()

    # Fallback to memory
    for fb in _feedback.values():
        if teacher_id and fb["teacher_id"] != teacher_id:
            continue
        if category and fb["category"] != category:
            continue
        if status and fb["status"] != status:
            continue
        if priority and fb["priority"] != priority:
            continue
        feedback_list.append(FeedbackResponse(**fb))

    total = len(feedback_list)
    feedback_list = feedback_list[offset:offset + limit]

    return FeedbackListResponse(feedback=feedback_list, total_count=total)
|
||||
|
||||
|
||||
@router.get("/feedback/stats", response_model=FeedbackStatsResponse)
async def get_feedback_stats() -> FeedbackStatsResponse:
    """Return aggregated feedback statistics.

    Prefers the database; falls back to counting the in-memory store.
    """
    init_db_if_needed()

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherFeedbackRepository(db)
            stats = repo.get_stats()
            return FeedbackStatsResponse(**stats)
        except Exception as e:
            logger.warning(f"DB read failed for feedback stats: {e}")
        finally:
            # BUG FIX: close the session even when get_stats raises.
            if db is not None:
                db.close()

    # Fallback: tally the in-memory store.
    by_category: Dict[str, int] = {}
    by_status: Dict[str, int] = {}
    by_priority: Dict[str, int] = {}

    for fb in _feedback.values():
        cat = fb["category"]
        by_category[cat] = by_category.get(cat, 0) + 1

        st = fb["status"]
        by_status[st] = by_status.get(st, 0) + 1

        pr = fb["priority"]
        by_priority[pr] = by_priority.get(pr, 0) + 1

    return FeedbackStatsResponse(
        total=len(_feedback),
        by_category=by_category,
        by_status=by_status,
        by_priority=by_priority,
    )
|
||||
|
||||
|
||||
@router.get("/feedback/{feedback_id}")
async def get_feedback(feedback_id: str) -> FeedbackResponse:
    """Fetch a single feedback entry; memory store first, then database.

    Raises HTTP 404 when the entry does not exist in either store.
    """
    init_db_if_needed()

    if feedback_id in _feedback:
        return FeedbackResponse(**_feedback[feedback_id])

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherFeedbackRepository(db)
            fb = repo.get_by_id(feedback_id)
            if fb:
                return FeedbackResponse(**fb)
        except Exception as e:
            logger.warning(f"DB read failed: {e}")
        finally:
            # BUG FIX: close the session even when the lookup raises.
            if db is not None:
                db.close()

    raise HTTPException(status_code=404, detail="Feedback nicht gefunden")
|
||||
|
||||
|
||||
@router.put("/feedback/{feedback_id}/status")
async def update_feedback_status(
    feedback_id: str,
    status: str = Query(..., description="open, in_progress, resolved, closed, wont_fix")
) -> FeedbackResponse:
    """Update the lifecycle status of a feedback entry.

    Raises HTTP 400 on an invalid status and HTTP 404 when the entry is not
    found in either store. The DB update is best-effort; the in-memory copy
    is always kept in sync.
    """
    init_db_if_needed()

    valid_statuses = ["open", "in_progress", "resolved", "closed", "wont_fix"]
    if status not in valid_statuses:
        raise HTTPException(status_code=400, detail=f"Invalid status. Must be one of: {valid_statuses}")

    feedback_data = _feedback.get(feedback_id)

    if not feedback_data and DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherFeedbackRepository(db)
            feedback_data = repo.get_by_id(feedback_id)
        except Exception as e:
            logger.warning(f"DB read failed: {e}")
        finally:
            # BUG FIX: close the session even when the lookup raises.
            if db is not None:
                db.close()

    if not feedback_data:
        raise HTTPException(status_code=404, detail="Feedback nicht gefunden")

    feedback_data["status"] = status
    feedback_data["updated_at"] = datetime.utcnow().isoformat()

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherFeedbackRepository(db)
            repo.update_status(feedback_id, status)
        except Exception as e:
            logger.warning(f"DB update failed: {e}")
        finally:
            # BUG FIX: close the session even when the update raises.
            if db is not None:
                db.close()

    _feedback[feedback_id] = feedback_data
    return FeedbackResponse(**feedback_data)
|
||||
281
backend/api/classroom/homework.py
Normal file
281
backend/api/classroom/homework.py
Normal file
@@ -0,0 +1,281 @@
|
||||
"""
|
||||
Classroom API - Homework Endpoints.
|
||||
|
||||
Endpoints fuer Hausaufgaben-Tracking (Feature f20).
|
||||
"""
|
||||
|
||||
from uuid import uuid4
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from classroom_engine import Homework, HomeworkStatus
|
||||
|
||||
from .shared import init_db_if_needed, DB_ENABLED, logger
|
||||
|
||||
try:
|
||||
from classroom_engine.database import SessionLocal
|
||||
from classroom_engine.repository import HomeworkRepository
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
router = APIRouter(tags=["Homework"])
|
||||
|
||||
# In-Memory Storage (Fallback)
|
||||
_homework: Dict[str, Homework] = {}
|
||||
|
||||
|
||||
# === Pydantic Models ===
|
||||
|
||||
class CreateHomeworkRequest(BaseModel):
    """Request body for creating a homework assignment (feature f20)."""
    teacher_id: str
    class_id: str
    subject: str
    title: str = Field(..., max_length=300)
    description: str = ""
    # Optional lesson session this homework was assigned in.
    session_id: Optional[str] = None
    # Parsed with datetime.fromisoformat by the handler; 400 on bad format.
    due_date: Optional[str] = Field(None, description="ISO-Format Datum")
|
||||
|
||||
|
||||
class UpdateHomeworkRequest(BaseModel):
    """Partial-update body for a homework assignment; None fields are ignored."""
    title: Optional[str] = Field(None, max_length=300)
    description: Optional[str] = None
    # ISO-format date string; validated by the handler.
    due_date: Optional[str] = None
    # Must be a valid HomeworkStatus value; validated by the handler.
    status: Optional[str] = None
|
||||
|
||||
|
||||
class HomeworkResponse(BaseModel):
    """API representation of a homework assignment."""
    homework_id: str
    teacher_id: str
    class_id: str
    subject: str
    title: str
    description: str
    session_id: Optional[str]
    # ISO-8601 string or None (serialized from the domain object's datetime).
    due_date: Optional[str]
    # HomeworkStatus enum value, e.g. "assigned".
    status: str
    # Derived flag from the domain object (Homework.is_overdue).
    is_overdue: bool
    created_at: Optional[str]
    updated_at: Optional[str]
|
||||
|
||||
|
||||
class HomeworkListResponse(BaseModel):
    """List of homework assignments plus the count of items returned."""
    homework: List[HomeworkResponse]
    total: int
|
||||
|
||||
|
||||
# === Helper Functions ===
|
||||
|
||||
def build_homework_response(hw: Homework) -> HomeworkResponse:
    """Map a Homework domain object onto its API response model."""
    def _iso(ts):
        # Optional datetimes are serialized as ISO strings; None stays None.
        return ts.isoformat() if ts else None

    return HomeworkResponse(
        homework_id=hw.homework_id,
        teacher_id=hw.teacher_id,
        class_id=hw.class_id,
        subject=hw.subject,
        title=hw.title,
        description=hw.description,
        session_id=hw.session_id,
        due_date=_iso(hw.due_date),
        status=hw.status.value,
        is_overdue=hw.is_overdue,
        created_at=_iso(hw.created_at),
        updated_at=_iso(hw.updated_at),
    )
|
||||
|
||||
|
||||
# === Endpoints ===
|
||||
|
||||
@router.post("/homework", response_model=HomeworkResponse, status_code=201)
async def create_homework(request: CreateHomeworkRequest) -> HomeworkResponse:
    """Create a new homework assignment (feature f20).

    Best-effort DB persistence; the in-memory store always gets the entry.
    Raises HTTP 400 on an unparseable due_date.
    """
    init_db_if_needed()

    due_date = None
    if request.due_date:
        try:
            # Accept trailing 'Z' by normalizing it to a UTC offset.
            due_date = datetime.fromisoformat(request.due_date.replace('Z', '+00:00'))
        except ValueError:
            raise HTTPException(status_code=400, detail="Ungueltiges Datumsformat")

    homework = Homework(
        homework_id=str(uuid4()),
        teacher_id=request.teacher_id,
        class_id=request.class_id,
        subject=request.subject,
        title=request.title,
        description=request.description,
        session_id=request.session_id,
        due_date=due_date,
        status=HomeworkStatus.ASSIGNED,
        created_at=datetime.utcnow(),
    )

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = HomeworkRepository(db)
            repo.create(homework)
        except Exception as e:
            logger.warning(f"DB persist failed for homework: {e}")
        finally:
            # BUG FIX: close the session even when repo.create raises.
            if db is not None:
                db.close()

    _homework[homework.homework_id] = homework
    return build_homework_response(homework)
|
||||
|
||||
|
||||
@router.get("/homework", response_model=HomeworkListResponse)
async def list_homework(
    teacher_id: str = Query(...),
    class_id: Optional[str] = Query(None),
    status: Optional[str] = Query(None),
    include_completed: bool = Query(False),
    limit: int = Query(50, ge=1, le=100)
) -> HomeworkListResponse:
    """List a teacher's homework assignments (feature f20).

    Prefers the database (also warming the in-memory cache); falls back to
    the memory store on DB failure.
    """
    init_db_if_needed()

    homework_list = []

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = HomeworkRepository(db)
            if class_id:
                db_homework = repo.get_by_class(class_id, teacher_id, include_completed, limit)
            else:
                db_homework = repo.get_by_teacher(teacher_id, status, limit)

            for db_hw in db_homework:
                hw = repo.to_dataclass(db_hw)
                _homework[hw.homework_id] = hw  # warm the memory cache
                homework_list.append(build_homework_response(hw))
            return HomeworkListResponse(homework=homework_list, total=len(homework_list))
        except Exception as e:
            logger.warning(f"DB read failed for homework: {e}")
            # BUG FIX: drop partial DB results to avoid duplicates in the
            # memory fallback below.
            homework_list = []
        finally:
            # BUG FIX: close the session even when the read raises.
            if db is not None:
                db.close()

    for hw in _homework.values():
        if hw.teacher_id != teacher_id:
            continue
        if class_id and hw.class_id != class_id:
            continue
        if status and hw.status.value != status:
            continue
        if not include_completed and hw.status == HomeworkStatus.COMPLETED:
            continue
        homework_list.append(build_homework_response(hw))

    return HomeworkListResponse(homework=homework_list[:limit], total=len(homework_list))
|
||||
|
||||
|
||||
@router.get("/homework/{homework_id}", response_model=HomeworkResponse)
async def get_homework(homework_id: str) -> HomeworkResponse:
    """Fetch a single homework assignment (feature f20).

    Checks the in-memory store first, then the database (caching the hit).
    Raises HTTP 404 when not found.
    """
    init_db_if_needed()

    if homework_id in _homework:
        return build_homework_response(_homework[homework_id])

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = HomeworkRepository(db)
            db_hw = repo.get_by_id(homework_id)
            if db_hw:
                hw = repo.to_dataclass(db_hw)
                _homework[hw.homework_id] = hw
                return build_homework_response(hw)
        except Exception as e:
            logger.warning(f"DB read failed: {e}")
        finally:
            # BUG FIX: close the session even when the lookup raises.
            if db is not None:
                db.close()

    raise HTTPException(status_code=404, detail="Hausaufgabe nicht gefunden")
|
||||
|
||||
|
||||
@router.put("/homework/{homework_id}", response_model=HomeworkResponse)
async def update_homework(homework_id: str, request: UpdateHomeworkRequest) -> HomeworkResponse:
    """Partially update a homework assignment (feature f20).

    Only non-None request fields are applied. Raises HTTP 404 when the
    homework does not exist and HTTP 400 on invalid date/status values.
    """
    init_db_if_needed()

    homework = _homework.get(homework_id)

    if not homework and DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = HomeworkRepository(db)
            db_hw = repo.get_by_id(homework_id)
            if db_hw:
                homework = repo.to_dataclass(db_hw)
                _homework[homework.homework_id] = homework
        except Exception as e:
            logger.warning(f"DB read failed: {e}")
        finally:
            # BUG FIX: close the session even when the lookup raises.
            if db is not None:
                db.close()

    if not homework:
        raise HTTPException(status_code=404, detail="Hausaufgabe nicht gefunden")

    if request.title is not None:
        homework.title = request.title
    if request.description is not None:
        homework.description = request.description
    if request.due_date is not None:
        try:
            homework.due_date = datetime.fromisoformat(request.due_date.replace('Z', '+00:00'))
        except ValueError:
            raise HTTPException(status_code=400, detail="Ungueltiges Datumsformat")
    if request.status is not None:
        try:
            homework.status = HomeworkStatus(request.status)
        except ValueError:
            raise HTTPException(status_code=400, detail="Ungueltiger Status")

    homework.updated_at = datetime.utcnow()

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = HomeworkRepository(db)
            repo.update(homework)
        except Exception as e:
            logger.warning(f"DB update failed: {e}")
        finally:
            # BUG FIX: close the session even when the update raises.
            if db is not None:
                db.close()

    _homework[homework_id] = homework
    return build_homework_response(homework)
|
||||
|
||||
|
||||
@router.patch("/homework/{homework_id}/status")
async def update_homework_status(
    homework_id: str,
    status: str = Query(...)
) -> HomeworkResponse:
    """Update only the status of a homework assignment (feature f20).

    Thin wrapper: delegates to update_homework with a status-only payload.
    """
    status_only_update = UpdateHomeworkRequest(status=status)
    return await update_homework(homework_id, status_only_update)
|
||||
|
||||
|
||||
@router.delete("/homework/{homework_id}")
async def delete_homework(homework_id: str) -> Dict[str, str]:
    """Delete a homework assignment (feature f20).

    Idempotent: removes from the memory store if present and best-effort
    deletes from the database; never raises 404.
    """
    init_db_if_needed()

    if homework_id in _homework:
        del _homework[homework_id]

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = HomeworkRepository(db)
            repo.delete(homework_id)
        except Exception as e:
            logger.warning(f"DB delete failed: {e}")
        finally:
            # BUG FIX: close the session even when the delete raises.
            if db is not None:
                db.close()

    return {"status": "deleted", "homework_id": homework_id}
|
||||
343
backend/api/classroom/materials.py
Normal file
343
backend/api/classroom/materials.py
Normal file
@@ -0,0 +1,343 @@
|
||||
"""
|
||||
Classroom API - Materials Endpoints.
|
||||
|
||||
Endpoints fuer Unterrichtsmaterialien (Feature f19).
|
||||
"""
|
||||
|
||||
from uuid import uuid4
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from classroom_engine import PhaseMaterial, MaterialType
|
||||
|
||||
from .shared import init_db_if_needed, DB_ENABLED, logger
|
||||
|
||||
try:
|
||||
from classroom_engine.database import SessionLocal
|
||||
from classroom_engine.repository import MaterialRepository
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
router = APIRouter(tags=["Materials"])
|
||||
|
||||
# In-Memory Storage (Fallback)
|
||||
_materials: Dict[str, PhaseMaterial] = {}
|
||||
|
||||
|
||||
# === Pydantic Models ===
|
||||
|
||||
class CreateMaterialRequest(BaseModel):
    """Request body for creating a teaching material (feature f19)."""
    teacher_id: str
    title: str = Field(..., max_length=300)
    # Falls back to MaterialType.DOCUMENT in the handler when invalid.
    material_type: str = Field("document")
    url: Optional[str] = Field(None, max_length=2000)
    description: str = ""
    # Lesson phase the material belongs to, if any.
    phase: Optional[str] = None
    subject: str = ""
    grade_level: str = ""
    # NOTE: mutable default is safe here — pydantic deep-copies field defaults.
    tags: List[str] = []
    # Public materials are visible to other teachers in listings.
    is_public: bool = False
    session_id: Optional[str] = None
|
||||
|
||||
|
||||
class UpdateMaterialRequest(BaseModel):
    """Partial-update body for a material; None fields are ignored."""
    title: Optional[str] = Field(None, max_length=300)
    material_type: Optional[str] = None
    url: Optional[str] = Field(None, max_length=2000)
    description: Optional[str] = None
    phase: Optional[str] = None
    subject: Optional[str] = None
    grade_level: Optional[str] = None
    tags: Optional[List[str]] = None
    is_public: Optional[bool] = None
|
||||
|
||||
|
||||
class MaterialResponse(BaseModel):
    """API representation of a teaching material."""
    material_id: str
    teacher_id: str
    title: str
    # MaterialType enum value, e.g. "document".
    material_type: str
    url: Optional[str]
    description: str
    phase: Optional[str]
    subject: str
    grade_level: str
    tags: List[str]
    is_public: bool
    # How often the material has been used; starts at 0.
    usage_count: int
    session_id: Optional[str]
    # ISO-8601 strings or None.
    created_at: Optional[str]
    updated_at: Optional[str]
|
||||
|
||||
|
||||
class MaterialListResponse(BaseModel):
    """List of materials plus the total count of matches."""
    materials: List[MaterialResponse]
    total: int
|
||||
|
||||
|
||||
# === Helper Functions ===
|
||||
|
||||
def build_material_response(mat: PhaseMaterial) -> MaterialResponse:
    """Map a PhaseMaterial domain object onto its API response model."""
    def _iso(ts):
        # Optional datetimes become ISO strings; None stays None.
        return ts.isoformat() if ts else None

    return MaterialResponse(
        material_id=mat.material_id,
        teacher_id=mat.teacher_id,
        title=mat.title,
        material_type=mat.material_type.value,
        url=mat.url,
        description=mat.description,
        phase=mat.phase,
        subject=mat.subject,
        grade_level=mat.grade_level,
        tags=mat.tags,
        is_public=mat.is_public,
        usage_count=mat.usage_count,
        session_id=mat.session_id,
        created_at=_iso(mat.created_at),
        updated_at=_iso(mat.updated_at),
    )
|
||||
|
||||
|
||||
# === Endpoints ===
|
||||
|
||||
@router.post("/materials", response_model=MaterialResponse, status_code=201)
async def create_material(request: CreateMaterialRequest) -> MaterialResponse:
    """Create a new teaching material (feature f19).

    Unknown material_type values fall back to DOCUMENT. Best-effort DB
    persistence; the in-memory store always gets the entry.
    """
    init_db_if_needed()

    try:
        mat_type = MaterialType(request.material_type)
    except ValueError:
        # Lenient: unknown types are stored as generic documents.
        mat_type = MaterialType.DOCUMENT

    material = PhaseMaterial(
        material_id=str(uuid4()),
        teacher_id=request.teacher_id,
        title=request.title,
        material_type=mat_type,
        url=request.url,
        description=request.description,
        phase=request.phase,
        subject=request.subject,
        grade_level=request.grade_level,
        tags=request.tags,
        is_public=request.is_public,
        usage_count=0,
        session_id=request.session_id,
        created_at=datetime.utcnow(),
    )

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = MaterialRepository(db)
            repo.create(material)
        except Exception as e:
            logger.warning(f"DB persist failed for material: {e}")
        finally:
            # BUG FIX: close the session even when repo.create raises.
            if db is not None:
                db.close()

    _materials[material.material_id] = material
    return build_material_response(material)
|
||||
|
||||
|
||||
@router.get("/materials", response_model=MaterialListResponse)
async def list_materials(
    teacher_id: str = Query(...),
    phase: Optional[str] = Query(None),
    subject: Optional[str] = Query(None),
    include_public: bool = Query(True),
    limit: int = Query(50, ge=1, le=100)
) -> MaterialListResponse:
    """List a teacher's materials (feature f19).

    Prefers the database (also warming the in-memory cache); falls back to
    the memory store on DB failure.
    """
    init_db_if_needed()

    materials_list = []

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = MaterialRepository(db)
            if phase:
                db_materials = repo.get_by_phase(phase, teacher_id, include_public)
            else:
                db_materials = repo.get_by_teacher(teacher_id, phase, subject, limit)

            for db_mat in db_materials:
                mat = repo.to_dataclass(db_mat)
                _materials[mat.material_id] = mat  # warm the memory cache
                materials_list.append(build_material_response(mat))
            return MaterialListResponse(materials=materials_list, total=len(materials_list))
        except Exception as e:
            logger.warning(f"DB read failed for materials: {e}")
            # BUG FIX: drop partial DB results to avoid duplicates in the
            # memory fallback below.
            materials_list = []
        finally:
            # BUG FIX: close the session even when the read raises.
            if db is not None:
                db.close()

    for mat in _materials.values():
        if mat.teacher_id != teacher_id and not (include_public and mat.is_public):
            continue
        if phase and mat.phase != phase:
            continue
        if subject and mat.subject != subject:
            continue
        materials_list.append(build_material_response(mat))

    return MaterialListResponse(materials=materials_list[:limit], total=len(materials_list))
|
||||
|
||||
|
||||
@router.get("/materials/by-phase/{phase}", response_model=MaterialListResponse)
|
||||
async def get_materials_by_phase(
|
||||
phase: str,
|
||||
teacher_id: str = Query(...),
|
||||
subject: Optional[str] = Query(None),
|
||||
limit: int = Query(50, ge=1, le=100)
|
||||
) -> MaterialListResponse:
|
||||
"""Holt Materialien fuer eine bestimmte Phase (Feature f19)."""
|
||||
return await list_materials(teacher_id=teacher_id, phase=phase, subject=subject, limit=limit)
|
||||
|
||||
|
||||
@router.get("/materials/{material_id}", response_model=MaterialResponse)
|
||||
async def get_material(material_id: str) -> MaterialResponse:
|
||||
"""Ruft ein einzelnes Material ab (Feature f19)."""
|
||||
init_db_if_needed()
|
||||
|
||||
if material_id in _materials:
|
||||
return build_material_response(_materials[material_id])
|
||||
|
||||
if DB_ENABLED:
|
||||
try:
|
||||
db = SessionLocal()
|
||||
repo = MaterialRepository(db)
|
||||
db_mat = repo.get_by_id(material_id)
|
||||
db.close()
|
||||
if db_mat:
|
||||
mat = repo.to_dataclass(db_mat)
|
||||
_materials[mat.material_id] = mat
|
||||
return build_material_response(mat)
|
||||
except Exception as e:
|
||||
logger.warning(f"DB read failed: {e}")
|
||||
|
||||
raise HTTPException(status_code=404, detail="Material nicht gefunden")
|
||||
|
||||
|
||||
@router.put("/materials/{material_id}", response_model=MaterialResponse)
|
||||
async def update_material(material_id: str, request: UpdateMaterialRequest) -> MaterialResponse:
|
||||
"""Aktualisiert ein Material (Feature f19)."""
|
||||
init_db_if_needed()
|
||||
|
||||
material = _materials.get(material_id)
|
||||
|
||||
if not material and DB_ENABLED:
|
||||
try:
|
||||
db = SessionLocal()
|
||||
repo = MaterialRepository(db)
|
||||
db_mat = repo.get_by_id(material_id)
|
||||
db.close()
|
||||
if db_mat:
|
||||
material = repo.to_dataclass(db_mat)
|
||||
_materials[material.material_id] = material
|
||||
except Exception as e:
|
||||
logger.warning(f"DB read failed: {e}")
|
||||
|
||||
if not material:
|
||||
raise HTTPException(status_code=404, detail="Material nicht gefunden")
|
||||
|
||||
if request.title is not None:
|
||||
material.title = request.title
|
||||
if request.material_type is not None:
|
||||
try:
|
||||
material.material_type = MaterialType(request.material_type)
|
||||
except ValueError:
|
||||
raise HTTPException(status_code=400, detail="Ungueltiger Material-Typ")
|
||||
if request.url is not None:
|
||||
material.url = request.url
|
||||
if request.description is not None:
|
||||
material.description = request.description
|
||||
if request.phase is not None:
|
||||
material.phase = request.phase
|
||||
if request.subject is not None:
|
||||
material.subject = request.subject
|
||||
if request.grade_level is not None:
|
||||
material.grade_level = request.grade_level
|
||||
if request.tags is not None:
|
||||
material.tags = request.tags
|
||||
if request.is_public is not None:
|
||||
material.is_public = request.is_public
|
||||
|
||||
material.updated_at = datetime.utcnow()
|
||||
|
||||
if DB_ENABLED:
|
||||
try:
|
||||
db = SessionLocal()
|
||||
repo = MaterialRepository(db)
|
||||
repo.update(material)
|
||||
db.close()
|
||||
except Exception as e:
|
||||
logger.warning(f"DB update failed: {e}")
|
||||
|
||||
_materials[material_id] = material
|
||||
return build_material_response(material)
|
||||
|
||||
|
||||
@router.post("/materials/{material_id}/attach/{session_id}")
|
||||
async def attach_material_to_session(material_id: str, session_id: str) -> MaterialResponse:
|
||||
"""Verknuepft ein Material mit einer Session (Feature f19)."""
|
||||
init_db_if_needed()
|
||||
|
||||
material = _materials.get(material_id)
|
||||
|
||||
if not material and DB_ENABLED:
|
||||
try:
|
||||
db = SessionLocal()
|
||||
repo = MaterialRepository(db)
|
||||
db_mat = repo.get_by_id(material_id)
|
||||
if db_mat:
|
||||
material = repo.to_dataclass(db_mat)
|
||||
db.close()
|
||||
except Exception as e:
|
||||
logger.warning(f"DB read failed: {e}")
|
||||
|
||||
if not material:
|
||||
raise HTTPException(status_code=404, detail="Material nicht gefunden")
|
||||
|
||||
material.session_id = session_id
|
||||
material.usage_count += 1
|
||||
material.updated_at = datetime.utcnow()
|
||||
|
||||
if DB_ENABLED:
|
||||
try:
|
||||
db = SessionLocal()
|
||||
repo = MaterialRepository(db)
|
||||
repo.attach_to_session(material_id, session_id)
|
||||
db.close()
|
||||
except Exception as e:
|
||||
logger.warning(f"DB update failed: {e}")
|
||||
|
||||
_materials[material_id] = material
|
||||
return build_material_response(material)
|
||||
|
||||
|
||||
@router.delete("/materials/{material_id}")
|
||||
async def delete_material(material_id: str) -> Dict[str, str]:
|
||||
"""Loescht ein Material (Feature f19)."""
|
||||
init_db_if_needed()
|
||||
|
||||
if material_id in _materials:
|
||||
del _materials[material_id]
|
||||
|
||||
if DB_ENABLED:
|
||||
try:
|
||||
db = SessionLocal()
|
||||
repo = MaterialRepository(db)
|
||||
repo.delete(material_id)
|
||||
db.close()
|
||||
except Exception as e:
|
||||
logger.warning(f"DB delete failed: {e}")
|
||||
|
||||
return {"status": "deleted", "material_id": material_id}
|
||||
489
backend/api/classroom/models.py
Normal file
489
backend/api/classroom/models.py
Normal file
@@ -0,0 +1,489 @@
|
||||
"""
|
||||
Classroom API - Pydantic Models.
|
||||
|
||||
Alle Request/Response Models fuer die Classroom API.
|
||||
"""
|
||||
|
||||
from typing import Dict, List, Optional, Any
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
# === Request Models ===
|
||||
|
||||
class CreateSessionRequest(BaseModel):
    """Request to create a new lesson session."""
    teacher_id: str = Field(..., description="ID des Lehrers")
    class_id: str = Field(..., description="ID der Klasse")
    subject: str = Field(..., description="Unterrichtsfach")
    topic: Optional[str] = Field(None, description="Thema der Stunde")
    # Per-phase overrides in minutes; unspecified phases keep server defaults.
    phase_durations: Optional[Dict[str, int]] = Field(
        None,
        description="Optionale individuelle Phasendauern in Minuten"
    )
|
||||
|
||||
|
||||
class NotesRequest(BaseModel):
    """Request to replace a session's notes and homework text."""
    notes: str = Field("", description="Stundennotizen")
    homework: str = Field("", description="Hausaufgaben")
|
||||
|
||||
|
||||
class ExtendTimeRequest(BaseModel):
    """Request to extend the current phase (feature f28)."""
    minutes: int = Field(5, ge=1, le=30, description="Zusaetzliche Minuten (1-30)")
|
||||
|
||||
|
||||
# === Response Models ===
|
||||
|
||||
class PhaseInfo(BaseModel):
    """Timeline entry for a single lesson phase."""
    phase: str
    display_name: str
    icon: str
    duration_minutes: int
    # Position of this phase relative to the session's current phase.
    is_completed: bool
    is_current: bool
    is_future: bool
|
||||
|
||||
|
||||
class TimerStatus(BaseModel):
    """Timer status of the currently running phase.

    The *_formatted fields carry human-readable time strings alongside the
    raw second counters.
    """
    remaining_seconds: int
    remaining_formatted: str
    total_seconds: int
    total_formatted: str
    elapsed_seconds: int
    elapsed_formatted: str
    percentage_remaining: int
    percentage_elapsed: int
    percentage: int = Field(description="Alias fuer percentage_remaining (Visual Timer)")
    # True shortly before the phase time runs out.
    warning: bool
    # Overtime tracking once the planned duration is exceeded.
    overtime: bool
    overtime_seconds: int
    overtime_formatted: Optional[str]
    is_paused: bool = Field(False, description="Ist der Timer pausiert?")
|
||||
|
||||
|
||||
class SuggestionItem(BaseModel):
    """A single activity suggestion for the current phase."""
    id: str
    title: str
    description: str
    activity_type: str
    estimated_minutes: int
    icon: str
    content_url: Optional[str]
|
||||
|
||||
|
||||
class SessionResponse(BaseModel):
    """Complete session state returned by the session endpoints."""
    session_id: str
    teacher_id: str
    class_id: str
    subject: str
    topic: Optional[str]
    current_phase: str
    phase_display_name: str
    # ISO-8601 timestamps (None until the respective event happened).
    phase_started_at: Optional[str]
    lesson_started_at: Optional[str]
    lesson_ended_at: Optional[str]
    timer: TimerStatus
    # Full phase timeline (past, current and future phases).
    phases: List[PhaseInfo]
    phase_history: List[Dict[str, Any]]
    notes: str
    homework: str
    is_active: bool
    is_ended: bool
    is_paused: bool = Field(False, description="Ist die Stunde pausiert?")
|
||||
|
||||
|
||||
class SuggestionsResponse(BaseModel):
    """Response wrapping phase-specific activity suggestions."""
    suggestions: List[SuggestionItem]
    current_phase: str
    phase_display_name: str
    # Total number of suggestions available (may exceed len(suggestions)).
    total_available: int
|
||||
|
||||
|
||||
class PhasesListResponse(BaseModel):
    """List of all available lesson phases."""
    phases: List[Dict[str, Any]]
|
||||
|
||||
|
||||
class ActiveSessionsResponse(BaseModel):
    """List of currently active sessions."""
    sessions: List[Dict[str, Any]]
    count: int
|
||||
|
||||
|
||||
# === Session History Models (Feature f17) ===
|
||||
|
||||
class SessionHistoryItem(BaseModel):
    """One entry in a teacher's session history (feature f17)."""
    session_id: str
    teacher_id: str
    class_id: str
    subject: str
    topic: Optional[str]
    # ISO-8601 timestamps; None when the lesson never ran/finished.
    lesson_started_at: Optional[str]
    lesson_ended_at: Optional[str]
    total_duration_minutes: int
    phases_completed: int
    notes: str
    homework: str
|
||||
|
||||
|
||||
class SessionHistoryResponse(BaseModel):
    """Paginated session history."""
    sessions: List[SessionHistoryItem]
    total_count: int
    page: int
    page_size: int
|
||||
|
||||
|
||||
# === Template Models ===
|
||||
|
||||
class TemplatePhaseConfig(BaseModel):
    """Configuration of one phase inside a lesson template."""
    phase: str
    duration_minutes: int
    activities: List[str] = Field(default_factory=list)
    notes: str = ""
|
||||
|
||||
|
||||
class CreateTemplateRequest(BaseModel):
    """Request to create a lesson template."""
    name: str = Field(..., min_length=1, max_length=100)
    description: str = Field("", max_length=500)
    subject: str = Field(..., min_length=1)
    grade_level: Optional[str] = None
    # None means: use the server's default phase plan.
    phase_configs: Optional[List[TemplatePhaseConfig]] = None
    tags: List[str] = Field(default_factory=list)
    is_public: bool = False
|
||||
|
||||
|
||||
class UpdateTemplateRequest(BaseModel):
    """Partial update of a template; None fields are left unchanged."""
    name: Optional[str] = Field(None, min_length=1, max_length=100)
    description: Optional[str] = Field(None, max_length=500)
    subject: Optional[str] = None
    grade_level: Optional[str] = None
    phase_configs: Optional[List[TemplatePhaseConfig]] = None
    tags: Optional[List[str]] = None
    is_public: Optional[bool] = None
|
||||
|
||||
|
||||
class TemplateResponse(BaseModel):
    """Response for a single lesson template."""
    template_id: str
    name: str
    description: str
    subject: str
    grade_level: Optional[str]
    phase_configs: List[TemplatePhaseConfig]
    tags: List[str]
    is_public: bool
    # True for built-in templates shipped with the system.
    is_system: bool
    created_by: str
    created_at: str
    updated_at: Optional[str]
    usage_count: int
|
||||
|
||||
|
||||
class TemplateListResponse(BaseModel):
    """Response for a list of templates."""
    templates: List[TemplateResponse]
    total_count: int
|
||||
|
||||
|
||||
class CreateFromTemplateRequest(BaseModel):
    """Request to create a session from an existing template."""
    template_id: str
    class_id: str
    topic: Optional[str] = None
    # Per-phase duration overrides applied on top of the template.
    phase_duration_overrides: Optional[Dict[str, int]] = None
|
||||
|
||||
|
||||
# === Homework Models ===
|
||||
|
||||
class CreateHomeworkRequest(BaseModel):
    """Request to create a homework assignment."""
    # Optional link to the lesson session the homework was assigned in.
    session_id: Optional[str] = None
    teacher_id: str
    class_id: str
    subject: str
    title: str = Field(..., min_length=1, max_length=200)
    description: str = Field("", max_length=2000)
    due_date: Optional[str] = None
    estimated_minutes: Optional[int] = Field(None, ge=5, le=180)
    materials: List[str] = Field(default_factory=list)
    tags: List[str] = Field(default_factory=list)
|
||||
|
||||
|
||||
class UpdateHomeworkRequest(BaseModel):
    """Partial update of a homework assignment; None fields stay unchanged."""
    title: Optional[str] = Field(None, min_length=1, max_length=200)
    description: Optional[str] = Field(None, max_length=2000)
    due_date: Optional[str] = None
    estimated_minutes: Optional[int] = Field(None, ge=5, le=180)
    status: Optional[str] = None
    materials: Optional[List[str]] = None
    tags: Optional[List[str]] = None
|
||||
|
||||
|
||||
class HomeworkResponse(BaseModel):
    """Response for a single homework assignment."""
    homework_id: str
    session_id: Optional[str]
    teacher_id: str
    class_id: str
    subject: str
    title: str
    description: str
    due_date: Optional[str]
    estimated_minutes: Optional[int]
    status: str
    materials: List[str]
    tags: List[str]
    created_at: str
    updated_at: Optional[str]
|
||||
|
||||
|
||||
class HomeworkListResponse(BaseModel):
    """Response for a list of homework assignments."""
    homework: List[HomeworkResponse]
    total_count: int
|
||||
|
||||
|
||||
# === Material Models ===
|
||||
|
||||
class CreateMaterialRequest(BaseModel):
    """Request to create a material.

    NOTE(review): the materials endpoints read `url`, `grade_level` and
    `session_id` from this request, but only `content_url`/`content_data`
    were declared here. The missing fields are added as optional
    (backward-compatible) so those endpoints no longer fail on attribute
    access — confirm against the pre-rebase API which spelling is canonical.
    """
    teacher_id: str
    title: str = Field(..., min_length=1, max_length=200)
    description: str = Field("", max_length=1000)
    material_type: str = Field(..., description="Type: link, document, video, interactive, image")
    content_url: Optional[str] = None
    content_data: Optional[Dict[str, Any]] = None
    phase: Optional[str] = None
    subject: Optional[str] = None
    tags: List[str] = Field(default_factory=list)
    is_public: bool = False
    # Fields consumed by the classroom materials endpoints (additive).
    url: Optional[str] = None
    grade_level: Optional[str] = None
    session_id: Optional[str] = None
|
||||
|
||||
|
||||
class UpdateMaterialRequest(BaseModel):
    """Partial update of a material; None fields stay unchanged.

    NOTE(review): the update endpoint also reads `url` and `grade_level`,
    which were missing here — added as optional (backward-compatible).
    """
    title: Optional[str] = Field(None, min_length=1, max_length=200)
    description: Optional[str] = Field(None, max_length=1000)
    material_type: Optional[str] = None
    content_url: Optional[str] = None
    content_data: Optional[Dict[str, Any]] = None
    phase: Optional[str] = None
    subject: Optional[str] = None
    tags: Optional[List[str]] = None
    is_public: Optional[bool] = None
    # Fields consumed by the classroom materials endpoints (additive).
    url: Optional[str] = None
    grade_level: Optional[str] = None
|
||||
|
||||
|
||||
class MaterialResponse(BaseModel):
    """Response for a single material.

    NOTE(review): build_material_response() constructs this model with
    `url`, `grade_level` and `session_id`, never with `content_url` /
    `content_data`, and may pass created_at=None. Those fields are added
    resp. defaulted here so the endpoints validate; confirm against the
    pre-rebase API which field set is canonical.
    """
    material_id: str
    teacher_id: str
    title: str
    description: str
    material_type: str
    content_url: Optional[str] = None
    content_data: Optional[Dict[str, Any]] = None
    phase: Optional[str]
    subject: Optional[str]
    tags: List[str]
    is_public: bool
    usage_count: int
    # May be None for legacy rows without a creation timestamp.
    created_at: Optional[str] = None
    updated_at: Optional[str]
    # Fields produced by the classroom materials endpoints (additive).
    url: Optional[str] = None
    grade_level: Optional[str] = None
    session_id: Optional[str] = None
|
||||
|
||||
|
||||
class MaterialListResponse(BaseModel):
    """Response for a list of materials.

    NOTE(review): the materials endpoints construct this model with
    `total=` while the declared field is `total_count`. Both spellings
    are accepted here until the endpoints are reconciled.
    """
    materials: List[MaterialResponse]
    total_count: int = 0

    def __init__(self, **data: Any) -> None:
        # Backward-compat shim: map the endpoints' `total=` keyword onto
        # the declared `total_count` field.
        if "total" in data and "total_count" not in data:
            data["total_count"] = data.pop("total")
        super().__init__(**data)
|
||||
|
||||
|
||||
# === Feedback Models ===
|
||||
|
||||
class CreateFeedbackRequest(BaseModel):
    """Request to submit teacher feedback."""
    teacher_id: str
    session_id: Optional[str] = None
    category: str = Field(..., description="bug, feature, usability, content, other")
    title: str = Field(..., min_length=1, max_length=200)
    description: str = Field(..., min_length=10, max_length=5000)
    priority: str = Field("medium", description="low, medium, high, critical")
    # Free-form client context (e.g. UI state) attached to the report.
    context_data: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class FeedbackResponse(BaseModel):
    """Response for a single feedback entry."""
    feedback_id: str
    teacher_id: str
    session_id: Optional[str]
    category: str
    title: str
    description: str
    priority: str
    status: str
    context_data: Optional[Dict[str, Any]]
    admin_notes: Optional[str]
    created_at: str
    updated_at: Optional[str]
|
||||
|
||||
|
||||
class FeedbackListResponse(BaseModel):
    """Response for a list of feedback entries."""
    feedback: List[FeedbackResponse]
    total_count: int
|
||||
|
||||
|
||||
class FeedbackStatsResponse(BaseModel):
    """Aggregated feedback statistics, bucketed three ways."""
    total: int
    by_category: Dict[str, int]
    by_status: Dict[str, int]
    by_priority: Dict[str, int]
|
||||
|
||||
|
||||
# === Settings Models ===
|
||||
|
||||
class PhaseDurationsUpdate(BaseModel):
    """Update of default phase durations (minutes); None fields stay unchanged.

    Bounds differ per phase to keep lesson plans sane.
    """
    einstieg: Optional[int] = Field(None, ge=1, le=30)
    erarbeitung: Optional[int] = Field(None, ge=5, le=45)
    sicherung: Optional[int] = Field(None, ge=3, le=20)
    transfer: Optional[int] = Field(None, ge=3, le=20)
    reflexion: Optional[int] = Field(None, ge=2, le=15)
|
||||
|
||||
|
||||
class PreferencesUpdate(BaseModel):
    """Partial update of teacher preferences; None fields stay unchanged."""
    auto_advance: Optional[bool] = None
    sound_enabled: Optional[bool] = None
    notification_enabled: Optional[bool] = None
    theme: Optional[str] = None
    language: Optional[str] = None
|
||||
|
||||
|
||||
class TeacherSettingsResponse(BaseModel):
    """Response with a teacher's settings."""
    teacher_id: str
    # Phase name -> duration in minutes.
    phase_durations: Dict[str, int]
    preferences: Dict[str, Any]
    created_at: str
    updated_at: Optional[str]
|
||||
|
||||
|
||||
# === Analytics Models ===
|
||||
|
||||
class ReflectionRequest(BaseModel):
    """Request to create or update a lesson reflection.

    All ratings use a 1-5 scale.
    """
    session_id: str
    teacher_id: str
    overall_rating: int = Field(..., ge=1, le=5)
    time_management_rating: int = Field(..., ge=1, le=5)
    student_engagement_rating: int = Field(..., ge=1, le=5)
    goals_achieved_rating: int = Field(..., ge=1, le=5)
    what_worked_well: str = Field("", max_length=2000)
    what_to_improve: str = Field("", max_length=2000)
    notes_for_next_time: str = Field("", max_length=2000)
    tags: List[str] = Field(default_factory=list)
|
||||
|
||||
|
||||
class ReflectionResponse(BaseModel):
    """Response for a lesson reflection (ratings on a 1-5 scale)."""
    reflection_id: str
    session_id: str
    teacher_id: str
    overall_rating: int
    time_management_rating: int
    student_engagement_rating: int
    goals_achieved_rating: int
    what_worked_well: str
    what_to_improve: str
    notes_for_next_time: str
    tags: List[str]
    created_at: str
    updated_at: Optional[str]
|
||||
|
||||
|
||||
# === Teacher Context Models (v1 API) ===
|
||||
|
||||
class TeacherContextResponse(BaseModel):
    """Response with the teacher context (v1 API)."""
    teacher_id: str
    federal_state: Optional[str]
    school_type: Optional[str]
    subjects: List[str]
    class_levels: List[str]
    # Current macro phase of the school year (e.g. exam period).
    current_macro_phase: Optional[str]
    onboarding_completed: bool
    preferences: Dict[str, Any]
    created_at: str
    updated_at: Optional[str]
|
||||
|
||||
|
||||
class UpdateTeacherContextRequest(BaseModel):
    """Partial update of the teacher context; None fields stay unchanged."""
    federal_state: Optional[str] = None
    school_type: Optional[str] = None
    subjects: Optional[List[str]] = None
    class_levels: Optional[List[str]] = None
    current_macro_phase: Optional[str] = None
    preferences: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class EventResponse(BaseModel):
    """Response for a school-year event."""
    event_id: str
    teacher_id: str
    title: str
    event_type: str
    start_date: str
    # None for single-day events.
    end_date: Optional[str]
    description: Optional[str]
    status: str
    metadata: Optional[Dict[str, Any]]
    created_at: str
|
||||
|
||||
|
||||
class CreateEventRequest(BaseModel):
    """Request to create a school-year event."""
    title: str = Field(..., min_length=1, max_length=200)
    event_type: str
    start_date: str
    end_date: Optional[str] = None
    description: Optional[str] = Field(None, max_length=1000)
    metadata: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class RoutineResponse(BaseModel):
    """Response for a recurring routine."""
    routine_id: str
    teacher_id: str
    title: str
    routine_type: str
    recurrence_pattern: str
    # Weekday for weekly routines (presumably 0=Monday — TODO confirm).
    day_of_week: Optional[int]
    time_of_day: Optional[str]
    description: Optional[str]
    is_active: bool
    metadata: Optional[Dict[str, Any]]
    created_at: str
|
||||
|
||||
|
||||
class CreateRoutineRequest(BaseModel):
    """Request to create a recurring routine."""
    title: str = Field(..., min_length=1, max_length=200)
    routine_type: str
    recurrence_pattern: str
    # Weekday index constrained to 0-6 (presumably 0=Monday — TODO confirm).
    day_of_week: Optional[int] = Field(None, ge=0, le=6)
    time_of_day: Optional[str] = None
    description: Optional[str] = Field(None, max_length=500)
    metadata: Optional[Dict[str, Any]] = None
|
||||
434
backend/api/classroom/sessions.py
Normal file
434
backend/api/classroom/sessions.py
Normal file
@@ -0,0 +1,434 @@
|
||||
"""
|
||||
Classroom API - Session Endpoints.
|
||||
|
||||
Endpoints fuer Session-Management, Timer, Phasen-Kontrolle und History.
|
||||
"""
|
||||
|
||||
from uuid import uuid4
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
|
||||
from classroom_engine import (
|
||||
LessonPhase,
|
||||
LessonSession,
|
||||
LessonStateMachine,
|
||||
PhaseTimer,
|
||||
SuggestionEngine,
|
||||
)
|
||||
|
||||
from .models import (
|
||||
CreateSessionRequest,
|
||||
NotesRequest,
|
||||
ExtendTimeRequest,
|
||||
SessionResponse,
|
||||
TimerStatus,
|
||||
SuggestionItem,
|
||||
SuggestionsResponse,
|
||||
PhaseInfo,
|
||||
SessionHistoryItem,
|
||||
SessionHistoryResponse,
|
||||
)
|
||||
from .shared import (
|
||||
init_db_if_needed,
|
||||
get_session_or_404,
|
||||
persist_session,
|
||||
get_sessions,
|
||||
add_session,
|
||||
ws_manager,
|
||||
DB_ENABLED,
|
||||
logger,
|
||||
)
|
||||
|
||||
# Database imports
|
||||
try:
|
||||
from classroom_engine.database import SessionLocal
|
||||
from classroom_engine.repository import SessionRepository
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
router = APIRouter(tags=["Sessions"])
|
||||
|
||||
|
||||
def build_session_response(session: LessonSession) -> SessionResponse:
    """Assemble the complete API response for a session.

    Combines the timer status, the phase timeline and the raw session
    fields into one SessionResponse.
    """
    state_machine = LessonStateMachine()
    timer_info = PhaseTimer().get_phase_status(session)
    timeline = state_machine.get_phases_info(session)

    def _iso(value):
        # Optional datetime -> ISO-8601 string (or None).
        return value.isoformat() if value else None

    return SessionResponse(
        session_id=session.session_id,
        teacher_id=session.teacher_id,
        class_id=session.class_id,
        subject=session.subject,
        topic=session.topic,
        current_phase=session.current_phase.value,
        phase_display_name=session.get_phase_display_name(),
        phase_started_at=_iso(session.phase_started_at),
        lesson_started_at=_iso(session.lesson_started_at),
        lesson_ended_at=_iso(session.lesson_ended_at),
        timer=TimerStatus(**timer_info),
        phases=[PhaseInfo(**entry) for entry in timeline],
        phase_history=session.phase_history,
        notes=session.notes,
        homework=session.homework,
        is_active=state_machine.is_lesson_active(session),
        is_ended=state_machine.is_lesson_ended(session),
        is_paused=session.is_paused,
    )
|
||||
|
||||
|
||||
async def notify_phase_change(session_id: str, phase: str, extra_data: dict = None):
    """Broadcast a phase change to connected WebSocket clients."""
    payload = {"phase": phase, **(extra_data or {})}
    await ws_manager.broadcast_phase_change(session_id, payload)
|
||||
|
||||
|
||||
async def notify_session_ended(session_id: str):
    """Tell connected WebSocket clients that the session has ended."""
    await ws_manager.broadcast_session_ended(session_id)
|
||||
|
||||
|
||||
# === Session CRUD Endpoints ===
|
||||
|
||||
@router.post("/sessions", response_model=SessionResponse)
|
||||
async def create_session(request: CreateSessionRequest) -> SessionResponse:
|
||||
"""
|
||||
Erstellt eine neue Unterrichtsstunde (Session).
|
||||
|
||||
Die Stunde ist nach Erstellung im Status NOT_STARTED.
|
||||
Zum Starten muss /sessions/{id}/start aufgerufen werden.
|
||||
"""
|
||||
init_db_if_needed()
|
||||
|
||||
phase_durations = {
|
||||
"einstieg": 8,
|
||||
"erarbeitung": 20,
|
||||
"sicherung": 10,
|
||||
"transfer": 7,
|
||||
"reflexion": 5,
|
||||
}
|
||||
if request.phase_durations:
|
||||
phase_durations.update(request.phase_durations)
|
||||
|
||||
session = LessonSession(
|
||||
session_id=str(uuid4()),
|
||||
teacher_id=request.teacher_id,
|
||||
class_id=request.class_id,
|
||||
subject=request.subject,
|
||||
topic=request.topic,
|
||||
phase_durations=phase_durations,
|
||||
)
|
||||
|
||||
add_session(session)
|
||||
return build_session_response(session)
|
||||
|
||||
|
||||
@router.get("/sessions/{session_id}", response_model=SessionResponse)
|
||||
async def get_session(session_id: str) -> SessionResponse:
|
||||
"""
|
||||
Ruft den aktuellen Status einer Session ab.
|
||||
|
||||
Enthaelt alle Informationen inkl. Timer-Status und Phasen-Timeline.
|
||||
"""
|
||||
session = get_session_or_404(session_id)
|
||||
return build_session_response(session)
|
||||
|
||||
|
||||
@router.post("/sessions/{session_id}/start", response_model=SessionResponse)
|
||||
async def start_lesson(session_id: str) -> SessionResponse:
|
||||
"""
|
||||
Startet die Unterrichtsstunde.
|
||||
|
||||
Wechselt von NOT_STARTED zur ersten Phase (EINSTIEG).
|
||||
"""
|
||||
session = get_session_or_404(session_id)
|
||||
|
||||
if session.current_phase != LessonPhase.NOT_STARTED:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Stunde bereits gestartet (aktuelle Phase: {session.current_phase.value})"
|
||||
)
|
||||
|
||||
fsm = LessonStateMachine()
|
||||
session = fsm.transition(session, LessonPhase.EINSTIEG)
|
||||
|
||||
persist_session(session)
|
||||
return build_session_response(session)
|
||||
|
||||
|
||||
@router.post("/sessions/{session_id}/next-phase", response_model=SessionResponse)
|
||||
async def next_phase(session_id: str) -> SessionResponse:
|
||||
"""
|
||||
Wechselt zur naechsten Phase.
|
||||
|
||||
Wirft 400 wenn keine naechste Phase verfuegbar (z.B. bei ENDED).
|
||||
"""
|
||||
session = get_session_or_404(session_id)
|
||||
|
||||
fsm = LessonStateMachine()
|
||||
next_p = fsm.next_phase(session.current_phase)
|
||||
|
||||
if not next_p:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Keine naechste Phase verfuegbar (aktuelle Phase: {session.current_phase.value})"
|
||||
)
|
||||
|
||||
session = fsm.transition(session, next_p)
|
||||
persist_session(session)
|
||||
|
||||
response = build_session_response(session)
|
||||
await notify_phase_change(session_id, session.current_phase.value, {
|
||||
"phase_display_name": session.get_phase_display_name(),
|
||||
"is_ended": session.current_phase == LessonPhase.ENDED
|
||||
})
|
||||
return response
|
||||
|
||||
|
||||
@router.post("/sessions/{session_id}/end", response_model=SessionResponse)
|
||||
async def end_lesson(session_id: str) -> SessionResponse:
|
||||
"""
|
||||
Beendet die Unterrichtsstunde sofort.
|
||||
|
||||
Kann von jeder aktiven Phase aus aufgerufen werden.
|
||||
"""
|
||||
session = get_session_or_404(session_id)
|
||||
|
||||
if session.current_phase == LessonPhase.ENDED:
|
||||
raise HTTPException(status_code=400, detail="Stunde bereits beendet")
|
||||
|
||||
if session.current_phase == LessonPhase.NOT_STARTED:
|
||||
raise HTTPException(status_code=400, detail="Stunde noch nicht gestartet")
|
||||
|
||||
fsm = LessonStateMachine()
|
||||
|
||||
while session.current_phase != LessonPhase.ENDED:
|
||||
next_p = fsm.next_phase(session.current_phase)
|
||||
if next_p:
|
||||
session = fsm.transition(session, next_p)
|
||||
else:
|
||||
break
|
||||
|
||||
persist_session(session)
|
||||
await notify_session_ended(session_id)
|
||||
return build_session_response(session)
|
||||
|
||||
|
||||
# === Quick Actions (Feature f26/f27/f28) ===
|
||||
|
||||
@router.post("/sessions/{session_id}/pause", response_model=SessionResponse)
|
||||
async def toggle_pause(session_id: str) -> SessionResponse:
|
||||
"""
|
||||
Pausiert oder setzt die laufende Stunde fort (Feature f27).
|
||||
|
||||
Toggle-Funktion: Wenn pausiert -> fortsetzen, wenn laufend -> pausieren.
|
||||
"""
|
||||
session = get_session_or_404(session_id)
|
||||
|
||||
if session.current_phase in [LessonPhase.NOT_STARTED, LessonPhase.ENDED]:
|
||||
raise HTTPException(status_code=400, detail="Stunde ist nicht aktiv")
|
||||
|
||||
if session.is_paused:
|
||||
if session.pause_started_at:
|
||||
pause_duration = (datetime.utcnow() - session.pause_started_at).total_seconds()
|
||||
session.total_paused_seconds += int(pause_duration)
|
||||
|
||||
session.is_paused = False
|
||||
session.pause_started_at = None
|
||||
else:
|
||||
session.is_paused = True
|
||||
session.pause_started_at = datetime.utcnow()
|
||||
|
||||
persist_session(session)
|
||||
return build_session_response(session)
|
||||
|
||||
|
||||
@router.post("/sessions/{session_id}/extend", response_model=SessionResponse)
|
||||
async def extend_phase(session_id: str, request: ExtendTimeRequest) -> SessionResponse:
|
||||
"""
|
||||
Verlaengert die aktuelle Phase um zusaetzliche Minuten (Feature f28).
|
||||
"""
|
||||
session = get_session_or_404(session_id)
|
||||
|
||||
if session.current_phase in [LessonPhase.NOT_STARTED, LessonPhase.ENDED]:
|
||||
raise HTTPException(status_code=400, detail="Stunde ist nicht aktiv")
|
||||
|
||||
phase_id = session.current_phase.value
|
||||
current_duration = session.phase_durations.get(phase_id, 10)
|
||||
session.phase_durations[phase_id] = current_duration + request.minutes
|
||||
|
||||
persist_session(session)
|
||||
return build_session_response(session)
|
||||
|
||||
|
||||
@router.get("/sessions/{session_id}/timer", response_model=TimerStatus)
async def get_timer(session_id: str) -> TimerStatus:
    """Return the timer status of the session's current phase."""
    session = get_session_or_404(session_id)
    phase_status = PhaseTimer().get_phase_status(session)
    return TimerStatus(**phase_status)
|
||||
|
||||
|
||||
@router.get("/sessions/{session_id}/suggestions", response_model=SuggestionsResponse)
async def get_suggestions(
    session_id: str,
    limit: int = Query(3, ge=1, le=10, description="Anzahl Vorschlaege")
) -> SuggestionsResponse:
    """Return phase-specific activity suggestions for the session."""
    session = get_session_or_404(session_id)
    payload = SuggestionEngine().get_suggestions_response(session, limit)

    items = [SuggestionItem(**entry) for entry in payload["suggestions"]]
    return SuggestionsResponse(
        suggestions=items,
        current_phase=payload["current_phase"],
        phase_display_name=payload["phase_display_name"],
        total_available=payload["total_available"],
    )
|
||||
|
||||
|
||||
@router.put("/sessions/{session_id}/notes", response_model=SessionResponse)
async def update_notes(session_id: str, request: NotesRequest) -> SessionResponse:
    """Replace the session's free-form notes and homework text."""
    session = get_session_or_404(session_id)
    session.notes, session.homework = request.notes, request.homework
    persist_session(session)
    return build_session_response(session)
|
||||
|
||||
|
||||
@router.delete("/sessions/{session_id}")
async def delete_session(session_id: str) -> Dict[str, str]:
    """
    Delete a session from the in-memory cache and, best-effort, from the DB.

    Args:
        session_id: Identifier of the session to delete.

    Returns:
        A status payload confirming the deletion.

    Raises:
        HTTPException: 404 when the session is not in the in-memory cache.
    """
    sessions = get_sessions()
    if session_id not in sessions:
        raise HTTPException(status_code=404, detail="Session nicht gefunden")

    del sessions[session_id]

    if DB_ENABLED:
        # Best-effort DB delete; a failure is logged but does not fail the request.
        db = None
        try:
            db = SessionLocal()
            SessionRepository(db).delete(session_id)
        except Exception as e:
            logger.error(f"Failed to delete session {session_id} from DB: {e}")
        finally:
            # Always release the DB session, even when delete() raises
            # (the original skipped close() on the error path and leaked it).
            if db is not None:
                db.close()

    return {"status": "deleted", "session_id": session_id}
|
||||
|
||||
|
||||
# === Session History (Feature f17) ===
|
||||
|
||||
@router.get("/history/{teacher_id}", response_model=SessionHistoryResponse)
async def get_session_history(
    teacher_id: str,
    limit: int = Query(20, ge=1, le=100, description="Max. Anzahl Eintraege"),
    offset: int = Query(0, ge=0, description="Offset fuer Pagination")
) -> SessionHistoryResponse:
    """
    Return a teacher's paginated history of ended sessions (Feature f17).

    Uses the database when available; otherwise falls back to the in-memory
    cache, which only holds sessions from the current process lifetime.
    """
    init_db_if_needed()
    sessions = get_sessions()

    # --- In-memory fallback path (DB support not available) ---
    if not DB_ENABLED:
        # Only ended sessions of this teacher, newest first.
        ended_sessions = [
            s for s in sessions.values()
            if s.teacher_id == teacher_id and s.current_phase == LessonPhase.ENDED
        ]
        ended_sessions.sort(
            key=lambda x: x.lesson_ended_at or datetime.min,
            reverse=True
        )
        paginated = ended_sessions[offset:offset + limit]

        items = []
        for s in paginated:
            # Duration is only computable when both timestamps are set.
            duration = None
            if s.lesson_started_at and s.lesson_ended_at:
                duration = int((s.lesson_ended_at - s.lesson_started_at).total_seconds() / 60)

            items.append(SessionHistoryItem(
                session_id=s.session_id,
                teacher_id=s.teacher_id,
                class_id=s.class_id,
                subject=s.subject,
                topic=s.topic,
                lesson_started_at=s.lesson_started_at.isoformat() if s.lesson_started_at else None,
                lesson_ended_at=s.lesson_ended_at.isoformat() if s.lesson_ended_at else None,
                total_duration_minutes=duration,
                phases_completed=len(s.phase_history),
                notes=s.notes,
                homework=s.homework,
            ))

        return SessionHistoryResponse(
            sessions=items,
            total_count=len(ended_sessions),
            page=offset // limit + 1,
            page_size=limit,
        )

    # --- Database path ---
    try:
        db = SessionLocal()
        repo = SessionRepository(db)

        db_sessions = repo.get_history_by_teacher(teacher_id, limit, offset)

        # Total count of this teacher's ended sessions, for pagination metadata.
        from classroom_engine.db_models import LessonSessionDB, LessonPhaseEnum
        total_count = db.query(LessonSessionDB).filter(
            LessonSessionDB.teacher_id == teacher_id,
            LessonSessionDB.current_phase == LessonPhaseEnum.ENDED
        ).count()

        items = []
        for db_session in db_sessions:
            duration = None
            if db_session.lesson_started_at and db_session.lesson_ended_at:
                duration = int((db_session.lesson_ended_at - db_session.lesson_started_at).total_seconds() / 60)

            phase_history = db_session.phase_history or []

            items.append(SessionHistoryItem(
                session_id=db_session.id,
                teacher_id=db_session.teacher_id,
                class_id=db_session.class_id,
                subject=db_session.subject,
                topic=db_session.topic,
                lesson_started_at=db_session.lesson_started_at.isoformat() if db_session.lesson_started_at else None,
                lesson_ended_at=db_session.lesson_ended_at.isoformat() if db_session.lesson_ended_at else None,
                total_duration_minutes=duration,
                phases_completed=len(phase_history),
                notes=db_session.notes or "",
                homework=db_session.homework or "",
            ))

        db.close()

        return SessionHistoryResponse(
            sessions=items,
            total_count=total_count,
            page=offset // limit + 1,
            page_size=limit,
        )

    except Exception as e:
        # NOTE(review): db is not closed on this error path — the DB session
        # may leak; consider wrapping the DB work in try/finally.
        logger.error(f"Failed to get session history: {e}")
        raise HTTPException(status_code=500, detail="Fehler beim Laden der History")
|
||||
201
backend/api/classroom/settings.py
Normal file
201
backend/api/classroom/settings.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""
|
||||
Classroom API - Settings Endpoints.
|
||||
|
||||
Endpoints fuer Lehrer-Einstellungen.
|
||||
"""
|
||||
|
||||
from typing import Dict, Optional, Any
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from classroom_engine import get_default_durations
|
||||
|
||||
from .shared import init_db_if_needed, DB_ENABLED, logger
|
||||
|
||||
try:
|
||||
from classroom_engine.database import SessionLocal
|
||||
from classroom_engine.repository import TeacherSettingsRepository
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Router collecting all settings endpoints; aggregated by the package router.
router = APIRouter(tags=["Settings"])

# In-Memory Storage (Fallback)
# Cache of settings dicts keyed by teacher_id; also serves as the only store
# when the database is unavailable.
_settings: Dict[str, dict] = {}
|
||||
|
||||
|
||||
# === Pydantic Models ===
|
||||
|
||||
class PhaseDurationsUpdate(BaseModel):
    """Partial update for a teacher's phase durations (minutes per phase).

    All fields are optional; only values that are provided are applied.
    """
    einstieg: Optional[int] = Field(None, ge=1, le=30)     # lesson opening
    erarbeitung: Optional[int] = Field(None, ge=5, le=45)  # main work phase
    sicherung: Optional[int] = Field(None, ge=3, le=20)    # consolidation
    transfer: Optional[int] = Field(None, ge=3, le=20)     # transfer/application
    reflexion: Optional[int] = Field(None, ge=2, le=15)    # reflection
|
||||
|
||||
|
||||
class PreferencesUpdate(BaseModel):
    """Partial update for a teacher's preferences; only provided fields change."""
    auto_advance: Optional[bool] = None          # advance phases automatically
    sound_enabled: Optional[bool] = None         # play sounds on phase events
    notification_enabled: Optional[bool] = None  # show notifications
    theme: Optional[str] = None                  # UI theme, e.g. "light"
    language: Optional[str] = None               # UI language code, e.g. "de"
|
||||
|
||||
|
||||
class TeacherSettingsResponse(BaseModel):
    """Full settings record for one teacher as returned by the API."""
    teacher_id: str
    phase_durations: Dict[str, int]  # phase id -> minutes
    preferences: Dict[str, Any]      # free-form preference map
    created_at: str                  # ISO timestamp
    updated_at: Optional[str]        # ISO timestamp; None if never updated
|
||||
|
||||
|
||||
# === Helper Functions ===
|
||||
|
||||
def get_default_settings(teacher_id: str) -> dict:
    """Build a fresh default settings record for *teacher_id*."""
    default_preferences = {
        "auto_advance": False,
        "sound_enabled": True,
        "notification_enabled": True,
        "theme": "light",
        "language": "de",
    }
    return {
        "teacher_id": teacher_id,
        "phase_durations": get_default_durations(),
        "preferences": default_preferences,
        "created_at": datetime.utcnow().isoformat(),
        "updated_at": None,
    }
|
||||
|
||||
|
||||
# === Endpoints ===
|
||||
|
||||
@router.get("/settings/{teacher_id}", response_model=TeacherSettingsResponse)
async def get_teacher_settings(teacher_id: str) -> TeacherSettingsResponse:
    """
    Fetch a teacher's settings.

    Lookup order: in-memory cache, then database, then freshly created
    defaults (which are cached and persisted best-effort to the DB).
    """
    init_db_if_needed()

    # 1) Memory cache.
    if teacher_id in _settings:
        return TeacherSettingsResponse(**_settings[teacher_id])

    # 2) Database.
    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TeacherSettingsRepository(db)
            db_settings = repo.get_by_teacher(teacher_id)
            if db_settings:
                # Convert while the DB session is still open: the original
                # called to_dict() after close(), which can fail on detached
                # ORM instances with lazy-loaded attributes.
                settings_data = repo.to_dict(db_settings)
                _settings[teacher_id] = settings_data
                return TeacherSettingsResponse(**settings_data)
        except Exception as e:
            logger.warning(f"DB read failed for settings: {e}")
        finally:
            if db is not None:
                db.close()

    # 3) Defaults: cache, then persist best-effort.
    settings_data = get_default_settings(teacher_id)
    _settings[teacher_id] = settings_data

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            TeacherSettingsRepository(db).create(settings_data)
        except Exception as e:
            logger.warning(f"DB persist failed for settings: {e}")
        finally:
            if db is not None:
                db.close()

    return TeacherSettingsResponse(**settings_data)
|
||||
|
||||
|
||||
@router.put("/settings/{teacher_id}/durations", response_model=TeacherSettingsResponse)
async def update_phase_durations(
    teacher_id: str,
    request: PhaseDurationsUpdate
) -> TeacherSettingsResponse:
    """
    Update a teacher's phase durations.

    Only fields present in the request are changed; the result is cached
    in memory and persisted best-effort to the DB.
    """
    init_db_if_needed()

    # Ensure settings exist (populates the cache as a side effect); the
    # original also bound the unused result to a local.
    await get_teacher_settings(teacher_id)
    settings_data = _settings.get(teacher_id, get_default_settings(teacher_id))

    # Apply only the provided values (replaces five copy-pasted if-blocks).
    durations = settings_data["phase_durations"]
    for phase in ("einstieg", "erarbeitung", "sicherung", "transfer", "reflexion"):
        value = getattr(request, phase)
        if value is not None:
            durations[phase] = value

    settings_data["phase_durations"] = durations
    settings_data["updated_at"] = datetime.utcnow().isoformat()

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            TeacherSettingsRepository(db).update_durations(teacher_id, durations)
        except Exception as e:
            logger.warning(f"DB update failed for durations: {e}")
        finally:
            # Close the DB session even when the update raises
            # (the original leaked it on the error path).
            if db is not None:
                db.close()

    _settings[teacher_id] = settings_data
    return TeacherSettingsResponse(**settings_data)
|
||||
|
||||
|
||||
@router.put("/settings/{teacher_id}/preferences", response_model=TeacherSettingsResponse)
async def update_preferences(
    teacher_id: str,
    request: PreferencesUpdate
) -> TeacherSettingsResponse:
    """
    Update a teacher's preferences.

    Only fields present in the request are changed; the result is cached
    in memory and persisted best-effort to the DB.
    """
    init_db_if_needed()

    # Ensure settings exist (populates the cache as a side effect); the
    # original also bound the unused result to a local.
    await get_teacher_settings(teacher_id)
    settings_data = _settings.get(teacher_id, get_default_settings(teacher_id))

    # Apply only the provided values (replaces five copy-pasted if-blocks).
    prefs = settings_data["preferences"]
    for name in ("auto_advance", "sound_enabled", "notification_enabled", "theme", "language"):
        value = getattr(request, name)
        if value is not None:
            prefs[name] = value

    settings_data["preferences"] = prefs
    settings_data["updated_at"] = datetime.utcnow().isoformat()

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            TeacherSettingsRepository(db).update_preferences(teacher_id, prefs)
        except Exception as e:
            logger.warning(f"DB update failed for preferences: {e}")
        finally:
            # Close the DB session even when the update raises
            # (the original leaked it on the error path).
            if db is not None:
                db.close()

    _settings[teacher_id] = settings_data
    return TeacherSettingsResponse(**settings_data)
|
||||
341
backend/api/classroom/shared.py
Normal file
341
backend/api/classroom/shared.py
Normal file
@@ -0,0 +1,341 @@
|
||||
"""
|
||||
Classroom API - Shared State und Helper Functions.
|
||||
|
||||
Zentrale Komponenten die von allen Classroom-Modulen verwendet werden.
|
||||
"""
|
||||
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime
|
||||
import os
|
||||
import logging
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
from fastapi import HTTPException, WebSocket, Request
|
||||
|
||||
# Auth imports (Phase 7: Keycloak Integration)
|
||||
try:
|
||||
from auth import get_current_user
|
||||
AUTH_ENABLED = True
|
||||
except ImportError:
|
||||
AUTH_ENABLED = False
|
||||
logging.warning("Auth module not available, using demo user fallback")
|
||||
|
||||
from classroom_engine import (
|
||||
LessonPhase,
|
||||
LessonSession,
|
||||
LessonStateMachine,
|
||||
PhaseTimer,
|
||||
)
|
||||
|
||||
# Database imports (Feature f22)
|
||||
try:
|
||||
from classroom_engine.database import get_db, init_db, SessionLocal
|
||||
from classroom_engine.repository import SessionRepository
|
||||
DB_ENABLED = True
|
||||
except ImportError:
|
||||
DB_ENABLED = False
|
||||
logging.warning("Classroom DB not available, using in-memory storage only")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# === WebSocket Connection Manager (Phase 6: Real-time) ===
|
||||
|
||||
class ConnectionManager:
    """
    Manages WebSocket connections for real-time timer updates.

    Features:
    - session-scoped connections (each session has its own set of clients)
    - automatic cleanup on disconnect
    - broadcast to all clients of one session
    - multi-device support
    """

    def __init__(self):
        # session_id -> Set[WebSocket]
        self._connections: Dict[str, set] = {}
        # WebSocket -> session_id (reverse lookup)
        self._websocket_sessions: Dict[WebSocket, str] = {}
        # Guards both dicts. asyncio.Lock is NOT reentrant, so no method may
        # await another lock-taking method while holding it.
        self._lock = asyncio.Lock()

    async def connect(self, websocket: WebSocket, session_id: str):
        """Accept the socket and register it under the given session."""
        await websocket.accept()
        async with self._lock:
            if session_id not in self._connections:
                self._connections[session_id] = set()
            self._connections[session_id].add(websocket)
            self._websocket_sessions[websocket] = session_id
            logger.info(f"WebSocket connected to session {session_id}, total clients: {len(self._connections[session_id])}")

    async def disconnect(self, websocket: WebSocket):
        """Unregister a client; sessions with no clients left are removed."""
        async with self._lock:
            session_id = self._websocket_sessions.pop(websocket, None)
            if session_id and session_id in self._connections:
                self._connections[session_id].discard(websocket)
                if not self._connections[session_id]:
                    del self._connections[session_id]
                logger.info(f"WebSocket disconnected from session {session_id}")

    async def broadcast_to_session(self, session_id: str, message: dict):
        """Send *message* (JSON-encoded) to every client of a session."""
        async with self._lock:
            # Copy under the lock, send outside it: sending while holding the
            # lock would deadlock the disconnect() cleanup below.
            connections = self._connections.get(session_id, set()).copy()

        if not connections:
            return

        message_json = json.dumps(message)
        dead_connections = []

        for websocket in connections:
            try:
                await websocket.send_text(message_json)
            except Exception as e:
                logger.warning(f"Failed to send to websocket: {e}")
                dead_connections.append(websocket)

        # Cleanup dead connections
        for ws in dead_connections:
            await self.disconnect(ws)

    async def broadcast_timer_update(self, session_id: str, timer_data: dict):
        """Broadcast a "timer_update" event to all clients of a session."""
        await self.broadcast_to_session(session_id, {
            "type": "timer_update",
            "data": timer_data
        })

    async def broadcast_phase_change(self, session_id: str, phase_data: dict):
        """Broadcast a "phase_change" event to all clients of a session."""
        await self.broadcast_to_session(session_id, {
            "type": "phase_change",
            "data": phase_data
        })

    async def broadcast_session_ended(self, session_id: str):
        """Broadcast a "session_ended" event to all clients of a session."""
        await self.broadcast_to_session(session_id, {
            "type": "session_ended",
            "data": {"session_id": session_id}
        })

    def get_client_count(self, session_id: str) -> int:
        """Return the number of clients currently connected to a session."""
        return len(self._connections.get(session_id, set()))

    def get_active_sessions(self) -> List[str]:
        """Return the session ids that have at least one connected client."""
        return list(self._connections.keys())
|
||||
|
||||
|
||||
# Global instances shared by all classroom API modules.
ws_manager = ConnectionManager()                      # WebSocket broadcast hub
_sessions: Dict[str, LessonSession] = {}              # in-memory session cache
_db_initialized = False                               # guards one-time DB init
_timer_broadcast_task: Optional[asyncio.Task] = None  # background broadcast task


# === Demo User ===

# Fallback identity used when auth is disabled or, in development, when a
# request carries no valid bearer token (see get_optional_current_user).
DEMO_USER = {
    "user_id": "demo-teacher",
    "email": "demo@breakpilot.app",
    "name": "Demo Lehrer",
    "given_name": "Demo",
    "family_name": "Lehrer",
    "role": "teacher",
    "is_demo": True
}
|
||||
|
||||
|
||||
# === Timer Broadcast Functions ===
|
||||
|
||||
async def _timer_broadcast_loop():
    """
    Background task: push a timer update to every connected client once per second.

    Sessions without clients and ended sessions are skipped. Unexpected errors
    are logged and the loop backs off for 5 seconds instead of dying;
    CancelledError exits the loop cleanly.
    """
    logger.info("Timer broadcast loop started")
    while True:
        try:
            await asyncio.sleep(1)

            # Only sessions that currently have WebSocket clients.
            active_ws_sessions = ws_manager.get_active_sessions()
            if not active_ws_sessions:
                continue

            for session_id in active_ws_sessions:
                session = _sessions.get(session_id)
                # NOTE(review): assumes LessonSession exposes an `is_ended`
                # flag — confirm against classroom_engine.
                if not session or session.is_ended:
                    continue

                timer_status = build_timer_status(session)
                await ws_manager.broadcast_timer_update(session_id, timer_status)
        except asyncio.CancelledError:
            # Normal shutdown path via stop_timer_broadcast().
            logger.info("Timer broadcast loop cancelled")
            break
        except Exception as e:
            logger.error(f"Error in timer broadcast loop: {e}")
            await asyncio.sleep(5)  # back off before retrying
|
||||
|
||||
|
||||
def start_timer_broadcast():
    """Create the background timer-broadcast task unless one is already running."""
    global _timer_broadcast_task
    task = _timer_broadcast_task
    if task is not None and not task.done():
        return  # already running
    _timer_broadcast_task = asyncio.create_task(_timer_broadcast_loop())
    logger.info("Timer broadcast task created")
|
||||
|
||||
|
||||
def stop_timer_broadcast():
    """Cancel the background timer-broadcast task if it is still running."""
    global _timer_broadcast_task
    task = _timer_broadcast_task
    if task is None or task.done():
        return  # nothing to stop
    task.cancel()
    logger.info("Timer broadcast task cancelled")
|
||||
|
||||
|
||||
# === Database Functions ===
|
||||
|
||||
def init_db_if_needed():
    """Initialise the DB once and warm the session cache on first call.

    Safe to call on every request: returns immediately once initialised or
    when DB support is unavailable. On failure the flag stays False, so a
    later call retries the initialisation.
    """
    global _db_initialized
    if _db_initialized or not DB_ENABLED:
        return

    try:
        init_db()
        _load_active_sessions_from_db()
        _db_initialized = True
        logger.info("Classroom DB initialized, loaded active sessions")
    except Exception as e:
        logger.error(f"Failed to initialize Classroom DB: {e}")
|
||||
|
||||
|
||||
def _load_active_sessions_from_db():
    """Load all non-ended sessions from the DB into the in-memory cache.

    No-op when DB support is unavailable; failures are logged, never raised.
    """
    if not DB_ENABLED:
        return

    db = None
    try:
        db = SessionLocal()
        repo = SessionRepository(db)

        # Imported lazily so the module loads even without the DB extras.
        from classroom_engine.db_models import LessonSessionDB, LessonPhaseEnum
        active_db_sessions = db.query(LessonSessionDB).filter(
            LessonSessionDB.current_phase != LessonPhaseEnum.ENDED
        ).all()

        for db_session in active_db_sessions:
            session = repo.to_dataclass(db_session)
            _sessions[session.session_id] = session
            logger.info(f"Loaded session {session.session_id} from DB")
    except Exception as e:
        logger.error(f"Failed to load sessions from DB: {e}")
    finally:
        # Always release the DB session; the original leaked it whenever the
        # query or conversion raised (close() sat inside the try block).
        if db is not None:
            db.close()
|
||||
|
||||
|
||||
def persist_session(session: LessonSession):
    """Create or update *session* in the DB (no-op when DB is unavailable).

    Persistence is best-effort: failures are logged, never raised.
    """
    if not DB_ENABLED:
        return

    db = None
    try:
        db = SessionLocal()
        repo = SessionRepository(db)

        # Upsert: update when a row already exists, otherwise insert.
        if repo.get_by_id(session.session_id):
            repo.update(session)
        else:
            repo.create(session)
    except Exception as e:
        logger.error(f"Failed to persist session {session.session_id}: {e}")
    finally:
        # Always release the DB session; the original leaked it whenever the
        # upsert raised (close() sat inside the try block).
        if db is not None:
            db.close()
|
||||
|
||||
|
||||
# === Auth Functions ===
|
||||
|
||||
async def get_optional_current_user(request: Request) -> Dict[str, Any]:
    """
    Optional authentication: resolve the current user, falling back to the
    demo user when auth is disabled or, in development, when the request
    carries no (or an invalid) bearer token.

    Raises:
        HTTPException: 401 outside development when no valid token is given.
    """
    if not AUTH_ENABLED:
        return DEMO_USER

    auth_header = request.headers.get("Authorization", "")
    if not auth_header or not auth_header.startswith("Bearer "):
        # No bearer token: tolerated in development only.
        env = os.environ.get("ENVIRONMENT", "development")
        if env == "development":
            return DEMO_USER
        raise HTTPException(status_code=401, detail="Nicht authentifiziert")

    try:
        return await get_current_user(request)
    except Exception as e:
        logger.warning(f"Auth failed: {e}")
        # Token present but rejected: again tolerated in development only.
        env = os.environ.get("ENVIRONMENT", "development")
        if env == "development":
            return DEMO_USER
        raise HTTPException(status_code=401, detail="Authentifizierung fehlgeschlagen")
|
||||
|
||||
|
||||
# === Session Helpers ===
|
||||
|
||||
def get_session_or_404(session_id: str) -> LessonSession:
    """
    Return the session from the in-memory cache, falling back to the DB.

    A DB hit is converted to the dataclass and cached for later lookups.

    Raises:
        HTTPException: 404 when the session exists in neither place.
    """
    init_db_if_needed()

    session = _sessions.get(session_id)
    if session:
        return session

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = SessionRepository(db)
            db_session = repo.get_by_id(session_id)
            if db_session:
                # Convert while the DB session is open, then cache.
                session = repo.to_dataclass(db_session)
                _sessions[session.session_id] = session
                return session
        except Exception as e:
            logger.error(f"Failed to load session {session_id} from DB: {e}")
        finally:
            # Always release the DB session; the original leaked it whenever
            # the lookup raised (close() sat inside the try block).
            if db is not None:
                db.close()

    raise HTTPException(status_code=404, detail="Session nicht gefunden")
|
||||
|
||||
|
||||
def build_timer_status(session: LessonSession) -> dict:
    """Build the timer-status dict broadcast to WebSocket clients."""
    payload = PhaseTimer().get_phase_status(session)
    payload.update(
        session_id=session.session_id,
        current_phase=session.current_phase.value,
        is_paused=session.is_paused,
        timestamp=datetime.utcnow().isoformat(),
    )
    return payload
|
||||
|
||||
|
||||
def get_sessions() -> Dict[str, LessonSession]:
    """Return the module-level in-memory session cache (shared, mutable)."""
    return _sessions
|
||||
|
||||
|
||||
def add_session(session: LessonSession):
    """Add a session to the cache and persist it (best-effort) to the DB."""
    _sessions[session.session_id] = session
    persist_session(session)
|
||||
|
||||
|
||||
def remove_session(session_id: str):
    """Drop a session from the in-memory cache; unknown ids are ignored."""
    if session_id in _sessions:
        del _sessions[session_id]
|
||||
392
backend/api/classroom/templates.py
Normal file
392
backend/api/classroom/templates.py
Normal file
@@ -0,0 +1,392 @@
|
||||
"""
|
||||
Classroom API - Template Endpoints.
|
||||
|
||||
Endpoints fuer Stunden-Vorlagen (Feature f37).
|
||||
"""
|
||||
|
||||
from uuid import uuid4
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from classroom_engine import (
|
||||
LessonSession,
|
||||
LessonTemplate,
|
||||
SYSTEM_TEMPLATES,
|
||||
get_default_durations,
|
||||
)
|
||||
|
||||
from .models import SessionResponse
|
||||
from .shared import (
|
||||
init_db_if_needed,
|
||||
get_sessions,
|
||||
persist_session,
|
||||
DB_ENABLED,
|
||||
logger,
|
||||
)
|
||||
from .sessions import build_session_response
|
||||
|
||||
try:
|
||||
from classroom_engine.database import SessionLocal
|
||||
from classroom_engine.repository import TemplateRepository
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
router = APIRouter(tags=["Templates"])
|
||||
|
||||
|
||||
# === Pydantic Models ===
|
||||
|
||||
class TemplateCreate(BaseModel):
    """Request body for creating a lesson template."""
    name: str = Field(..., min_length=1, max_length=200)  # display name (required)
    description: str = Field("", max_length=1000)
    subject: str = Field("", max_length=100)
    grade_level: str = Field("", max_length=50)
    # Phase id -> minutes; defaults to the engine-wide durations.
    phase_durations: Dict[str, int] = Field(default_factory=get_default_durations)
    default_topic: str = Field("", max_length=500)
    default_notes: str = Field("")
    is_public: bool = Field(False)  # visible to other teachers when True
|
||||
|
||||
|
||||
class TemplateUpdate(BaseModel):
    """Partial update for a lesson template; only provided fields change."""
    name: Optional[str] = Field(None, min_length=1, max_length=200)
    description: Optional[str] = Field(None, max_length=1000)
    subject: Optional[str] = Field(None, max_length=100)
    grade_level: Optional[str] = Field(None, max_length=50)
    phase_durations: Optional[Dict[str, int]] = None  # phase id -> minutes
    default_topic: Optional[str] = Field(None, max_length=500)
    default_notes: Optional[str] = None
    is_public: Optional[bool] = None
|
||||
|
||||
|
||||
class TemplateResponse(BaseModel):
    """API representation of a single lesson template."""
    template_id: str
    teacher_id: str                   # "system" for built-in templates
    name: str
    description: str
    subject: str
    grade_level: str
    phase_durations: Dict[str, int]   # phase id -> minutes
    default_topic: str
    default_notes: str
    is_public: bool
    usage_count: int
    total_duration_minutes: int       # sum of all phase durations
    created_at: Optional[str]         # ISO timestamp
    updated_at: Optional[str]         # ISO timestamp
    is_system_template: bool = False  # True for the read-only built-ins
|
||||
|
||||
|
||||
class TemplateListResponse(BaseModel):
    """A list of templates plus the total number of entries."""
    templates: List[TemplateResponse]
    total_count: int
|
||||
|
||||
|
||||
# === Helper Functions ===
|
||||
|
||||
def build_template_response(template: LessonTemplate, is_system: bool = False) -> TemplateResponse:
    """Convert a LessonTemplate dataclass into the API response model."""
    created = template.created_at
    updated = template.updated_at
    return TemplateResponse(
        template_id=template.template_id,
        teacher_id=template.teacher_id,
        name=template.name,
        description=template.description,
        subject=template.subject,
        grade_level=template.grade_level,
        phase_durations=template.phase_durations,
        default_topic=template.default_topic,
        default_notes=template.default_notes,
        is_public=template.is_public,
        usage_count=template.usage_count,
        # Total lesson length is the sum of all configured phase durations.
        total_duration_minutes=sum(template.phase_durations.values()),
        created_at=created.isoformat() if created else None,
        updated_at=updated.isoformat() if updated else None,
        is_system_template=is_system,
    )
|
||||
|
||||
|
||||
def get_system_templates() -> List[TemplateResponse]:
    """Return the built-in, read-only system templates as API responses."""

    def _to_response(spec: dict) -> TemplateResponse:
        # System templates are owned by the pseudo-teacher "system".
        system_template = LessonTemplate(
            template_id=spec["template_id"],
            teacher_id="system",
            name=spec["name"],
            description=spec.get("description", ""),
            phase_durations=spec["phase_durations"],
            is_public=True,
            usage_count=0,
        )
        return build_template_response(system_template, is_system=True)

    return [_to_response(spec) for spec in SYSTEM_TEMPLATES]
|
||||
|
||||
|
||||
# === Endpoints ===
|
||||
|
||||
@router.get("/templates", response_model=TemplateListResponse)
async def list_templates(
    teacher_id: Optional[str] = Query(None),
    subject: Optional[str] = Query(None),
    include_system: bool = Query(True)
) -> TemplateListResponse:
    """
    List available lesson templates (Feature f37).

    Built-in system templates are listed first (unless disabled); DB-backed
    templates are filtered by subject, then teacher, then public visibility.
    """
    init_db_if_needed()

    templates: List[TemplateResponse] = []

    if include_system:
        templates.extend(get_system_templates())

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TemplateRepository(db)

            # Most specific filter wins: subject > teacher > public.
            if subject:
                db_templates = repo.get_by_subject(subject, teacher_id)
            elif teacher_id:
                db_templates = repo.get_by_teacher(teacher_id, include_public=True)
            else:
                db_templates = repo.get_public_templates()

            for db_t in db_templates:
                templates.append(build_template_response(repo.to_dataclass(db_t)))
        except Exception as e:
            logger.error(f"Failed to load templates from DB: {e}")
        finally:
            # Always release the DB session; the original leaked it whenever
            # the query or conversion raised (close() sat inside the try).
            if db is not None:
                db.close()

    return TemplateListResponse(templates=templates, total_count=len(templates))
|
||||
|
||||
|
||||
@router.get("/templates/{template_id}", response_model=TemplateResponse)
async def get_template(template_id: str) -> TemplateResponse:
    """
    Fetch a single template: system templates are checked first, then the DB.

    Raises:
        HTTPException: 404 when no template with this id exists anywhere.
    """
    init_db_if_needed()

    # Built-in system templates take precedence over DB rows.
    for t in SYSTEM_TEMPLATES:
        if t["template_id"] == template_id:
            template = LessonTemplate(
                template_id=t["template_id"],
                teacher_id="system",
                name=t["name"],
                description=t.get("description", ""),
                phase_durations=t["phase_durations"],
                is_public=True,
            )
            return build_template_response(template, is_system=True)

    if DB_ENABLED:
        db = None
        try:
            db = SessionLocal()
            repo = TemplateRepository(db)
            db_template = repo.get_by_id(template_id)
            if db_template:
                # Convert while the DB session is still open.
                template = repo.to_dataclass(db_template)
                return build_template_response(template)
        except Exception as e:
            logger.error(f"Failed to get template {template_id}: {e}")
        finally:
            # Always release the DB session; the original leaked it whenever
            # the lookup raised (close() sat inside the try block).
            if db is not None:
                db.close()

    raise HTTPException(status_code=404, detail="Vorlage nicht gefunden")
|
||||
|
||||
|
||||
@router.post("/templates", response_model=TemplateResponse, status_code=201)
async def create_template(
    request: TemplateCreate,
    teacher_id: str = Query(...)
) -> TemplateResponse:
    """
    Create a new lesson template owned by *teacher_id*.

    Raises:
        HTTPException: 503 when the DB is unavailable, 500 on persist failure.
    """
    init_db_if_needed()

    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    template = LessonTemplate(
        template_id=str(uuid4()),
        teacher_id=teacher_id,
        name=request.name,
        description=request.description,
        subject=request.subject,
        grade_level=request.grade_level,
        phase_durations=request.phase_durations,
        default_topic=request.default_topic,
        default_notes=request.default_notes,
        is_public=request.is_public,
        created_at=datetime.utcnow(),
    )

    db = None
    try:
        db = SessionLocal()
        repo = TemplateRepository(db)
        db_template = repo.create(template)
        # Round-trip through the repository so DB-generated fields are reflected.
        template = repo.to_dataclass(db_template)
        return build_template_response(template)
    except Exception as e:
        logger.error(f"Failed to create template: {e}")
        raise HTTPException(status_code=500, detail="Fehler beim Erstellen der Vorlage")
    finally:
        # Always release the DB session; the original leaked it whenever
        # create() raised (close() was only reached on the success path).
        if db is not None:
            db.close()
|
||||
|
||||
|
||||
@router.put("/templates/{template_id}", response_model=TemplateResponse)
async def update_template(
    template_id: str,
    request: TemplateUpdate,
    teacher_id: str = Query(...)
) -> TemplateResponse:
    """Update a teacher-owned lesson template (partial update).

    Fields left as None in the request keep their current value.
    System templates are immutable and only the owning teacher may
    update a template.

    Raises:
        HTTPException 403: system template or foreign teacher.
        HTTPException 404: template does not exist.
        HTTPException 503: database not available.
        HTTPException 500: unexpected persistence error.
    """
    init_db_if_needed()

    # System templates live in code, not in the DB — reject edits early.
    for t in SYSTEM_TEMPLATES:
        if t["template_id"] == template_id:
            raise HTTPException(status_code=403, detail="System-Vorlagen koennen nicht bearbeitet werden")

    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    try:
        db = SessionLocal()
        try:
            repo = TemplateRepository(db)

            db_template = repo.get_by_id(template_id)
            if not db_template:
                raise HTTPException(status_code=404, detail="Vorlage nicht gefunden")

            if db_template.teacher_id != teacher_id:
                raise HTTPException(status_code=403, detail="Keine Berechtigung")

            template = repo.to_dataclass(db_template)
            # Partial update: copy only the fields the client actually sent
            # (replaces the original eight near-identical if-blocks).
            for attr in (
                "name", "description", "subject", "grade_level",
                "phase_durations", "default_topic", "default_notes", "is_public",
            ):
                value = getattr(request, attr)
                if value is not None:
                    setattr(template, attr, value)

            db_template = repo.update(template)
            template = repo.to_dataclass(db_template)
            return build_template_response(template)
        finally:
            # BUGFIX: finally guarantees the session is closed on every path;
            # the original leaked it on unexpected errors.
            db.close()
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to update template {template_id}: {e}")
        raise HTTPException(status_code=500, detail="Fehler beim Aktualisieren der Vorlage")
|
||||
|
||||
|
||||
@router.delete("/templates/{template_id}")
async def delete_template(
    template_id: str,
    teacher_id: str = Query(...)
) -> Dict[str, str]:
    """Delete a teacher-owned lesson template.

    System templates cannot be deleted and only the owning teacher may
    delete a template.

    Raises:
        HTTPException 403: system template or foreign teacher.
        HTTPException 404: template does not exist.
        HTTPException 503: database not available.
        HTTPException 500: unexpected persistence error.
    """
    init_db_if_needed()

    # System templates live in code, not in the DB — reject early.
    for t in SYSTEM_TEMPLATES:
        if t["template_id"] == template_id:
            raise HTTPException(status_code=403, detail="System-Vorlagen koennen nicht geloescht werden")

    if not DB_ENABLED:
        raise HTTPException(status_code=503, detail="Datenbank nicht verfuegbar")

    try:
        db = SessionLocal()
        try:
            repo = TemplateRepository(db)

            db_template = repo.get_by_id(template_id)
            if not db_template:
                raise HTTPException(status_code=404, detail="Vorlage nicht gefunden")

            if db_template.teacher_id != teacher_id:
                raise HTTPException(status_code=403, detail="Keine Berechtigung")

            repo.delete(template_id)
            return {"status": "deleted", "template_id": template_id}
        finally:
            # BUGFIX: finally guarantees the session is closed on every path;
            # the original leaked it when delete()/get_by_id() raised.
            db.close()
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to delete template {template_id}: {e}")
        raise HTTPException(status_code=500, detail="Fehler beim Loeschen der Vorlage")
|
||||
|
||||
|
||||
@router.post("/sessions/from-template", response_model=SessionResponse)
async def create_session_from_template(
    template_id: str = Query(...),
    teacher_id: str = Query(...),
    class_id: str = Query(...),
    topic: Optional[str] = Query(None)
) -> SessionResponse:
    """Create a new lesson session pre-filled from a template.

    Lookup order: built-in SYSTEM_TEMPLATES first, then the database
    (the template's usage counter is incremented for DB templates).
    The explicit ``topic`` query parameter overrides the template's
    default topic.

    Raises:
        HTTPException 404: template not found in either source.
    """
    init_db_if_needed()

    template_data = None

    # 1) Built-in system templates (served from code).
    for t in SYSTEM_TEMPLATES:
        if t["template_id"] == template_id:
            template_data = t
            break

    # 2) Teacher templates from the database.
    if not template_data and DB_ENABLED:
        try:
            db = SessionLocal()
            try:
                repo = TemplateRepository(db)
                db_template = repo.get_by_id(template_id)
                if db_template:
                    template_data = {
                        "phase_durations": db_template.phase_durations or get_default_durations(),
                        "subject": db_template.subject or "",
                        "default_topic": db_template.default_topic or "",
                        "default_notes": db_template.default_notes or "",
                    }
                    repo.increment_usage(template_id)
            finally:
                # BUGFIX: the original leaked the session when a repo call
                # raised; finally guarantees the close.
                db.close()
        except Exception as e:
            logger.error(f"Failed to load template {template_id}: {e}")

    if not template_data:
        raise HTTPException(status_code=404, detail="Vorlage nicht gefunden")

    session = LessonSession(
        session_id=str(uuid4()),
        teacher_id=teacher_id,
        class_id=class_id,
        subject=template_data.get("subject", ""),
        topic=topic or template_data.get("default_topic", ""),
        phase_durations=template_data["phase_durations"],
        notes=template_data.get("default_notes", ""),
    )

    # Register in the in-memory session store and persist to disk/DB.
    sessions = get_sessions()
    sessions[session.session_id] = session
    persist_session(session)

    return build_session_response(session)
|
||||
185
backend/api/classroom/utility.py
Normal file
185
backend/api/classroom/utility.py
Normal file
@@ -0,0 +1,185 @@
|
||||
"""
Classroom API - Utility Endpoints.

Health check, phase listing, export and other utility endpoints.
"""

from typing import Dict, List, Optional, Any
from datetime import datetime
import logging

from fastapi import APIRouter, HTTPException, Query
from fastapi.responses import HTMLResponse
from sqlalchemy import text
from pydantic import BaseModel

from classroom_engine import LESSON_PHASES, LessonStateMachine

from .shared import (
    init_db_if_needed,
    get_sessions,
    get_session_or_404,
    ws_manager,
    DB_ENABLED,
    logger,
)

try:
    from classroom_engine.database import SessionLocal
except ImportError:
    # BUGFIX: bind a sentinel instead of leaving the name undefined — the
    # original `pass` left `SessionLocal` unbound, so any later reference
    # (e.g. in health_check) would raise NameError instead of being handled.
    SessionLocal = None

router = APIRouter(tags=["Utility"])
|
||||
|
||||
|
||||
# === Pydantic Models ===
|
||||
|
||||
class PhasesListResponse(BaseModel):
    """Response model: all available lesson phases."""
    # Each entry carries phase id, display name, default duration,
    # activities, icon and description (built in list_phases).
    phases: List[Dict[str, Any]]
|
||||
|
||||
|
||||
class ActiveSessionsResponse(BaseModel):
    """Response model: list of sessions plus the total count."""
    # Lightweight session summaries (not full session objects).
    sessions: List[Dict[str, Any]]
    count: int
|
||||
|
||||
|
||||
# === Endpoints ===
|
||||
|
||||
@router.get("/phases", response_model=PhasesListResponse)
async def list_phases() -> PhasesListResponse:
    """List every available lesson phase together with its metadata."""
    phase_entries = [
        {
            "phase": phase_id,
            "display_name": cfg["display_name"],
            "default_duration_minutes": cfg["default_duration_minutes"],
            "activities": cfg["activities"],
            "icon": cfg["icon"],
            "description": cfg.get("description", ""),
        }
        for phase_id, cfg in LESSON_PHASES.items()
    ]
    return PhasesListResponse(phases=phase_entries)
|
||||
|
||||
|
||||
@router.get("/sessions", response_model=ActiveSessionsResponse)
async def list_active_sessions(
    teacher_id: Optional[str] = Query(None)
) -> ActiveSessionsResponse:
    """List all sessions, optionally filtered by teacher id."""
    all_sessions = get_sessions()

    # Apply the optional teacher filter first.
    selected = [
        s for s in all_sessions.values()
        if not teacher_id or s.teacher_id == teacher_id
    ]

    summaries = []
    for current in selected:
        # A fresh state machine per session, matching the original behavior.
        machine = LessonStateMachine()
        summaries.append({
            "session_id": current.session_id,
            "teacher_id": current.teacher_id,
            "class_id": current.class_id,
            "subject": current.subject,
            "current_phase": current.current_phase.value,
            "is_active": machine.is_lesson_active(current),
            "lesson_started_at": current.lesson_started_at.isoformat() if current.lesson_started_at else None,
        })

    return ActiveSessionsResponse(sessions=summaries, count=len(summaries))
|
||||
|
||||
|
||||
@router.get("/health")
async def health_check() -> Dict[str, Any]:
    """Health check for the classroom service.

    Reports DB connectivity (via a trivial SELECT), the number of
    in-memory sessions and the number of open WebSocket connections.
    """
    db_status = "disabled"
    if DB_ENABLED:
        try:
            # NOTE(review): if execute() raises, this session is never closed
            # (only the error string is recorded) — consider try/finally.
            db = SessionLocal()
            db.execute(text("SELECT 1"))
            db.close()
            db_status = "connected"
        except Exception as e:
            db_status = f"error: {str(e)}"

    sessions = get_sessions()
    return {
        "status": "healthy",
        "service": "classroom-engine",
        "active_sessions": len(sessions),
        "db_enabled": DB_ENABLED,
        "db_status": db_status,
        # Total WebSocket clients summed over all sessions.
        "websocket_connections": sum(
            ws_manager.get_client_count(sid) for sid in ws_manager.get_active_sessions()
        ),
        "timestamp": datetime.utcnow().isoformat(),
    }
|
||||
|
||||
|
||||
@router.get("/ws/status")
async def websocket_status() -> Dict[str, Any]:
    """Report WebSocket connection counts, per session and in total."""
    active = ws_manager.get_active_sessions()
    per_session = {
        session_id: ws_manager.get_client_count(session_id)
        for session_id in active
    }

    return {
        "active_sessions": len(active),
        "session_connections": per_session,
        "total_connections": sum(per_session.values()),
        "timestamp": datetime.utcnow().isoformat(),
    }
|
||||
|
||||
|
||||
@router.get("/export/session/{session_id}", response_class=HTMLResponse)
async def export_session_html(session_id: str) -> HTMLResponse:
    """Export a session as a standalone HTML document.

    Renders subject/topic, class, start date, phase history with
    per-phase durations, notes and homework. Raises 404 via
    get_session_or_404 when the session does not exist.
    """
    session = get_session_or_404(session_id)

    # Simple inline HTML export (no template engine on purpose).
    html = f"""
    <!DOCTYPE html>
    <html lang="de">
    <head>
        <meta charset="UTF-8">
        <title>Session Export - {session.subject}</title>
        <style>
            body {{ font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto; padding: 20px; }}
            h1 {{ color: #333; }}
            .meta {{ color: #666; margin-bottom: 20px; }}
            .section {{ margin: 20px 0; padding: 15px; background: #f5f5f5; border-radius: 8px; }}
            .phase {{ display: flex; justify-content: space-between; padding: 10px 0; border-bottom: 1px solid #ddd; }}
        </style>
    </head>
    <body>
        <h1>{session.subject}: {session.topic or 'Ohne Thema'}</h1>
        <div class="meta">
            <p>Klasse: {session.class_id}</p>
            <p>Datum: {session.lesson_started_at.strftime('%d.%m.%Y %H:%M') if session.lesson_started_at else 'Nicht gestartet'}</p>
            <p>Status: {session.current_phase.value}</p>
        </div>

        <div class="section">
            <h2>Phasen</h2>
            {"".join(f'<div class="phase"><span>{p.get("phase", "")}</span><span>{p.get("duration_seconds", 0) // 60} min</span></div>' for p in session.phase_history)}
        </div>

        <div class="section">
            <h2>Notizen</h2>
            <p>{session.notes or 'Keine Notizen'}</p>
        </div>

        <div class="section">
            <h2>Hausaufgaben</h2>
            <p>{session.homework or 'Keine Hausaufgaben'}</p>
        </div>

        <footer style="margin-top: 40px; color: #999; font-size: 12px;">
            Exportiert am {datetime.utcnow().strftime('%d.%m.%Y %H:%M')} UTC - BreakPilot Classroom
        </footer>
    </body>
    </html>
    """

    return HTMLResponse(content=html)
|
||||
35
backend/api/tests/__init__.py
Normal file
35
backend/api/tests/__init__.py
Normal file
@@ -0,0 +1,35 @@
|
||||
"""
|
||||
Test Registry API
|
||||
|
||||
Zentrales Dashboard fuer alle Tests im Breakpilot-System.
|
||||
Aggregiert Tests aus allen Services.
|
||||
|
||||
Phase 1 Update (2026-02-02):
|
||||
- PostgreSQL-Integration fuer persistente Speicherung
|
||||
- Backlog-Management mit Status-Workflow
|
||||
- Historie und Trends ueber Zeit
|
||||
"""
|
||||
|
||||
from .registry import router
|
||||
from .database import get_db, get_db_session, init_db
|
||||
from .repository import TestRepository
|
||||
from .db_models import (
|
||||
TestRunDB,
|
||||
TestResultDB,
|
||||
FailedTestBacklogDB,
|
||||
TestFixHistoryDB,
|
||||
TestServiceStatsDB
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"router",
|
||||
"get_db",
|
||||
"get_db_session",
|
||||
"init_db",
|
||||
"TestRepository",
|
||||
"TestRunDB",
|
||||
"TestResultDB",
|
||||
"FailedTestBacklogDB",
|
||||
"TestFixHistoryDB",
|
||||
"TestServiceStatsDB"
|
||||
]
|
||||
91
backend/api/tests/database.py
Normal file
91
backend/api/tests/database.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""
Database configuration for the Test Registry.

PostgreSQL connection for persistent test storage.
Replaces the previous JSON-based storage.
"""
import os
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session, declarative_base

# Dedicated declarative Base for the Test Registry, independent of other
# modules. This avoids import problems in CI/CD environments.
Base = declarative_base()

# Database URL from the environment (same DB as the backend).
_raw_url = os.getenv(
    "DATABASE_URL",
    "postgresql://breakpilot:breakpilot123@postgres:5432/breakpilot_db"
)
# SQLAlchemy 2.0 requires the "postgresql://" scheme; Heroku-style URLs
# still use the legacy "postgres://" prefix, so normalize it once here.
DATABASE_URL = _raw_url.replace("postgres://", "postgresql://", 1) if _raw_url.startswith("postgres://") else _raw_url

# Engine configuration with connection pooling.
engine = create_engine(
    DATABASE_URL,
    pool_pre_ping=True,  # validate connections before use
    pool_size=5,  # default pool size
    max_overflow=10,  # extra connections on demand
    pool_recycle=3600,  # recycle connections after one hour
    echo=os.getenv("SQL_ECHO", "false").lower() == "true"
)

# Session factory used by get_db / get_db_session below.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
||||
|
||||
|
||||
def get_db():
    """
    FastAPI dependency yielding a database session.

    The session is always closed after the request, even on errors.
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
|
||||
|
||||
|
||||
@contextmanager
def get_db_session():
    """
    Context manager for database sessions outside of FastAPI.

    Commits when the block finishes normally, rolls back on any
    exception, and always closes the session.

    Example:
        with get_db_session() as db:
            db.query(TestRun).all()
    """
    session = SessionLocal()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
|
||||
|
||||
|
||||
def init_db():
    """
    Create all Test Registry tables.

    In production Alembic migrations should be used instead.
    NOTE(review): create_all only creates tables for models registered on
    THIS module's Base — verify that db_models imports its Base from here
    and not from another package.
    """
    from . import db_models  # imported for its side effect: registers the models
    Base.metadata.create_all(bind=engine)
|
||||
|
||||
|
||||
def check_db_connection() -> bool:
    """
    Return True when a trivial query against the database succeeds.

    Useful for health checks; any failure (including a missing
    sqlalchemy install) yields False rather than raising.
    """
    try:
        from sqlalchemy import text
        with get_db_session() as session:
            session.execute(text("SELECT 1"))
    except Exception:
        return False
    return True
|
||||
227
backend/api/tests/db_models.py
Normal file
227
backend/api/tests/db_models.py
Normal file
@@ -0,0 +1,227 @@
|
||||
"""
SQLAlchemy models for the Test Registry.

Defines the database tables for persistent test storage:
- TestRunDB: every test run
- TestResultDB: individual test results
- FailedTestBacklogDB: persistent backlog of tests to fix
- TestFixHistoryDB: history of all fix attempts
- TestServiceStatsDB: aggregated statistics per service
"""
from datetime import datetime
from sqlalchemy import (
    Column, Integer, String, Float, Text, DateTime, Boolean,
    ForeignKey, UniqueConstraint, Index
)
from sqlalchemy.orm import relationship

# BUGFIX: import the Test Registry's own declarative Base from .database
# instead of classroom_engine.database. database.py deliberately declares an
# independent Base ("avoids import problems in CI/CD"), and its init_db()
# runs Base.metadata.create_all() on THAT Base — models registered on the
# classroom_engine Base would never be created by init_db().
from .database import Base
|
||||
|
||||
|
||||
class TestRunDB(Base):
    """
    One test run (a single execution of a service's test suite).

    Holds run metadata plus aggregate statistics.
    """
    __tablename__ = 'test_runs'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # External run identifier used by the API and as FK target of results.
    run_id = Column(String(50), unique=True, nullable=False, index=True)
    service = Column(String(100), nullable=False, index=True)
    framework = Column(String(50), nullable=False)
    started_at = Column(DateTime, nullable=False, index=True)
    completed_at = Column(DateTime, nullable=True)
    status = Column(String(20), nullable=False)  # queued, running, completed, failed
    total_tests = Column(Integer, default=0)
    passed_tests = Column(Integer, default=0)
    failed_tests = Column(Integer, default=0)
    skipped_tests = Column(Integer, default=0)
    duration_seconds = Column(Float, default=0)
    git_commit = Column(String(40), nullable=True)
    git_branch = Column(String(100), nullable=True)
    triggered_by = Column(String(50), nullable=True)  # manual, ci, schedule
    output = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Individual test results of this run; deleted together with the run.
    results = relationship("TestResultDB", back_populates="run", cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize for JSON responses; "id" mirrors run_id for API compatibility."""
        return {
            "id": self.run_id,
            "run_id": self.run_id,
            "service": self.service,
            "framework": self.framework,
            "started_at": self.started_at.isoformat() if self.started_at else None,
            "completed_at": self.completed_at.isoformat() if self.completed_at else None,
            "status": self.status,
            "total_tests": self.total_tests,
            "passed_tests": self.passed_tests,
            "failed_tests": self.failed_tests,
            "skipped_tests": self.skipped_tests,
            "duration_seconds": self.duration_seconds,
            "git_commit": self.git_commit,
            "git_branch": self.git_branch,
            "triggered_by": self.triggered_by,
        }
|
||||
|
||||
|
||||
class TestResultDB(Base):
    """
    One individual test result within a run.

    Enables detailed analysis of failed tests.
    """
    __tablename__ = 'test_results'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # FK to TestRunDB.run_id; rows are removed when the run is deleted.
    run_id = Column(String(50), ForeignKey('test_runs.run_id', ondelete='CASCADE'), nullable=False, index=True)
    test_name = Column(String(500), nullable=False, index=True)
    test_file = Column(String(500), nullable=True)
    line_number = Column(Integer, nullable=True)
    status = Column(String(20), nullable=False, index=True)  # passed, failed, skipped, error
    duration_ms = Column(Float, nullable=True)
    error_message = Column(Text, nullable=True)
    error_type = Column(String(100), nullable=True)
    output = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Owning run.
    run = relationship("TestRunDB", back_populates="results")

    def to_dict(self):
        """Serialize for JSON responses (omits raw output and created_at)."""
        return {
            "id": self.id,
            "run_id": self.run_id,
            "test_name": self.test_name,
            "test_file": self.test_file,
            "line_number": self.line_number,
            "status": self.status,
            "duration_ms": self.duration_ms,
            "error_message": self.error_message,
            "error_type": self.error_type,
        }
|
||||
|
||||
|
||||
class FailedTestBacklogDB(Base):
    """
    Persistent backlog of failing tests.

    Aggregates failures across multiple runs; one row per
    (test_name, service) pair.
    """
    __tablename__ = 'failed_tests_backlog'
    __table_args__ = (
        # One backlog entry per test per service.
        UniqueConstraint('test_name', 'service', name='uq_backlog_test_service'),
    )

    id = Column(Integer, primary_key=True, autoincrement=True)
    test_name = Column(String(500), nullable=False)
    test_file = Column(String(500), nullable=True)
    service = Column(String(100), nullable=False, index=True)
    framework = Column(String(50), nullable=True)
    error_message = Column(Text, nullable=True)
    error_type = Column(String(100), nullable=True)
    first_failed_at = Column(DateTime, nullable=False)
    last_failed_at = Column(DateTime, nullable=False)
    failure_count = Column(Integer, default=1)
    status = Column(String(30), default='open', index=True)  # open, in_progress, fixed, wont_fix, flaky
    priority = Column(String(20), default='medium', index=True)  # critical, high, medium, low
    assigned_to = Column(String(100), nullable=True)
    fix_suggestion = Column(Text, nullable=True)
    notes = Column(Text, nullable=True)
    # Resolution fields (auto-close when the test passes again).
    resolved_at = Column(DateTime, nullable=True)
    resolution_commit = Column(String(50), nullable=True)
    resolution_notes = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Fix attempts for this entry; deleted together with the entry.
    fixes = relationship("TestFixHistoryDB", back_populates="backlog_item", cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize the full backlog entry for JSON responses."""
        return {
            "id": self.id,
            "test_name": self.test_name,
            "test_file": self.test_file,
            "service": self.service,
            "framework": self.framework,
            "error_message": self.error_message,
            "error_type": self.error_type,
            "first_failed_at": self.first_failed_at.isoformat() if self.first_failed_at else None,
            "last_failed_at": self.last_failed_at.isoformat() if self.last_failed_at else None,
            "failure_count": self.failure_count,
            "status": self.status,
            "priority": self.priority,
            "assigned_to": self.assigned_to,
            "fix_suggestion": self.fix_suggestion,
            "notes": self.notes,
            "resolved_at": self.resolved_at.isoformat() if self.resolved_at else None,
            "resolution_commit": self.resolution_commit,
            "resolution_notes": self.resolution_notes,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }
|
||||
|
||||
|
||||
class TestFixHistoryDB(Base):
    """
    History of all fix attempts for one backlog entry.

    Enables tracking of both automated and manual fixes.
    """
    __tablename__ = 'test_fixes_history'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # Rows are removed when the backlog entry is deleted.
    backlog_id = Column(Integer, ForeignKey('failed_tests_backlog.id', ondelete='CASCADE'), nullable=False, index=True)
    fix_type = Column(String(50), nullable=True)  # manual, auto_claude, auto_script
    fix_description = Column(Text, nullable=True)
    commit_hash = Column(String(40), nullable=True)
    success = Column(Boolean, nullable=True)  # None = outcome not yet known
    created_at = Column(DateTime, default=datetime.utcnow)

    # Owning backlog entry.
    backlog_item = relationship("FailedTestBacklogDB", back_populates="fixes")

    def to_dict(self):
        """Serialize for JSON responses."""
        return {
            "id": self.id,
            "backlog_id": self.backlog_id,
            "fix_type": self.fix_type,
            "fix_description": self.fix_description,
            "commit_hash": self.commit_hash,
            "success": self.success,
            "created_at": self.created_at.isoformat() if self.created_at else None,
        }
|
||||
|
||||
|
||||
class TestServiceStatsDB(Base):
    """
    Aggregated statistics per service.

    Updated after every test run so dashboard queries stay fast.
    """
    __tablename__ = 'test_service_stats'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # One row per service.
    service = Column(String(100), unique=True, nullable=False)
    total_tests = Column(Integer, default=0)
    passed_tests = Column(Integer, default=0)
    failed_tests = Column(Integer, default=0)
    skipped_tests = Column(Integer, default=0)
    pass_rate = Column(Float, default=0.0)
    last_run_id = Column(String(50), nullable=True)
    last_run_at = Column(DateTime, nullable=True)
    last_status = Column(String(20), nullable=True)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def to_dict(self):
        """Serialize for JSON responses; pass_rate rounded to one decimal."""
        return {
            "service": self.service,
            "total_tests": self.total_tests,
            "passed_tests": self.passed_tests,
            "failed_tests": self.failed_tests,
            "skipped_tests": self.skipped_tests,
            "pass_rate": round(self.pass_rate, 1) if self.pass_rate else 0.0,
            "last_run_id": self.last_run_id,
            "last_run_at": self.last_run_at.isoformat() if self.last_run_at else None,
            "last_status": self.last_status,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }
|
||||
277
backend/api/tests/models.py
Normal file
277
backend/api/tests/models.py
Normal file
@@ -0,0 +1,277 @@
|
||||
"""
|
||||
Test Registry Data Models
|
||||
|
||||
Definiert die Datenstrukturen fuer das zentrale Test-Dashboard.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
|
||||
class TestFramework(str, Enum):
    """Supported test frameworks/runners; values are the wire-format strings."""
    GO_TEST = "go_test"
    PYTEST = "pytest"
    JEST = "jest"
    PLAYWRIGHT = "playwright"
    # BQAS_* — project-specific suite types; exact semantics are defined
    # by the respective BQAS runners.
    BQAS_GOLDEN = "bqas_golden"
    BQAS_RAG = "bqas_rag"
    BQAS_SYNTHETIC = "bqas_synthetic"
|
||||
|
||||
|
||||
class TestCategory(str, Enum):
    """Test categories used for grouping and reporting."""
    UNIT = "unit"
    INTEGRATION = "integration"
    E2E = "e2e"
    BQAS = "bqas"
    SECURITY = "security"
    PERFORMANCE = "performance"
|
||||
|
||||
|
||||
class TestStatus(str, Enum):
    """Status of an individual test case (or aggregated suite status)."""
    PENDING = "pending"
    RUNNING = "running"
    PASSED = "passed"
    FAILED = "failed"
    SKIPPED = "skipped"
    ERROR = "error"
|
||||
|
||||
|
||||
class RunStatus(str, Enum):
    """Lifecycle status of a whole test run."""
    QUEUED = "queued"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
|
||||
|
||||
|
||||
@dataclass
class TestCase:
    """A single test case and its most recent result."""
    id: str
    name: str
    file_path: str
    line_number: Optional[int] = None
    framework: TestFramework = TestFramework.GO_TEST
    category: TestCategory = TestCategory.UNIT
    duration_ms: Optional[float] = None
    status: TestStatus = TestStatus.PENDING
    error_message: Optional[str] = None  # set when status is failed/error
    output: Optional[str] = None  # free-form runner output, if captured
|
||||
|
||||
|
||||
@dataclass
class TestSuite:
    """A service's test suite with discovery info and aggregated results."""
    id: str
    service: str
    name: str
    framework: TestFramework
    category: TestCategory
    base_path: str
    pattern: str  # file glob, e.g. "*_test.go" or "test_*.py"
    tests: List[TestCase] = field(default_factory=list)
    # Aggregates over `tests` (maintained by the registry, not computed here).
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    skipped_tests: int = 0
    duration_ms: float = 0.0
    coverage_percent: Optional[float] = None
    last_run: Optional[datetime] = None
    status: TestStatus = TestStatus.PENDING
|
||||
|
||||
|
||||
@dataclass
class TestRun:
    """One execution of a test suite (in-memory counterpart of TestRunDB)."""
    id: str
    suite_id: str
    service: str
    started_at: datetime
    completed_at: Optional[datetime] = None
    status: RunStatus = RunStatus.QUEUED
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    skipped_tests: int = 0
    duration_seconds: float = 0.0
    git_commit: Optional[str] = None
    git_branch: Optional[str] = None
    coverage_percent: Optional[float] = None
    triggered_by: str = "manual"  # manual, ci, schedule
    output: Optional[str] = None
    failed_test_ids: List[str] = field(default_factory=list)
|
||||
|
||||
|
||||
@dataclass
class CoverageReport:
    """Coverage report for one service."""
    service: str
    framework: TestFramework
    line_coverage: float
    branch_coverage: Optional[float] = None
    function_coverage: Optional[float] = None
    statement_coverage: Optional[float] = None
    uncovered_files: List[str] = field(default_factory=list)
    # NOTE(review): datetime.now() is naive local time while the rest of the
    # registry uses datetime.utcnow() — confirm whether UTC was intended here.
    timestamp: datetime = field(default_factory=datetime.now)
|
||||
|
||||
|
||||
@dataclass
class ServiceTestInfo:
    """Aggregated test information for one service (dashboard view)."""
    service: str
    display_name: str
    port: Optional[int] = None
    language: str = "unknown"
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    skipped_tests: int = 0
    pass_rate: float = 0.0  # percentage, 0.0-100.0 presumably — confirm against writer
    coverage_percent: Optional[float] = None
    last_run: Optional[datetime] = None
    status: TestStatus = TestStatus.PENDING
    suites: List[TestSuite] = field(default_factory=list)
|
||||
|
||||
|
||||
@dataclass
class TestRegistryStats:
    """Aggregate statistics across the whole test registry."""
    total_tests: int = 0
    total_passed: int = 0
    total_failed: int = 0
    total_skipped: int = 0
    overall_pass_rate: float = 0.0
    average_coverage: Optional[float] = None
    services_count: int = 0
    last_full_run: Optional[datetime] = None
    by_category: Dict[str, int] = field(default_factory=dict)  # counts keyed by TestCategory value
    by_framework: Dict[str, int] = field(default_factory=dict)  # counts keyed by TestFramework value
|
||||
|
||||
|
||||
# Service definitions with test metadata. Each entry describes where a
# service's tests live, which framework runs them, and (optionally) whether
# they must be executed inside a Docker container.
SERVICE_DEFINITIONS = [
    {
        "service": "consent-service",
        "display_name": "Consent Service",
        "port": 8081,
        "language": "go",
        "base_path": "/consent-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "backend",
        "display_name": "Python Backend",
        "port": 8000,
        "language": "python",
        "base_path": "/backend/tests",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
    },
    {
        "service": "voice-service",
        "display_name": "Voice Service",
        "port": 8091,
        "language": "python",
        "base_path": "/app/tests",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-voice-service",
        "run_in_container": True,
        "pytest_args": "--ignore=/app/tests/bqas",  # Exclude BQAS tests - run separately
    },
    {
        "service": "klausur-service",
        "display_name": "Klausur Service",
        "port": 8086,
        "language": "python",
        "base_path": "/app/tests",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-klausur-service",
        "run_in_container": True,
    },
    {
        "service": "billing-service",
        "display_name": "Billing Service",
        "port": 8082,
        "language": "go",
        "base_path": "/billing-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "school-service",
        "display_name": "School Service",
        "port": 8084,
        "language": "go",
        "base_path": "/school-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "edu-search-service",
        "display_name": "Edu Search Service",
        "port": 8088,
        "language": "go",
        "base_path": "/edu-search-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "ai-compliance-sdk",
        "display_name": "AI Compliance SDK",
        "port": None,
        "language": "go",
        "base_path": "/ai-compliance-sdk",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "geo-service",
        "display_name": "Geo Service",
        "port": 8089,
        "language": "mixed",
        "base_path": "/geo-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
        "disabled": True,  # no tests present - directory is empty
        "disabled_reason": "Keine Test-Dateien vorhanden",
    },
    {
        "service": "website",
        "display_name": "Website (Jest)",
        "port": 3000,
        "language": "typescript",
        "base_path": "/website",
        "test_pattern": "*.test.{ts,tsx}",
        "framework": TestFramework.JEST,
        "requires_setup": True,  # requires npm install in the website directory
        "setup_note": "Fuehren Sie 'npm install' im website-Verzeichnis aus, um Tests lokal auszufuehren",
    },
    # Website E2E removed - no Playwright tests present
    {
        "service": "bqas-golden",
        "display_name": "BQAS Golden Suite",
        "port": 8091,
        "language": "python",
        "base_path": "/app/tests/bqas/test_golden.py",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-voice-service",
        "run_in_container": True,
    },
    {
        "service": "bqas-rag",
        "display_name": "BQAS RAG Tests",
        "port": 8091,
        "language": "python",
        "base_path": "/app/tests/bqas/test_rag.py",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-voice-service",
        "run_in_container": True,
    },
]
|
||||
84
backend/api/tests/registry.py
Normal file
84
backend/api/tests/registry.py
Normal file
@@ -0,0 +1,84 @@
|
||||
"""
|
||||
Test Registry API - Legacy Compatibility Wrapper
|
||||
|
||||
This file provides backward compatibility for code importing from registry.py.
|
||||
All functionality has been moved to the registry/ module.
|
||||
|
||||
For new code, import directly from:
|
||||
from api.tests.registry import router
|
||||
from api.tests.registry.config import PROJECT_ROOT
|
||||
etc.
|
||||
"""
|
||||
|
||||
# Re-export router for backward compatibility
|
||||
from .registry import router
|
||||
|
||||
# Re-export all public APIs from the modular structure
|
||||
from .registry import (
|
||||
# Config
|
||||
PROJECT_ROOT,
|
||||
RUN_MODE,
|
||||
DATA_DIR,
|
||||
RESULTS_FILE,
|
||||
check_go_available,
|
||||
check_pytest_available,
|
||||
get_go_version,
|
||||
get_pytest_version,
|
||||
load_persisted_results,
|
||||
save_persisted_results,
|
||||
migrate_json_to_postgres,
|
||||
is_postgres_available,
|
||||
get_persisted_results,
|
||||
get_test_runs,
|
||||
get_current_runs,
|
||||
get_running_tests,
|
||||
# API Models
|
||||
TestRunRequest,
|
||||
TestRunResponse,
|
||||
RegistryResponse,
|
||||
BacklogStatusUpdate,
|
||||
BacklogPriorityUpdate,
|
||||
FixAttempt,
|
||||
ManualBacklogEntry,
|
||||
CIResultRequest,
|
||||
# Discovery
|
||||
discover_go_tests,
|
||||
discover_python_tests,
|
||||
discover_bqas_tests,
|
||||
build_service_info,
|
||||
# Executors
|
||||
run_go_tests,
|
||||
run_python_tests,
|
||||
run_bqas_tests,
|
||||
run_jest_tests,
|
||||
run_playwright_tests,
|
||||
run_tests_in_container,
|
||||
execute_test_run,
|
||||
# Services
|
||||
extract_go_error,
|
||||
classify_go_error,
|
||||
suggest_go_fix,
|
||||
extract_pytest_error,
|
||||
classify_pytest_error,
|
||||
suggest_pytest_fix,
|
||||
)
|
||||
|
||||
# Legacy aliases for in-memory storage access
|
||||
_test_runs = get_test_runs()
|
||||
_current_runs = get_current_runs()
|
||||
_running_tests = get_running_tests()
|
||||
_persisted_results = get_persisted_results()
|
||||
_use_postgres = is_postgres_available()
|
||||
|
||||
# Legacy function aliases (for compatibility with old function names)
|
||||
_extract_go_error = extract_go_error
|
||||
_classify_go_error = classify_go_error
|
||||
_suggest_go_fix = suggest_go_fix
|
||||
_extract_pytest_error = extract_pytest_error
|
||||
_classify_pytest_error = classify_pytest_error
|
||||
_suggest_pytest_fix = suggest_pytest_fix
|
||||
_check_go_available = check_go_available
|
||||
_check_pytest_available = check_pytest_available
|
||||
_get_go_version = get_go_version
|
||||
_get_pytest_version = get_pytest_version
|
||||
_check_postgres_available = is_postgres_available
|
||||
129
backend/api/tests/registry/__init__.py
Normal file
129
backend/api/tests/registry/__init__.py
Normal file
@@ -0,0 +1,129 @@
|
||||
"""
|
||||
Test Registry Module
|
||||
|
||||
Zentrale API fuer das Test-Dashboard.
|
||||
Entdeckt, registriert und fuehrt Tests aus allen Services aus.
|
||||
|
||||
Phase 1 Update (2026-02-02):
|
||||
- PostgreSQL-Integration fuer persistente Speicherung
|
||||
- Backlog-Management mit Status-Workflow
|
||||
- Historie und Trends ueber Zeit
|
||||
|
||||
Modular Refactoring (2026-02-03):
|
||||
- Split into sub-modules for maintainability
|
||||
"""
|
||||
|
||||
# Re-export the router for FastAPI
|
||||
from .routes import router
|
||||
|
||||
# Re-export config for external access
|
||||
from .config import (
|
||||
PROJECT_ROOT,
|
||||
RUN_MODE,
|
||||
DATA_DIR,
|
||||
RESULTS_FILE,
|
||||
check_go_available,
|
||||
check_pytest_available,
|
||||
get_go_version,
|
||||
get_pytest_version,
|
||||
load_persisted_results,
|
||||
save_persisted_results,
|
||||
migrate_json_to_postgres,
|
||||
is_postgres_available,
|
||||
get_persisted_results,
|
||||
get_test_runs,
|
||||
get_current_runs,
|
||||
get_running_tests,
|
||||
)
|
||||
|
||||
# Re-export API models
|
||||
from .api_models import (
|
||||
TestRunRequest,
|
||||
TestRunResponse,
|
||||
RegistryResponse,
|
||||
BacklogStatusUpdate,
|
||||
BacklogPriorityUpdate,
|
||||
FixAttempt,
|
||||
ManualBacklogEntry,
|
||||
CIResultRequest,
|
||||
)
|
||||
|
||||
# Re-export discovery functions
|
||||
from .discovery import (
|
||||
discover_go_tests,
|
||||
discover_python_tests,
|
||||
discover_bqas_tests,
|
||||
build_service_info,
|
||||
)
|
||||
|
||||
# Re-export executors
|
||||
from .executors import (
|
||||
run_go_tests,
|
||||
run_python_tests,
|
||||
run_bqas_tests,
|
||||
run_jest_tests,
|
||||
run_playwright_tests,
|
||||
run_tests_in_container,
|
||||
execute_test_run,
|
||||
)
|
||||
|
||||
# Re-export services
|
||||
from .services import (
|
||||
extract_go_error,
|
||||
classify_go_error,
|
||||
suggest_go_fix,
|
||||
extract_pytest_error,
|
||||
classify_pytest_error,
|
||||
suggest_pytest_fix,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Router
|
||||
"router",
|
||||
# Config
|
||||
"PROJECT_ROOT",
|
||||
"RUN_MODE",
|
||||
"DATA_DIR",
|
||||
"RESULTS_FILE",
|
||||
"check_go_available",
|
||||
"check_pytest_available",
|
||||
"get_go_version",
|
||||
"get_pytest_version",
|
||||
"load_persisted_results",
|
||||
"save_persisted_results",
|
||||
"migrate_json_to_postgres",
|
||||
"is_postgres_available",
|
||||
"get_persisted_results",
|
||||
"get_test_runs",
|
||||
"get_current_runs",
|
||||
"get_running_tests",
|
||||
# API Models
|
||||
"TestRunRequest",
|
||||
"TestRunResponse",
|
||||
"RegistryResponse",
|
||||
"BacklogStatusUpdate",
|
||||
"BacklogPriorityUpdate",
|
||||
"FixAttempt",
|
||||
"ManualBacklogEntry",
|
||||
"CIResultRequest",
|
||||
# Discovery
|
||||
"discover_go_tests",
|
||||
"discover_python_tests",
|
||||
"discover_bqas_tests",
|
||||
"build_service_info",
|
||||
# Executors
|
||||
"run_go_tests",
|
||||
"run_python_tests",
|
||||
"run_bqas_tests",
|
||||
"run_jest_tests",
|
||||
"run_playwright_tests",
|
||||
"run_tests_in_container",
|
||||
"execute_test_run",
|
||||
# Services
|
||||
"extract_go_error",
|
||||
"classify_go_error",
|
||||
"suggest_go_fix",
|
||||
"extract_pytest_error",
|
||||
"classify_pytest_error",
|
||||
"suggest_pytest_fix",
|
||||
]
|
||||
73
backend/api/tests/registry/api_models.py
Normal file
73
backend/api/tests/registry/api_models.py
Normal file
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Test Registry API Models
|
||||
|
||||
Pydantic models for API requests and responses.
|
||||
"""
|
||||
|
||||
from typing import Optional, Dict, Any, List
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Test Run Models
|
||||
# ==============================================================================
|
||||
|
||||
class TestRunRequest(BaseModel):
    """Request body for triggering a test run."""
    suite_id: str                   # suite to execute
    service: Optional[str] = None   # optional service filter
    triggered_by: str = "manual"    # origin of the trigger (default "manual")
|
||||
|
||||
|
||||
class TestRunResponse(BaseModel):
    """Response returned after a test run has been requested."""
    run_id: str    # identifier of the created run
    status: str    # current run state
    message: str   # human-readable status message
|
||||
|
||||
|
||||
class RegistryResponse(BaseModel):
    """Registry snapshot: all services plus aggregate statistics."""
    services: List[Dict[str, Any]]  # one entry per known service
    stats: Dict[str, Any]           # aggregate statistics
    last_updated: str               # last update time as a string
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Backlog Models
|
||||
# ==============================================================================
|
||||
|
||||
class BacklogStatusUpdate(BaseModel):
    """Payload for changing the status of a backlog entry."""
    status: str
    notes: Optional[str] = None        # optional free-text note
    assigned_to: Optional[str] = None  # optional assignee
|
||||
|
||||
|
||||
class BacklogPriorityUpdate(BaseModel):
    """Payload for changing the priority of a backlog entry."""
    priority: str
|
||||
|
||||
|
||||
class FixAttempt(BaseModel):
    """Record of one attempt to fix a failing test."""
    fix_type: str  # manual, auto_claude, auto_script
    fix_description: str
    commit_hash: Optional[str] = None  # commit containing the fix, if any
    success: bool = False
|
||||
|
||||
|
||||
class ManualBacklogEntry(BaseModel):
    """Manually created backlog entry for features not covered by discovery."""
    test_name: str
    service: str
    error_message: str
    priority: str = "medium"  # critical, high, medium, low
    fix_suggestion: Optional[str] = None
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# CI/CD Models
|
||||
# ==============================================================================
|
||||
|
||||
class CIResultRequest(BaseModel):
    """Data posted by the CI/CD pipeline (Woodpecker)."""
    pipeline_id: str
    commit: str
    branch: str
    status: str  # "completed", "failed", "success"
    test_results: Optional[Dict[str, Any]] = None  # detailed results, if provided
|
||||
230
backend/api/tests/registry/config.py
Normal file
230
backend/api/tests/registry/config.py
Normal file
@@ -0,0 +1,230 @@
|
||||
"""
|
||||
Test Registry Configuration
|
||||
|
||||
Project paths, environment setup, and global state management.
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any, Optional
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
# Projekt-Basisverzeichnis - prüfe verschiedene Pfade
|
||||
# 1. Docker mit Volume-Mount: /app/project
|
||||
# 2. Lokale Entwicklung: /Users/benjaminadmin/Projekte/breakpilot-pwa
|
||||
# 3. Fallback: Demo-Modus
|
||||
DOCKER_PROJECT_PATH = Path("/app/project")
|
||||
LOCAL_PROJECT_PATH = Path("/Users/benjaminadmin/Projekte/breakpilot-pwa")
|
||||
|
||||
if DOCKER_PROJECT_PATH.exists():
|
||||
PROJECT_ROOT = DOCKER_PROJECT_PATH
|
||||
RUN_MODE = "docker"
|
||||
elif LOCAL_PROJECT_PATH.exists():
|
||||
PROJECT_ROOT = LOCAL_PROJECT_PATH
|
||||
RUN_MODE = "local"
|
||||
else:
|
||||
PROJECT_ROOT = LOCAL_PROJECT_PATH # Fallback für Demo
|
||||
RUN_MODE = "demo"
|
||||
|
||||
# Pfad fuer persistierte Ergebnisse (Legacy JSON - wird noch als Fallback verwendet)
|
||||
DATA_DIR = Path("/app/data")
|
||||
RESULTS_FILE = DATA_DIR / "test_results.json"
|
||||
|
||||
# Deaktiviert - wir wollen IMMER echte Tests wenn Tools verfügbar sind
|
||||
IS_DOCKER = False # Nie Demo-Modus verwenden
|
||||
|
||||
# Flag fuer PostgreSQL-Verfuegbarkeit
|
||||
_use_postgres = True
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# In-Memory Storage
|
||||
# ==============================================================================
|
||||
|
||||
# In-Memory Storage (wird parallel zu PostgreSQL gepflegt fuer Abwaertskompatibilitaet)
|
||||
_test_runs: List[Dict] = []
|
||||
_current_runs: Dict[str, Any] = {}
|
||||
_running_tests: Dict[str, Dict] = {} # Progress-Tracking fuer laufende Tests
|
||||
_persisted_results: Dict[str, Dict] = {} # Persistierte Testergebnisse (Legacy)
|
||||
|
||||
|
||||
def get_test_runs() -> List[Dict]:
    """Get all test runs (shared module-level list; mutations are visible to all callers)."""
    return _test_runs
|
||||
|
||||
|
||||
def get_current_runs() -> Dict[str, Any]:
    """Get currently running tests (shared module-level dict)."""
    return _current_runs
|
||||
|
||||
|
||||
def get_running_tests() -> Dict[str, Dict]:
    """Get progress information for running tests (shared module-level dict)."""
    return _running_tests
|
||||
|
||||
|
||||
def get_persisted_results() -> Dict[str, Dict]:
    """Get the persisted test results (shared module-level dict)."""
    return _persisted_results
|
||||
|
||||
|
||||
def set_persisted_results(results: Dict[str, Dict]):
    """Replace the module-level persisted-results dict wholesale."""
    global _persisted_results
    _persisted_results = results
|
||||
|
||||
|
||||
def is_postgres_available() -> bool:
    """Return the cached PostgreSQL-availability flag (no live probe)."""
    return _use_postgres
|
||||
|
||||
|
||||
def set_postgres_available(available: bool):
    """Set the module-level PostgreSQL-availability flag."""
    global _use_postgres
    _use_postgres = available
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Tool Availability Checks
|
||||
# ==============================================================================
|
||||
|
||||
def check_go_available() -> bool:
    """Return True if the Go toolchain is installed and `go version` succeeds.

    Any failure to launch the binary (missing executable, permission error,
    timeout) is treated as "not available".
    """
    try:
        result = subprocess.run(["go", "version"], capture_output=True, timeout=5)
        return result.returncode == 0
    except (OSError, subprocess.SubprocessError):
        # Bare `except:` previously swallowed SystemExit/KeyboardInterrupt too;
        # catch only launch/timeout failures.
        return False
|
||||
|
||||
|
||||
def check_pytest_available() -> bool:
    """Return True if pytest is runnable, trying the venv binary first, then PATH.

    A candidate that cannot be launched or times out is skipped; only a
    successful `--version` invocation counts as available.
    """
    pytest_paths = ["/opt/venv/bin/pytest", "pytest"]
    for path in pytest_paths:
        try:
            result = subprocess.run(path.split() + ["--version"], capture_output=True, timeout=5)
            if result.returncode == 0:
                return True
        except (OSError, subprocess.SubprocessError):
            # Candidate missing or timed out - try the next one.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            continue
    return False
|
||||
|
||||
|
||||
def get_go_version() -> Optional[str]:
    """Return the installed Go version string (e.g. "1.23.5"), or None.

    Returns None when the binary is missing, fails, times out, or produces
    unexpected output.
    """
    try:
        result = subprocess.run(["go", "version"], capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            # "go version go1.23.5 linux/arm64" -> "1.23.5"
            parts = result.stdout.strip().split()
            if len(parts) >= 3:
                return parts[2].replace("go", "")
    except (OSError, subprocess.SubprocessError):
        # Was a bare `except:`; narrow to launch/timeout failures.
        pass
    return None
|
||||
|
||||
|
||||
def get_pytest_version() -> Optional[str]:
    """Return the installed pytest version string (e.g. "8.3.2"), or None.

    Probes the venv binary first, then pytest on PATH - now consistent with
    check_pytest_available(), which already probed both locations. Returns
    None when no candidate works or the output cannot be parsed.
    """
    for path in ("/opt/venv/bin/pytest", "pytest"):
        try:
            result = subprocess.run(path.split() + ["--version"], capture_output=True, text=True, timeout=5)
            if result.returncode == 0 and result.stdout:
                # "pytest 8.x.x" -> "8.x.x"; guard the index that previously
                # relied on a bare `except:` to hide IndexError.
                parts = result.stdout.strip().split()
                if len(parts) >= 2:
                    return parts[1]
        except (OSError, subprocess.SubprocessError):
            continue
    return None
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Persistence Functions
|
||||
# ==============================================================================
|
||||
|
||||
def check_postgres_available() -> bool:
    """Probe the database connection and update the module-level flag.

    Side effect: sets ``_use_postgres`` so later ``is_postgres_available()``
    calls reflect the probe result. Any import or connection failure is
    treated as "PostgreSQL unavailable" (fall back to JSON persistence).
    """
    global _use_postgres
    try:
        from ..database import check_db_connection
        _use_postgres = check_db_connection()
    except Exception:
        # DB module missing or the connection check raised
        _use_postgres = False
    return _use_postgres
|
||||
|
||||
|
||||
def load_persisted_results():
    """Load persisted test results at startup - PostgreSQL first, JSON fallback.

    On success from PostgreSQL the function returns early; otherwise it falls
    back to the legacy JSON file. Errors are printed, never raised.
    """
    global _persisted_results

    # Try to load from PostgreSQL first
    if check_postgres_available():
        try:
            from ..database import get_db_session
            from ..repository import TestRepository

            with get_db_session() as db:
                repo = TestRepository(db)
                stats = repo.get_all_service_stats()
                for stat in stats:
                    _persisted_results[stat.service] = {
                        "total": stat.total_tests,
                        "passed": stat.passed_tests,
                        "failed": stat.failed_tests,
                        "last_run": stat.last_run_at.isoformat() if stat.last_run_at else None,
                        "status": stat.last_status or "unknown",
                        "failed_test_ids": []  # loaded lazily later
                    }
                print(f"Test-Ergebnisse aus PostgreSQL geladen: {len(stats)} Services")
                return
        except Exception as e:
            print(f"Fehler beim Laden aus PostgreSQL: {e}")

    # Fallback: JSON file
    if RESULTS_FILE.exists():
        try:
            with open(RESULTS_FILE, "r") as f:
                _persisted_results = json.load(f)
            print(f"Test-Ergebnisse aus JSON geladen: {len(_persisted_results)} Services")
        except Exception as e:
            print(f"Fehler beim Laden der Testergebnisse: {e}")
            _persisted_results = {}
|
||||
|
||||
|
||||
def save_persisted_results():
    """Persist the in-memory test results to the JSON backup file.

    NOTE(review): a previous docstring claimed "PostgreSQL and JSON", but this
    function only writes the JSON backup - PostgreSQL writes are not done here.
    Errors are printed, never raised.
    """
    # Save JSON as a backup
    try:
        DATA_DIR.mkdir(parents=True, exist_ok=True)
        with open(RESULTS_FILE, "w") as f:
            json.dump(_persisted_results, f, indent=2, default=str)
    except Exception as e:
        print(f"Fehler beim Speichern der JSON-Testergebnisse: {e}")
|
||||
|
||||
|
||||
def migrate_json_to_postgres() -> int:
    """Migrate existing JSON results into PostgreSQL (one-time operation).

    Returns the number of migrated services; 0 when PostgreSQL is unavailable,
    there is nothing to migrate, or the migration fails (error printed).
    """
    if not _use_postgres:
        return 0

    if not _persisted_results:
        return 0

    try:
        from ..database import get_db_session
        from ..repository import TestRepository

        with get_db_session() as db:
            repo = TestRepository(db)
            count = repo.migrate_from_json(_persisted_results)
            print(f"Migration abgeschlossen: {count} Services migriert")
            return count
    except Exception as e:
        print(f"Fehler bei Migration: {e}")
        return 0
|
||||
|
||||
|
||||
# Load persisted results at import time (module-level side effect).
load_persisted_results()
|
||||
16
backend/api/tests/registry/discovery/__init__.py
Normal file
16
backend/api/tests/registry/discovery/__init__.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""
|
||||
Test Discovery Module
|
||||
|
||||
Functions for discovering tests in various frameworks.
|
||||
"""
|
||||
|
||||
from .go_discovery import discover_go_tests
|
||||
from .python_discovery import discover_python_tests, discover_bqas_tests
|
||||
from .service_builder import build_service_info
|
||||
|
||||
__all__ = [
|
||||
"discover_go_tests",
|
||||
"discover_python_tests",
|
||||
"discover_bqas_tests",
|
||||
"build_service_info",
|
||||
]
|
||||
45
backend/api/tests/registry/discovery/go_discovery.py
Normal file
45
backend/api/tests/registry/discovery/go_discovery.py
Normal file
@@ -0,0 +1,45 @@
|
||||
"""
|
||||
Go Test Discovery
|
||||
|
||||
Functions for discovering Go tests in a codebase.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from ...models import TestCase, TestFramework, TestCategory
|
||||
from ..config import PROJECT_ROOT
|
||||
|
||||
|
||||
def discover_go_tests(base_path: Path) -> List[TestCase]:
    """Discover Go test functions under *base_path*.

    Scans every ``*_test.go`` file for ``func TestXxx(`` declarations and
    returns one TestCase per function. Files that cannot be read or parsed
    are skipped (best effort).
    """
    discovered: List[TestCase] = []
    if not base_path.exists():
        return discovered

    # Walk all *_test.go files and parse test functions line by line
    for go_file in base_path.rglob("*_test.go"):
        try:
            for line_no, raw in enumerate(go_file.read_text().split("\n"), 1):
                if not raw.strip().startswith("func Test"):
                    continue
                # Extract the function name between "Test" and "("
                start = raw.find("Test")
                end = raw.find("(", start)
                if end <= start:
                    continue
                func_name = raw[start:end]
                discovered.append(TestCase(
                    id=f"{go_file.stem}_{func_name}",
                    name=func_name,
                    file_path=str(go_file.relative_to(PROJECT_ROOT)),
                    line_number=line_no,
                    framework=TestFramework.GO_TEST,
                    category=TestCategory.UNIT,
                ))
        except Exception:
            # Unreadable/unparsable file - skip it, best effort
            pass

    return discovered
|
||||
86
backend/api/tests/registry/discovery/python_discovery.py
Normal file
86
backend/api/tests/registry/discovery/python_discovery.py
Normal file
@@ -0,0 +1,86 @@
|
||||
"""
|
||||
Python Test Discovery
|
||||
|
||||
Functions for discovering Python and BQAS tests in a codebase.
|
||||
"""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from ...models import TestCase, TestFramework, TestCategory
|
||||
from ..config import PROJECT_ROOT
|
||||
|
||||
|
||||
def discover_python_tests(base_path: Path) -> List[TestCase]:
    """Discover pytest-style tests under *base_path*.

    Scans every ``test_*.py`` file for ``def test_*`` and ``async def test_*``
    declarations and returns one TestCase per function found. Files that
    cannot be read or parsed are skipped (best effort).
    """
    tests = []
    if not base_path.exists():
        return tests

    # Look for test_*.py files
    test_files = list(base_path.rglob("test_*.py"))

    for test_file in test_files:
        try:
            content = test_file.read_text()
            for i, line in enumerate(content.split("\n"), 1):
                stripped = line.strip()
                # Accept sync and async test functions; strip the "async "
                # prefix so a single extraction path handles both (the
                # original duplicated the whole TestCase-construction block).
                if stripped.startswith("async def test_"):
                    stripped = stripped[len("async "):]
                elif not stripped.startswith("def test_"):
                    continue
                name_end = stripped.find("(")
                if name_end > 4:  # "def " is 4 chars; name must be non-empty
                    func_name = stripped[4:name_end]
                    tests.append(TestCase(
                        id=f"{test_file.stem}_{func_name}",
                        name=func_name,
                        file_path=str(test_file.relative_to(PROJECT_ROOT)),
                        line_number=i,
                        framework=TestFramework.PYTEST,
                        category=TestCategory.UNIT,
                    ))
        except Exception:
            # Unreadable file - skip, best effort
            pass

    return tests
|
||||
|
||||
|
||||
def discover_bqas_tests(base_path: Path, test_type: str) -> List[TestCase]:
|
||||
"""Entdeckt BQAS-Tests (Golden/RAG)"""
|
||||
tests = []
|
||||
if not base_path.exists():
|
||||
return tests
|
||||
|
||||
# Suche nach JSON-Dateien
|
||||
test_files = list(base_path.rglob("*.json"))
|
||||
|
||||
for test_file in test_files:
|
||||
try:
|
||||
content = json.loads(test_file.read_text())
|
||||
if isinstance(content, list):
|
||||
for i, test_case in enumerate(content):
|
||||
test_id = test_case.get("id", f"{test_file.stem}_{i}")
|
||||
tests.append(TestCase(
|
||||
id=test_id,
|
||||
name=test_case.get("name", test_id),
|
||||
file_path=str(test_file.relative_to(PROJECT_ROOT)),
|
||||
framework=TestFramework.BQAS_GOLDEN if test_type == "golden" else TestFramework.BQAS_RAG,
|
||||
category=TestCategory.BQAS,
|
||||
))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return tests
|
||||
115
backend/api/tests/registry/discovery/service_builder.py
Normal file
115
backend/api/tests/registry/discovery/service_builder.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""
|
||||
Service Info Builder
|
||||
|
||||
Builds ServiceTestInfo from service definitions.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ...models import ServiceTestInfo, TestStatus, TestFramework
|
||||
from ..config import PROJECT_ROOT, get_persisted_results
|
||||
from .go_discovery import discover_go_tests
|
||||
from .python_discovery import discover_python_tests, discover_bqas_tests
|
||||
|
||||
|
||||
def build_service_info(service_def: Dict) -> ServiceTestInfo:
    """Build a ServiceTestInfo from one SERVICE_DEFINITIONS entry.

    Resolution order:
      1. Disabled services  -> zeroed info marked SKIPPED.
      2. Persisted results  -> aggregate counts from the last recorded run.
      3. Container services -> zeroed PENDING info (no local discovery).
      4. Otherwise          -> local test discovery by framework.
    """
    service_id = service_def["service"]
    persisted_results = get_persisted_results()

    # Check whether the service is disabled
    if service_def.get("disabled", False):
        return ServiceTestInfo(
            service=service_def["service"],
            display_name=f"{service_def['display_name']} (deaktiviert)",
            port=service_def.get("port"),
            language=service_def["language"],
            total_tests=0,
            passed_tests=0,
            failed_tests=0,
            skipped_tests=0,
            pass_rate=0.0,
            coverage_percent=None,
            last_run=None,
            status=TestStatus.SKIPPED,
        )

    # Prefer persisted results (from DB/JSON) over re-discovery
    if service_id in persisted_results:
        persisted = persisted_results[service_id]
        total = persisted.get("total", 0)
        passed = persisted.get("passed", 0)
        failed = persisted.get("failed", 0)
        skipped = max(0, total - passed - failed)  # never negative
        pass_rate = (passed / total * 100) if total > 0 else 0.0
        last_run_str = persisted.get("last_run")
        last_run = datetime.fromisoformat(last_run_str) if last_run_str else None

        return ServiceTestInfo(
            service=service_def["service"],
            display_name=service_def["display_name"],
            port=service_def.get("port"),
            language=service_def["language"],
            total_tests=total,
            passed_tests=passed,
            failed_tests=failed,
            skipped_tests=skipped,
            pass_rate=pass_rate,
            coverage_percent=None,
            last_run=last_run,
            status=TestStatus.PASSED if failed == 0 and total > 0 else TestStatus.FAILED if failed > 0 else TestStatus.PENDING,
        )

    # No persisted results: fall back to test discovery
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
    framework = service_def["framework"]

    # Container-based services: no local discovery possible
    if service_def.get("run_in_container", False):
        # No local tests - wait for an actual execution
        return ServiceTestInfo(
            service=service_def["service"],
            display_name=service_def["display_name"],
            port=service_def.get("port"),
            language=service_def["language"],
            total_tests=0,
            passed_tests=0,
            failed_tests=0,
            skipped_tests=0,
            pass_rate=0.0,
            coverage_percent=None,
            last_run=None,
            status=TestStatus.PENDING,
        )

    # Test discovery based on the framework
    if framework == TestFramework.GO_TEST:
        tests = discover_go_tests(base_path)
    elif framework == TestFramework.PYTEST:
        tests = discover_python_tests(base_path)
    elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
        test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
        tests = discover_bqas_tests(base_path, test_type)
    else:
        tests = []

    total = len(tests)

    # Without persisted results: tests were discovered but never executed -
    # report only the discovered count, all other values stay 0
    return ServiceTestInfo(
        service=service_def["service"],
        display_name=service_def["display_name"],
        port=service_def.get("port"),
        language=service_def["language"],
        total_tests=total,
        passed_tests=0,
        failed_tests=0,
        skipped_tests=0,
        pass_rate=0.0,
        coverage_percent=None,
        last_run=None,
        status=TestStatus.PENDING,
    )
|
||||
23
backend/api/tests/registry/executors/__init__.py
Normal file
23
backend/api/tests/registry/executors/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""
|
||||
Test Executors Module
|
||||
|
||||
Functions for running tests in various frameworks.
|
||||
"""
|
||||
|
||||
from .go_executor import run_go_tests
|
||||
from .python_executor import run_python_tests
|
||||
from .bqas_executor import run_bqas_tests
|
||||
from .jest_executor import run_jest_tests
|
||||
from .playwright_executor import run_playwright_tests
|
||||
from .container_executor import run_tests_in_container
|
||||
from .test_runner import execute_test_run
|
||||
|
||||
__all__ = [
|
||||
"run_go_tests",
|
||||
"run_python_tests",
|
||||
"run_bqas_tests",
|
||||
"run_jest_tests",
|
||||
"run_playwright_tests",
|
||||
"run_tests_in_container",
|
||||
"execute_test_run",
|
||||
]
|
||||
44
backend/api/tests/registry/executors/bqas_executor.py
Normal file
44
backend/api/tests/registry/executors/bqas_executor.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""
|
||||
BQAS Test Executor
|
||||
|
||||
Executes BQAS tests via API proxy.
|
||||
"""
|
||||
|
||||
import json
|
||||
from typing import Dict
|
||||
|
||||
import httpx
|
||||
|
||||
from ...models import TestFramework
|
||||
|
||||
|
||||
async def run_bqas_tests(service_def: Dict) -> Dict:
    """Proxy a BQAS test run to the voice-service API.

    Args:
        service_def: Service definition dict; its ``framework`` field selects
            between the golden-set (``BQAS_GOLDEN``) and RAG test suites.

    Returns:
        Dict with ``passed``/``failed``/``total`` counts, a truncated JSON
        ``output`` and ``failed_test_ids`` (always empty - the BQAS API does
        not report individual test ids).
    """
    test_type = "golden" if service_def["framework"] == TestFramework.BQAS_GOLDEN else "rag"

    error_detail = ""
    try:
        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(
                f"http://localhost:8091/api/v1/bqas/run/{test_type}",
            )
            if response.status_code == 200:
                data = response.json()
                metrics = data.get("metrics", {})
                return {
                    "passed": metrics.get("passed_tests", 0),
                    "failed": metrics.get("failed_tests", 0),
                    "total": metrics.get("total_tests", 0),
                    # Cap the echoed payload so huge reports don't bloat run records.
                    "output": json.dumps(data, indent=2)[:5000],
                    "failed_test_ids": [],
                }
            error_detail = f" (HTTP {response.status_code})"
    except Exception as e:
        # Don't swallow the failure silently - surface the reason in the
        # fallback output so operators can diagnose connectivity problems.
        error_detail = f" ({e})"

    # API unreachable or returned an error - report it, never fake demo data.
    return {
        "passed": 0,
        "failed": 0,
        "total": 0,
        "output": f"BQAS API nicht erreichbar{error_detail}. Nutze docker exec fuer {test_type} Tests.",
        "failed_test_ids": [],
    }
|
||||
106
backend/api/tests/registry/executors/container_executor.py
Normal file
106
backend/api/tests/registry/executors/container_executor.py
Normal file
@@ -0,0 +1,106 @@
|
||||
"""
|
||||
Container Test Executor
|
||||
|
||||
Executes tests inside Docker containers via docker exec.
|
||||
"""
|
||||
|
||||
import re
|
||||
import asyncio
|
||||
import subprocess
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
|
||||
|
||||
def _parse_container_pytest(output: str):
    """Parse pytest text output into ``(passed, failed, failed_test_ids)``."""
    passed = 0
    failed = 0
    failed_test_ids = []
    for line in output.split("\n"):
        if "passed" in line.lower():
            match = re.search(r"(\d+)\s+passed", line)
            if match:
                passed = int(match.group(1))
        if "failed" in line.lower():
            match = re.search(r"(\d+)\s+failed", line)
            if match:
                failed = int(match.group(1))
        if line.startswith("FAILED"):
            failed_test_ids.append(line.replace("FAILED", "").strip())
    return passed, failed, failed_test_ids


def _parse_container_go(output: str):
    """Parse ``go test -v`` text output into ``(passed, failed, failed_test_ids)``."""
    passed = 0
    failed = 0
    failed_test_ids = []
    for line in output.split("\n"):
        if line.startswith("--- PASS:"):
            passed += 1
        elif line.startswith("--- FAIL:"):
            failed += 1
            match = re.search(r"--- FAIL: (\S+)", line)
            if match:
                failed_test_ids.append(match.group(1))
    return passed, failed, failed_test_ids


async def run_tests_in_container(
    container_name: str,
    framework: str,
    base_path: str,
    service_id: str,
    pytest_args: str = ""
) -> Dict:
    """Run a service's tests inside another Docker container via ``docker exec``.

    Args:
        container_name: Target container for ``docker exec``.
        framework: ``"pytest"`` runs Python tests; any other value runs ``go test``.
        base_path: Test path *inside* the container (used for pytest only).
        service_id: Key under which live progress is published.
        pytest_args: Extra whitespace-separated pytest CLI arguments.

    Returns:
        Dict with ``passed``/``failed``/``total`` counts, raw ``output`` and
        ``failed_test_ids``.
    """
    running_tests = get_running_tests()

    running_tests[service_id] = {
        "current_file": "",
        "files_done": 0,
        "files_total": 1,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    try:
        if framework == "pytest":
            cmd = ["docker", "exec", container_name, "python", "-m", "pytest", base_path, "-v", "--tb=short", "-q"]
            # Append additional pytest arguments, if any
            if pytest_args:
                cmd.extend(pytest_args.split())
        else:
            cmd = ["docker", "exec", container_name, "go", "test", "-v", "./..."]

        def run_docker_exec():
            # Blocking call runs in a worker thread so the event loop stays free.
            return subprocess.run(cmd, capture_output=True, text=True, timeout=600)

        result = await asyncio.to_thread(run_docker_exec)

        output = result.stdout + result.stderr
        if framework == "pytest":
            passed, failed, failed_test_ids = _parse_container_pytest(output)
        else:
            passed, failed, failed_test_ids = _parse_container_go(output)

        total = passed + failed

        running_tests[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": passed,
            "failed": failed,
            "status": "completed"
        }

        return {
            "passed": passed,
            "failed": failed,
            "total": total,
            "output": output,
            "failed_test_ids": failed_test_ids
        }

    except Exception as e:
        # Covers docker-exec failures and the 600s subprocess timeout alike.
        running_tests[service_id] = {
            "current_file": str(e),
            "files_done": 0,
            "files_total": 1,
            "passed": 0,
            "failed": 0,
            "status": "error"
        }
        return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}
|
||||
137
backend/api/tests/registry/executors/go_executor.py
Normal file
137
backend/api/tests/registry/executors/go_executor.py
Normal file
@@ -0,0 +1,137 @@
|
||||
"""
|
||||
Go Test Executor
|
||||
|
||||
Executes Go tests and parses results.
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import subprocess
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import check_go_available, get_running_tests
|
||||
from ..services.error_handling import extract_go_error, classify_go_error, suggest_go_fix
|
||||
|
||||
|
||||
async def run_go_tests(base_path: Path, service_id: str = "") -> Dict:
|
||||
"""Fuehrt Go-Tests aus (Thread-basiert, blockiert nicht den Event Loop)"""
|
||||
running_tests = get_running_tests()
|
||||
|
||||
if not base_path.exists():
|
||||
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
|
||||
|
||||
# Prüfe ob Go installiert ist
|
||||
go_available = check_go_available()
|
||||
|
||||
if not go_available:
|
||||
return {"passed": 0, "failed": 0, "total": 0, "output": "Go nicht installiert - Tests koennen nicht ausgefuehrt werden", "failed_test_ids": []}
|
||||
|
||||
# Initialer Progress-Status
|
||||
if service_id:
|
||||
running_tests[service_id] = {
|
||||
"current_file": "Starte Go-Tests...",
|
||||
"files_done": 0,
|
||||
"files_total": 1,
|
||||
"passed": 0,
|
||||
"failed": 0,
|
||||
"status": "running"
|
||||
}
|
||||
|
||||
def run_go_tests_sync():
|
||||
"""Laeuft in separatem Thread"""
|
||||
try:
|
||||
env = os.environ.copy()
|
||||
env["GOPATH"] = "/tmp/go"
|
||||
env["GOCACHE"] = "/tmp/go-cache"
|
||||
env["CGO_ENABLED"] = "0"
|
||||
|
||||
result = subprocess.run(
|
||||
["go", "test", "-v", "-json", "./..."],
|
||||
cwd=str(base_path),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=300,
|
||||
env=env,
|
||||
)
|
||||
|
||||
passed = failed = 0
|
||||
failed_test_ids = []
|
||||
test_outputs = {}
|
||||
|
||||
for line in result.stdout.split("\n"):
|
||||
if line.strip():
|
||||
try:
|
||||
event = json.loads(line)
|
||||
action = event.get("Action")
|
||||
test_name = event.get("Test", "")
|
||||
pkg = event.get("Package", "")
|
||||
|
||||
if action == "pass" and test_name:
|
||||
passed += 1
|
||||
if service_id:
|
||||
running_tests[service_id] = {
|
||||
"current_file": f"{test_name}",
|
||||
"files_done": 1,
|
||||
"files_total": 1,
|
||||
"passed": passed,
|
||||
"failed": failed,
|
||||
"status": "running"
|
||||
}
|
||||
elif action == "fail" and test_name:
|
||||
failed += 1
|
||||
test_key = f"{pkg}::{test_name}"
|
||||
error_output = test_outputs.get(test_key, "")
|
||||
error_message = extract_go_error(error_output)
|
||||
failed_test_ids.append({
|
||||
"id": test_key,
|
||||
"name": test_name,
|
||||
"package": pkg,
|
||||
"file_path": pkg.replace("github.com/", ""),
|
||||
"error_message": error_message or "Test fehlgeschlagen - keine Details",
|
||||
"error_type": classify_go_error(error_message),
|
||||
"suggestion": suggest_go_fix(error_message),
|
||||
})
|
||||
if service_id:
|
||||
running_tests[service_id] = {
|
||||
"current_file": f"{test_name}",
|
||||
"files_done": 1,
|
||||
"files_total": 1,
|
||||
"passed": passed,
|
||||
"failed": failed,
|
||||
"status": "running"
|
||||
}
|
||||
elif action == "output" and test_name:
|
||||
test_key = f"{pkg}::{test_name}"
|
||||
test_outputs[test_key] = test_outputs.get(test_key, "") + event.get("Output", "")
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return {
|
||||
"passed": passed,
|
||||
"failed": failed,
|
||||
"total": passed + failed,
|
||||
"output": result.stdout[:5000] if result.stdout else result.stderr[:5000],
|
||||
"failed_test_ids": failed_test_ids,
|
||||
}
|
||||
except subprocess.TimeoutExpired:
|
||||
return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
|
||||
except Exception as e:
|
||||
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
result = await loop.run_in_executor(None, run_go_tests_sync)
|
||||
|
||||
# Finaler Status
|
||||
if service_id:
|
||||
running_tests[service_id] = {
|
||||
"current_file": "Abgeschlossen",
|
||||
"files_done": 1,
|
||||
"files_total": 1,
|
||||
"passed": result.get("passed", 0),
|
||||
"failed": result.get("failed", 0),
|
||||
"status": "completed"
|
||||
}
|
||||
|
||||
return result
|
||||
130
backend/api/tests/registry/executors/jest_executor.py
Normal file
130
backend/api/tests/registry/executors/jest_executor.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
Jest Test Executor
|
||||
|
||||
Executes Jest tests for JavaScript/TypeScript projects.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
|
||||
|
||||
async def run_jest_tests(base_path: Path, service_id: str = "") -> Dict:
    """Run Jest tests for a JS/TS project via ``npm test``.

    Prefers Jest's ``--json`` report; falls back to scraping the text summary
    when the JSON cannot be located or parsed.

    Args:
        base_path: Project directory containing ``package.json``.
        service_id: Key under which live progress is published.

    Returns:
        Dict with ``passed``/``failed``/``total``, truncated ``output`` and
        ``failed_test_ids``.
    """
    running_tests = get_running_tests()

    if not base_path.exists():
        return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}

    running_tests[service_id] = {
        "current_file": "Starte Jest...",
        "files_done": 0,
        "files_total": 1,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    # Check whether Node.js is available
    try:
        node_check = subprocess.run(["node", "--version"], capture_output=True, timeout=5)
        if node_check.returncode != 0:
            return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht installiert", "failed_test_ids": []}
    except Exception:
        # Narrowed from a bare except: never trap KeyboardInterrupt/SystemExit.
        return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht verfuegbar", "failed_test_ids": []}

    def parse_text_summary(text: str):
        """Fallback: extract 'N passed' / 'N failed' counts from plain text."""
        p = f = 0
        for line in text.split("\n"):
            if "passed" in line.lower():
                m = re.search(r"(\d+)\s+passed", line)
                if m:
                    p = int(m.group(1))
            if "failed" in line.lower():
                m = re.search(r"(\d+)\s+failed", line)
                if m:
                    f = int(m.group(1))
        return p, f

    try:
        # Run Jest from the project directory in non-interactive mode.
        env = os.environ.copy()
        env["CI"] = "true"  # non-interactive mode

        result = subprocess.run(
            ["npm", "test", "--", "--json", "--passWithNoTests"],
            cwd=str(base_path),
            capture_output=True,
            text=True,
            timeout=300,
            env=env,
        )

        output = result.stdout + result.stderr
        passed = 0
        failed = 0
        failed_test_ids = []

        # Try to parse the JSON report first
        try:
            # Jest's JSON starts with {"num... - locate the start.
            json_start = output.find('{"num')
            if json_start == -1:
                json_start = output.rfind('{"')

            if json_start != -1:
                json_str = output[json_start:]
                # raw_decode tolerates trailing non-JSON text after the report.
                decoder = json.JSONDecoder()
                try:
                    jest_result, _ = decoder.raw_decode(json_str)
                    passed = jest_result.get("numPassedTests", 0)
                    failed = jest_result.get("numFailedTests", 0)

                    # Extract failed tests
                    for test_result in jest_result.get("testResults", []):
                        for assertion in test_result.get("assertionResults", []):
                            if assertion.get("status") == "failed":
                                failed_test_ids.append({
                                    "id": f"{test_result.get('name', '')}::{assertion.get('fullName', '')}",
                                    "name": assertion.get("fullName", ""),
                                    "file_path": test_result.get("name", ""),
                                    "error_message": " ".join(assertion.get("failureMessages", []))[:500],
                                    "error_type": "assertion",
                                    "suggestion": "Pruefe die Test-Assertions und erwarteten Werte",
                                })
                except json.JSONDecodeError:
                    passed, failed = parse_text_summary(output)
        except Exception:
            # Generic fallback: scrape the text summary.
            passed, failed = parse_text_summary(output)

        running_tests[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": passed,
            "failed": failed,
            "status": "completed"
        }

        return {
            "passed": passed,
            "failed": failed,
            "total": passed + failed,
            "output": output[:5000],
            "failed_test_ids": failed_test_ids,
        }
    except subprocess.TimeoutExpired:
        return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
    except Exception as e:
        return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}
|
||||
101
backend/api/tests/registry/executors/playwright_executor.py
Normal file
101
backend/api/tests/registry/executors/playwright_executor.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""
|
||||
Playwright Test Executor
|
||||
|
||||
Executes Playwright E2E tests.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
|
||||
|
||||
async def run_playwright_tests(base_path: Path, service_id: str = "") -> Dict:
    """Run Playwright E2E tests via ``npx playwright test --reporter=json``.

    Args:
        base_path: Project directory containing the Playwright config.
        service_id: Key under which live progress is published.

    Returns:
        Dict with ``passed``/``failed``/``total``, truncated ``output`` and
        ``failed_test_ids``.

    NOTE(review): ``subprocess.run`` blocks the event loop for up to 10 min
    here; sibling executors offload to a thread - consider aligning.
    """
    running_tests = get_running_tests()

    if not base_path.exists():
        return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}

    running_tests[service_id] = {
        "current_file": "Starte Playwright...",
        "files_done": 0,
        "files_total": 1,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    try:
        env = os.environ.copy()
        env["CI"] = "true"

        result = subprocess.run(
            ["npx", "playwright", "test", "--reporter=json"],
            cwd=str(base_path),
            capture_output=True,
            text=True,
            timeout=600,  # E2E tests need longer
            env=env,
        )

        output = result.stdout + result.stderr
        passed = 0
        failed = 0
        failed_test_ids = []

        # Parse Playwright's JSON report. The reporter writes JSON to stdout
        # only - parsing the combined output would break on any stderr warning,
        # so decode stdout and keep the combined text for the fallback/report.
        try:
            pw_result = json.loads(result.stdout)
            for suite in pw_result.get("suites", []):
                for spec in suite.get("specs", []):
                    for test in spec.get("tests", []):
                        for result_item in test.get("results", []):
                            if result_item.get("status") == "passed":
                                passed += 1
                            elif result_item.get("status") == "failed":
                                failed += 1
                                failed_test_ids.append({
                                    "id": spec.get("title", ""),
                                    "name": spec.get("title", ""),
                                    "file_path": spec.get("file", ""),
                                    "error_message": result_item.get("error", {}).get("message", "")[:500],
                                    "error_type": "e2e",
                                    "suggestion": "Pruefe den E2E-Test und die Anwendung",
                                })
        except json.JSONDecodeError:
            # Fallback: scrape 'N passed' / 'N failed' from the text output.
            for line in output.split("\n"):
                if "passed" in line.lower():
                    match = re.search(r"(\d+)\s+passed", line)
                    if match:
                        passed = int(match.group(1))
                if "failed" in line.lower():
                    match = re.search(r"(\d+)\s+failed", line)
                    if match:
                        failed = int(match.group(1))

        running_tests[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": passed,
            "failed": failed,
            "status": "completed"
        }

        return {
            "passed": passed,
            "failed": failed,
            "total": passed + failed,
            "output": output[:5000],
            "failed_test_ids": failed_test_ids,
        }
    except subprocess.TimeoutExpired:
        return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 10 Minuten", "failed_test_ids": []}
    except Exception as e:
        return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}
|
||||
187
backend/api/tests/registry/executors/python_executor.py
Normal file
187
backend/api/tests/registry/executors/python_executor.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""
|
||||
Python Test Executor
|
||||
|
||||
Executes pytest tests with live progress updates.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
from ..services.error_handling import extract_pytest_error, classify_pytest_error, suggest_pytest_fix
|
||||
|
||||
|
||||
async def run_python_tests(base_path: Path, service_id: str = "") -> Dict:
    """Run pytest with live progress updates (executed in a worker thread).

    Streams pytest's verbose output line-by-line, counting PASSED/FAILED and
    publishing per-file progress under *service_id*, then enriches failures
    with classified error messages and fix suggestions.

    Args:
        base_path: Test file or directory to pass to pytest.
        service_id: Key under which live progress is published.

    Returns:
        Dict with ``passed``/``failed``/``total``, truncated ``output`` and a
        list of ``failed_test_ids`` dicts.
    """
    running_tests = get_running_tests()

    if not base_path.exists():
        return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}

    # Probe candidate pytest invocations in order of preference.
    pytest_paths = [
        "/opt/venv/bin/pytest",  # Docker venv
        "pytest",  # system pytest
        "python -m pytest",  # as a module
    ]

    pytest_cmd = None
    for path in pytest_paths:
        try:
            check = subprocess.run(
                path.split() + ["--version"],
                capture_output=True,
                timeout=5,
            )
            if check.returncode == 0:
                pytest_cmd = path.split()
                break
        except Exception:
            # Narrowed from a bare except: never trap KeyboardInterrupt/SystemExit.
            continue

    if not pytest_cmd:
        return {"passed": 0, "failed": 0, "total": 0, "output": "pytest nicht gefunden", "failed_test_ids": []}

    # Count test files up front so progress has a meaningful denominator.
    test_files = []
    test_dir = base_path if base_path.is_dir() else base_path.parent
    for f in test_dir.rglob("test_*.py"):
        test_files.append(f.name)
    total_files = len(test_files) if test_files else 1

    # Initial progress state
    running_tests[service_id] = {
        "current_file": "Starte Tests...",
        "files_done": 0,
        "files_total": total_files,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    # Shared result container: the worker thread mutates it, we read it after.
    result_container = {
        "output_lines": [],
        "passed": 0,
        "failed": 0,
        "files_seen": set(),
        "current_file": "",
        "done": False,
        "error": None
    }

    def run_pytest_with_progress():
        """Runs in a separate thread - does not block the event loop."""
        try:
            cwd = str(base_path.parent) if base_path.is_file() else str(base_path)

            # Unbuffered output for real-time progress
            env = os.environ.copy()
            env["PYTHONUNBUFFERED"] = "1"

            process = subprocess.Popen(
                pytest_cmd + ["-v", "-s", "--tb=short", str(base_path)],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                cwd=cwd,
                bufsize=1,
                env=env,
            )

            for line in iter(process.stdout.readline, ''):
                if not line:
                    break

                result_container["output_lines"].append(line)
                line_stripped = line.strip()

                # Parse per-test result lines, e.g. "test_x.py::test_y PASSED"
                match = re.match(r'(\S+\.py)::(\S+)\s+(PASSED|FAILED|SKIPPED|ERROR)', line_stripped)
                if match:
                    file_path = match.group(1)
                    status = match.group(3)

                    file_name = Path(file_path).name
                    if file_name not in result_container["files_seen"]:
                        result_container["files_seen"].add(file_name)
                        result_container["current_file"] = file_name

                    if status == "PASSED":
                        result_container["passed"] += 1
                    elif status in ("FAILED", "ERROR"):
                        result_container["failed"] += 1

                    # Publish progress after every parsed test line
                    running_tests[service_id] = {
                        "current_file": result_container["current_file"],
                        "files_done": len(result_container["files_seen"]),
                        "files_total": max(total_files, len(result_container["files_seen"])),
                        "passed": result_container["passed"],
                        "failed": result_container["failed"],
                        "status": "running"
                    }

            process.wait()
            result_container["done"] = True

        except Exception as e:
            result_container["error"] = str(e)
            result_container["done"] = True

    # asyncio.to_thread replaces the deprecated get_event_loop()/run_in_executor
    # pattern and matches the sibling container executor.
    await asyncio.to_thread(run_pytest_with_progress)

    full_output = "".join(result_container["output_lines"])
    passed = result_container["passed"]
    failed = result_container["failed"]
    files_seen = result_container["files_seen"]

    if result_container["error"]:
        running_tests[service_id] = {
            "current_file": result_container["error"],
            "files_done": 0,
            "files_total": total_files,
            "passed": 0,
            "failed": 0,
            "status": "error"
        }
        return {"passed": 0, "failed": 0, "total": 0, "output": result_container["error"], "failed_test_ids": []}

    # Collect failed tests from the full output, with classified errors.
    failed_test_ids = []
    for match in re.finditer(r'FAILED\s+(\S+)::(\S+)', full_output):
        file_path = match.group(1)
        test_name = match.group(2)
        error_msg = extract_pytest_error(full_output, test_name)
        failed_test_ids.append({
            "id": f"{file_path}::{test_name}",
            "name": test_name,
            "file_path": file_path,
            "error_message": error_msg or "Test fehlgeschlagen - keine Details",
            "error_type": classify_pytest_error(error_msg),
            "suggestion": suggest_pytest_fix(error_msg),
        })

    # Final status
    running_tests[service_id] = {
        "current_file": "Abgeschlossen",
        "files_done": len(files_seen),
        "files_total": len(files_seen),
        "passed": passed,
        "failed": failed,
        "status": "completed"
    }

    return {
        "passed": passed,
        "failed": failed,
        "total": passed + failed,
        "output": full_output[:5000],
        "failed_test_ids": failed_test_ids,
    }
|
||||
192
backend/api/tests/registry/executors/test_runner.py
Normal file
192
backend/api/tests/registry/executors/test_runner.py
Normal file
@@ -0,0 +1,192 @@
|
||||
"""
|
||||
Test Runner
|
||||
|
||||
Orchestrates test execution and persists results.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Dict
|
||||
|
||||
from ...models import TestRun, RunStatus, TestFramework
|
||||
from ..config import (
|
||||
PROJECT_ROOT,
|
||||
get_test_runs,
|
||||
get_current_runs,
|
||||
get_persisted_results,
|
||||
save_persisted_results,
|
||||
is_postgres_available,
|
||||
)
|
||||
from .go_executor import run_go_tests
|
||||
from .python_executor import run_python_tests
|
||||
from .bqas_executor import run_bqas_tests
|
||||
from .jest_executor import run_jest_tests
|
||||
from .playwright_executor import run_playwright_tests
|
||||
from .container_executor import run_tests_in_container
|
||||
|
||||
|
||||
async def execute_test_run(run_id: str, service_def: Dict):
    """Execute a test run in the background.

    Dispatches to the framework-specific executor, records the run in the
    in-memory history, persists a per-service summary to the legacy JSON
    store, and - when available - writes the run and its failed tests to
    PostgreSQL. Results are reported via module state, not a return value.

    Args:
        run_id: Unique id for this run (also used as the DB key).
        service_def: Service definition with at least ``framework``,
            ``service`` and ``base_path``; optional ``disabled``,
            ``run_in_container``/``container_name`` and ``pytest_args``.
    """
    test_runs = get_test_runs()
    current_runs = get_current_runs()
    persisted_results = get_persisted_results()

    framework = service_def["framework"]
    service_id = service_def["service"]
    # base_path in the definition is absolute-style; strip the leading "/"
    # so it joins cleanly under PROJECT_ROOT.
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")

    # Disabled services get a zero-test COMPLETED run recorded immediately.
    if service_def.get("disabled", False):
        reason = service_def.get("disabled_reason", "Service deaktiviert")
        run = TestRun(
            id=run_id,
            suite_id=service_id,
            service=service_id,
            started_at=datetime.now(),
            completed_at=datetime.now(),
            status=RunStatus.COMPLETED,
            output=f"Service deaktiviert: {reason}",
        )
        current_runs[run_id] = run
        test_runs.append({
            "id": run.id,
            "suite_id": run.suite_id,
            "service": run.service,
            "started_at": run.started_at.isoformat(),
            "completed_at": run.completed_at.isoformat(),
            "status": run.status.value,
            "total_tests": 0,
            "passed_tests": 0,
            "failed_tests": 0,
            "failed_test_ids": [],
            "duration_seconds": 0,
        })
        return

    # Check whether tests should run inside another container
    run_in_container = service_def.get("run_in_container", False)
    container_name = service_def.get("container_name", "")

    run = TestRun(
        id=run_id,
        suite_id=service_id,
        service=service_id,
        started_at=datetime.now(),
        status=RunStatus.RUNNING,
    )
    current_runs[run_id] = run

    try:
        # Real test execution, dispatched by framework
        if run_in_container and container_name:
            # Run tests in the external container
            framework_str = "pytest" if framework == TestFramework.PYTEST else "go"
            container_base_path = service_def.get("base_path", "/app/tests")
            pytest_args = service_def.get("pytest_args", "")
            result = await run_tests_in_container(container_name, framework_str, container_base_path, service_id, pytest_args)
        elif framework == TestFramework.GO_TEST:
            result = await run_go_tests(base_path, service_id=service_id)
        elif framework == TestFramework.PYTEST:
            result = await run_python_tests(base_path, service_id=service_id)
        elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
            result = await run_bqas_tests(service_def)
        elif framework == TestFramework.JEST:
            result = await run_jest_tests(base_path, service_id=service_id)
        elif framework == TestFramework.PLAYWRIGHT:
            result = await run_playwright_tests(base_path, service_id=service_id)
        else:
            result = {"passed": 0, "failed": 0, "total": 0, "output": "Framework nicht unterstuetzt"}

        run.completed_at = datetime.now()
        # A run counts as COMPLETED only when nothing failed.
        run.status = RunStatus.COMPLETED if result.get("failed", 0) == 0 else RunStatus.FAILED
        run.total_tests = result.get("total", 0)
        run.passed_tests = result.get("passed", 0)
        run.failed_tests = result.get("failed", 0)
        run.failed_test_ids = result.get("failed_test_ids", [])
        run.duration_seconds = (run.completed_at - run.started_at).total_seconds()
        run.output = result.get("output", "")

    except Exception as e:
        run.completed_at = datetime.now()
        run.status = RunStatus.FAILED
        run.output = str(e)

    # Append to history (in-memory)
    test_runs.append({
        "id": run.id,
        "suite_id": run.suite_id,
        "service": run.service,
        "started_at": run.started_at.isoformat(),
        "completed_at": run.completed_at.isoformat() if run.completed_at else None,
        "status": run.status.value,
        "total_tests": run.total_tests,
        "passed_tests": run.passed_tests,
        "failed_tests": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "duration_seconds": run.duration_seconds,
    })

    # Persist results (legacy in-memory dict backed by JSON)
    persisted_results[service_id] = {
        "total": run.total_tests,
        "passed": run.passed_tests,
        "failed": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "last_run": run.completed_at.isoformat() if run.completed_at else datetime.now().isoformat(),
        "status": run.status.value,
    }
    save_persisted_results()

    # PostgreSQL persistence (best-effort; failures are logged, not raised)
    if is_postgres_available():
        try:
            # Imported lazily so the module works without the DB stack.
            from ...database import get_db_session
            from ...repository import TestRepository

            with get_db_session() as db:
                repo = TestRepository(db)

                # Create the run row if it does not exist yet
                db_run = repo.get_run(run.id)
                if not db_run:
                    db_run = repo.create_run(
                        run_id=run.id,
                        service=service_id,
                        framework=framework.value,
                        triggered_by="manual"
                    )

                # Finalize the run row
                repo.complete_run(
                    run_id=run.id,
                    status=run.status.value,
                    total_tests=run.total_tests,
                    passed_tests=run.passed_tests,
                    failed_tests=run.failed_tests,
                    skipped_tests=0,
                    duration_seconds=run.duration_seconds,
                    output=run.output
                )

                # Store individual (failed) test results; entries may be rich
                # dicts or legacy plain-string ids.
                if run.failed_test_ids:
                    results_to_add = []
                    for failed in run.failed_test_ids:
                        if isinstance(failed, dict):
                            results_to_add.append({
                                "name": failed.get("name") or failed.get("id", "unknown"),
                                "file_path": failed.get("file_path"),
                                "status": "failed",
                                "error_message": failed.get("error_message"),
                                "error_type": failed.get("error_type"),
                                "suggestion": failed.get("suggestion")
                            })
                        elif isinstance(failed, str):
                            results_to_add.append({
                                "name": failed,
                                "status": "failed"
                            })
                    if results_to_add:
                        repo.add_results(run.id, results_to_add)

        except Exception as e:
            print(f"Fehler beim PostgreSQL-Speichern: {e}")
|
||||
21
backend/api/tests/registry/routes/__init__.py
Normal file
21
backend/api/tests/registry/routes/__init__.py
Normal file
@@ -0,0 +1,21 @@
|
||||
"""
|
||||
Test Registry Routes Module
|
||||
|
||||
All API endpoints for the test registry.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter
|
||||
|
||||
from .tests import router as tests_router
|
||||
from .backlog import router as backlog_router
|
||||
from .ci import router as ci_router
|
||||
|
||||
# Create main router: all test-registry endpoints are mounted under /api/tests
router = APIRouter(prefix="/api/tests", tags=["Test Registry"])

# Include sub-routers (tests CRUD/execution, failed-test backlog, CI hooks)
router.include_router(tests_router)
router.include_router(backlog_router)
router.include_router(ci_router)

# Only the aggregated router is the public API of this package.
__all__ = ["router"]
|
||||
580
backend/api/tests/registry/routes/backlog.py
Normal file
580
backend/api/tests/registry/routes/backlog.py
Normal file
@@ -0,0 +1,580 @@
|
||||
"""
|
||||
Test Registry - Backlog Endpoints
|
||||
|
||||
Endpoints for failed test backlog management.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
|
||||
from ...database import get_db_session
|
||||
from ...repository import TestRepository
|
||||
from ..api_models import (
|
||||
BacklogStatusUpdate,
|
||||
BacklogPriorityUpdate,
|
||||
FixAttempt,
|
||||
ManualBacklogEntry,
|
||||
)
|
||||
from ..config import (
|
||||
get_test_runs,
|
||||
get_persisted_results,
|
||||
is_postgres_available,
|
||||
migrate_json_to_postgres,
|
||||
)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/failed")
|
||||
async def get_failed_tests():
|
||||
"""
|
||||
Gibt alle fehlgeschlagenen Tests aus den persistierten Ergebnissen zurueck.
|
||||
Fuer Backlog-Verwaltung mit menschenverstaendlichen Fehlerbeschreibungen.
|
||||
"""
|
||||
persisted_results = get_persisted_results()
|
||||
failed_tests = []
|
||||
|
||||
# Sammle fehlgeschlagene Tests aus persistierten Ergebnissen
|
||||
for service, data in persisted_results.items():
|
||||
run_time = data.get("last_run", "")
|
||||
run_id = f"persisted_{service}"
|
||||
|
||||
# Hole fehlgeschlagene Test-IDs
|
||||
for failed in data.get("failed_test_ids", []):
|
||||
if isinstance(failed, dict):
|
||||
failed_tests.append({
|
||||
"id": failed.get("id", ""),
|
||||
"name": failed.get("name", ""),
|
||||
"service": service,
|
||||
"file_path": failed.get("file_path", ""),
|
||||
"line_number": failed.get("line_number"),
|
||||
"error_message": failed.get("error_message", "Keine Fehlermeldung verfuegbar"),
|
||||
"error_type": failed.get("error_type", "unknown"),
|
||||
"suggestion": failed.get("suggestion", ""),
|
||||
"run_id": run_id,
|
||||
"last_failed": run_time,
|
||||
"status": "open", # open, in_progress, fixed
|
||||
})
|
||||
elif isinstance(failed, str):
|
||||
# Legacy-Format: nur Test-ID als String
|
||||
failed_tests.append({
|
||||
"id": failed,
|
||||
"name": failed,
|
||||
"service": service,
|
||||
"file_path": "",
|
||||
"line_number": None,
|
||||
"error_message": "Keine Details verfuegbar",
|
||||
"error_type": "unknown",
|
||||
"suggestion": "",
|
||||
"run_id": run_id,
|
||||
"last_failed": run_time,
|
||||
"status": "open",
|
||||
})
|
||||
|
||||
# Dedupliziere nach Test-ID (nur neueste Version behalten)
|
||||
seen = {}
|
||||
for test in failed_tests:
|
||||
test_id = test["id"]
|
||||
if test_id not in seen or test["last_failed"] > seen[test_id]["last_failed"]:
|
||||
seen[test_id] = test
|
||||
|
||||
unique_failed = list(seen.values())
|
||||
|
||||
# Gruppiere nach Service
|
||||
by_service = {}
|
||||
for test in unique_failed:
|
||||
service = test["service"]
|
||||
if service not in by_service:
|
||||
by_service[service] = []
|
||||
by_service[service].append(test)
|
||||
|
||||
return {
|
||||
"total_failed": len(unique_failed),
|
||||
"by_service": by_service,
|
||||
"tests": unique_failed,
|
||||
"last_updated": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
|
||||
@router.post("/failed/{test_id}/status")
|
||||
async def update_failed_test_status(test_id: str, status: str):
|
||||
"""
|
||||
Aktualisiert den Status eines fehlgeschlagenen Tests.
|
||||
Status: 'open', 'in_progress', 'fixed', 'wont_fix'
|
||||
|
||||
Legacy-Endpoint - nutzt nun PostgreSQL wenn verfuegbar.
|
||||
"""
|
||||
valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
|
||||
if status not in valid_statuses:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
|
||||
)
|
||||
|
||||
# Versuche in PostgreSQL zu speichern
|
||||
if is_postgres_available():
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
# Suche nach Backlog-Item mit test_id
|
||||
backlog_items = repo.get_backlog()
|
||||
for item in backlog_items:
|
||||
if item.test_name == test_id or str(item.id) == test_id:
|
||||
repo.update_backlog_status(item.id, status)
|
||||
return {
|
||||
"test_id": test_id,
|
||||
"backlog_id": item.id,
|
||||
"status": status,
|
||||
"updated_at": datetime.now().isoformat(),
|
||||
"message": f"Test-Status auf '{status}' gesetzt (PostgreSQL)",
|
||||
}
|
||||
except Exception as e:
|
||||
print(f"PostgreSQL-Fehler: {e}")
|
||||
|
||||
# Fallback: nur Bestaetigung zurueckgeben
|
||||
return {
|
||||
"test_id": test_id,
|
||||
"status": status,
|
||||
"updated_at": datetime.now().isoformat(),
|
||||
"message": f"Test-Status auf '{status}' gesetzt",
|
||||
}
|
||||
|
||||
|
||||
@router.get("/backlog")
|
||||
async def get_backlog(
|
||||
status: Optional[str] = Query(None, description="Filter nach Status: open, in_progress, fixed, wont_fix, flaky"),
|
||||
service: Optional[str] = Query(None, description="Filter nach Service"),
|
||||
priority: Optional[str] = Query(None, description="Filter nach Prioritaet: critical, high, medium, low"),
|
||||
limit: int = Query(100, ge=1, le=500),
|
||||
offset: int = Query(0, ge=0)
|
||||
):
|
||||
"""
|
||||
Gibt den persistenten Backlog fehlgeschlagener Tests zurueck.
|
||||
|
||||
Der Backlog aggregiert fehlgeschlagene Tests ueber mehrere Runs hinweg
|
||||
und ermoeglicht Status-Management (open -> in_progress -> fixed).
|
||||
"""
|
||||
if not is_postgres_available():
|
||||
# Fallback auf legacy /failed Endpoint
|
||||
return await get_failed_tests()
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
items = repo.get_backlog(
|
||||
status=status,
|
||||
service=service,
|
||||
priority=priority,
|
||||
limit=limit,
|
||||
offset=offset
|
||||
)
|
||||
total = repo.get_backlog_count(status=status, service=service)
|
||||
|
||||
# Gruppiere nach Service
|
||||
by_service = {}
|
||||
for item in items:
|
||||
svc = item.service
|
||||
if svc not in by_service:
|
||||
by_service[svc] = []
|
||||
by_service[svc].append(item.to_dict())
|
||||
|
||||
return {
|
||||
"total": total,
|
||||
"items": [item.to_dict() for item in items],
|
||||
"by_service": by_service,
|
||||
"filters": {
|
||||
"status": status,
|
||||
"service": service,
|
||||
"priority": priority
|
||||
},
|
||||
"pagination": {
|
||||
"limit": limit,
|
||||
"offset": offset
|
||||
}
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/backlog/{backlog_id}")
|
||||
async def get_backlog_item(backlog_id: int):
|
||||
"""
|
||||
Gibt Details zu einem einzelnen Backlog-Eintrag zurueck.
|
||||
Inklusive Fix-Historie.
|
||||
"""
|
||||
if not is_postgres_available():
|
||||
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
item = repo.get_backlog_item(backlog_id)
|
||||
if not item:
|
||||
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
|
||||
|
||||
# Hole Fix-Historie
|
||||
fixes = repo.get_fix_history(backlog_id)
|
||||
|
||||
result = item.to_dict()
|
||||
result["fixes"] = [fix.to_dict() for fix in fixes]
|
||||
return result
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/backlog/{backlog_id}/status")
|
||||
async def update_backlog_item_status(backlog_id: int, update: BacklogStatusUpdate):
|
||||
"""
|
||||
Aktualisiert den Status eines Backlog-Eintrags.
|
||||
|
||||
Moegliche Status:
|
||||
- open: Noch nicht bearbeitet
|
||||
- in_progress: Wird gerade bearbeitet
|
||||
- fixed: Test wurde gefixt
|
||||
- wont_fix: Wird nicht gefixt (mit Begruendung)
|
||||
- flaky: Flaky Test, wird separat behandelt
|
||||
"""
|
||||
valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
|
||||
if update.status not in valid_statuses:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
|
||||
)
|
||||
|
||||
if not is_postgres_available():
|
||||
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
item = repo.update_backlog_status(
|
||||
backlog_id=backlog_id,
|
||||
status=update.status,
|
||||
notes=update.notes,
|
||||
assigned_to=update.assigned_to
|
||||
)
|
||||
if not item:
|
||||
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
|
||||
|
||||
return item.to_dict()
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/backlog/{backlog_id}/priority")
|
||||
async def update_backlog_item_priority(backlog_id: int, update: BacklogPriorityUpdate):
|
||||
"""
|
||||
Aktualisiert die Prioritaet eines Backlog-Eintrags.
|
||||
|
||||
Moegliche Prioritaeten:
|
||||
- critical: Kritisch - sofort beheben
|
||||
- high: Hoch - bald beheben
|
||||
- medium: Mittel - bei Gelegenheit
|
||||
- low: Niedrig - irgendwann
|
||||
"""
|
||||
valid_priorities = ["critical", "high", "medium", "low"]
|
||||
if update.priority not in valid_priorities:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
|
||||
)
|
||||
|
||||
if not is_postgres_available():
|
||||
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
item = repo.update_backlog_priority(backlog_id, update.priority)
|
||||
if not item:
|
||||
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
|
||||
|
||||
return item.to_dict()
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/backlog/{backlog_id}/fix")
|
||||
async def add_fix_attempt(backlog_id: int, fix: FixAttempt):
|
||||
"""
|
||||
Fuegt einen Fix-Versuch zur Historie hinzu.
|
||||
|
||||
Bei success=True wird der Backlog-Status automatisch auf 'fixed' gesetzt.
|
||||
"""
|
||||
if not is_postgres_available():
|
||||
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
|
||||
|
||||
valid_fix_types = ["manual", "auto_claude", "auto_script"]
|
||||
if fix.fix_type not in valid_fix_types:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Ungueltiger Fix-Typ. Erlaubt: {', '.join(valid_fix_types)}"
|
||||
)
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
|
||||
# Pruefe ob Backlog-Item existiert
|
||||
item = repo.get_backlog_item(backlog_id)
|
||||
if not item:
|
||||
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
|
||||
|
||||
# Fix-Versuch hinzufuegen
|
||||
fix_record = repo.add_fix_attempt(
|
||||
backlog_id=backlog_id,
|
||||
fix_type=fix.fix_type,
|
||||
fix_description=fix.fix_description,
|
||||
commit_hash=fix.commit_hash,
|
||||
success=fix.success
|
||||
)
|
||||
|
||||
return {
|
||||
"fix": fix_record.to_dict(),
|
||||
"backlog_status": "fixed" if fix.success else item.status
|
||||
}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/backlog")
|
||||
async def create_backlog_entry(entry: ManualBacklogEntry):
|
||||
"""
|
||||
Erstellt einen manuellen Backlog-Eintrag.
|
||||
|
||||
Nuetzlich fuer:
|
||||
- Nicht-integrierte Features (xfail Tests)
|
||||
- Bekannte Probleme die noch behoben werden muessen
|
||||
- Feature Requests aus dem Test-Kontext
|
||||
"""
|
||||
from ...db_models import FailedTestBacklogDB
|
||||
|
||||
if not is_postgres_available():
|
||||
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
|
||||
|
||||
valid_priorities = ["critical", "high", "medium", "low"]
|
||||
if entry.priority not in valid_priorities:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
|
||||
)
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
now = datetime.utcnow()
|
||||
|
||||
# Pruefe ob schon ein offener Eintrag existiert
|
||||
existing = db.query(FailedTestBacklogDB).filter(
|
||||
FailedTestBacklogDB.test_name == entry.test_name,
|
||||
FailedTestBacklogDB.service == entry.service,
|
||||
FailedTestBacklogDB.status == "open"
|
||||
).first()
|
||||
|
||||
if existing:
|
||||
# Aktualisiere existierenden Eintrag
|
||||
existing.error_message = entry.error_message
|
||||
existing.priority = entry.priority
|
||||
existing.fix_suggestion = entry.fix_suggestion
|
||||
existing.last_failed_at = now
|
||||
db.commit()
|
||||
return {
|
||||
"id": existing.id,
|
||||
"status": "updated",
|
||||
"message": f"Existierender Backlog-Eintrag aktualisiert"
|
||||
}
|
||||
|
||||
# Neuen Eintrag erstellen
|
||||
backlog = FailedTestBacklogDB(
|
||||
test_name=entry.test_name,
|
||||
test_file=f"{entry.service}/",
|
||||
service=entry.service,
|
||||
framework="manual",
|
||||
error_message=entry.error_message,
|
||||
error_type="feature_not_integrated",
|
||||
status="open",
|
||||
priority=entry.priority,
|
||||
fix_suggestion=entry.fix_suggestion,
|
||||
first_failed_at=now,
|
||||
last_failed_at=now,
|
||||
failure_count=1
|
||||
)
|
||||
db.add(backlog)
|
||||
db.commit()
|
||||
db.refresh(backlog)
|
||||
|
||||
return {
|
||||
"id": backlog.id,
|
||||
"status": "created",
|
||||
"message": f"Backlog-Eintrag erstellt: {entry.test_name}"
|
||||
}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/history")
|
||||
async def get_test_history(
|
||||
service: Optional[str] = Query(None, description="Filter nach Service"),
|
||||
days: int = Query(30, ge=1, le=365, description="Anzahl Tage zurueck"),
|
||||
limit: int = Query(100, ge=1, le=1000)
|
||||
):
|
||||
"""
|
||||
Gibt die Test-Run Historie fuer Trend-Analysen zurueck.
|
||||
|
||||
Aggregiert Daten nach Tag und Service.
|
||||
"""
|
||||
test_runs = get_test_runs()
|
||||
|
||||
if not is_postgres_available():
|
||||
# Fallback auf In-Memory Historie
|
||||
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
|
||||
return {"runs": sorted_runs[:limit], "source": "memory"}
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
history = repo.get_run_history(
|
||||
service=service,
|
||||
days=days,
|
||||
limit=limit
|
||||
)
|
||||
|
||||
return {
|
||||
"history": history,
|
||||
"days": days,
|
||||
"service": service,
|
||||
"source": "postgresql"
|
||||
}
|
||||
except Exception as e:
|
||||
# Fallback auf In-Memory
|
||||
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
|
||||
return {"runs": sorted_runs[:limit], "source": "memory", "error": str(e)}
|
||||
|
||||
|
||||
@router.get("/trends")
|
||||
async def get_test_trends(
|
||||
service: Optional[str] = Query(None, description="Filter nach Service"),
|
||||
days: int = Query(14, ge=1, le=90, description="Anzahl Tage")
|
||||
):
|
||||
"""
|
||||
Gibt Trend-Daten fuer Visualisierungen zurueck.
|
||||
|
||||
Zeigt Pass-Rate und Test-Anzahl ueber Zeit.
|
||||
"""
|
||||
if not is_postgres_available():
|
||||
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar fuer Trends")
|
||||
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
history = repo.get_run_history(service=service, days=days, limit=days * 20)
|
||||
|
||||
# Aggregiere nach Tag
|
||||
by_date = {}
|
||||
for entry in history:
|
||||
date = entry["date"]
|
||||
if date not in by_date:
|
||||
by_date[date] = {
|
||||
"date": date,
|
||||
"total_tests": 0,
|
||||
"passed": 0,
|
||||
"failed": 0,
|
||||
"runs": 0
|
||||
}
|
||||
by_date[date]["total_tests"] += entry["total_tests"]
|
||||
by_date[date]["passed"] += entry["passed"]
|
||||
by_date[date]["failed"] += entry["failed"]
|
||||
by_date[date]["runs"] += entry["runs"]
|
||||
|
||||
# Berechne Pass-Rate pro Tag
|
||||
trends = []
|
||||
for date, data in sorted(by_date.items()):
|
||||
total = data["total_tests"]
|
||||
data["pass_rate"] = round((data["passed"] / total * 100) if total > 0 else 0, 1)
|
||||
trends.append(data)
|
||||
|
||||
return {
|
||||
"trends": trends,
|
||||
"days": days,
|
||||
"service": service
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/stats")
|
||||
async def get_aggregated_stats():
|
||||
"""
|
||||
Gibt aggregierte Statistiken ueber alle Services zurueck.
|
||||
|
||||
Kombiniert Daten aus PostgreSQL und Service-Definitionen.
|
||||
"""
|
||||
from ...models import TestRegistryStats
|
||||
|
||||
persisted_results = get_persisted_results()
|
||||
|
||||
if is_postgres_available():
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
summary = repo.get_summary_stats()
|
||||
service_stats = repo.get_all_service_stats()
|
||||
|
||||
return {
|
||||
"summary": summary,
|
||||
"services": [s.to_dict() for s in service_stats],
|
||||
"source": "postgresql"
|
||||
}
|
||||
except Exception as e:
|
||||
print(f"PostgreSQL-Fehler: {e}")
|
||||
|
||||
# Fallback auf Legacy-Daten
|
||||
stats = TestRegistryStats()
|
||||
for service, data in persisted_results.items():
|
||||
stats.total_tests += data.get("total", 0)
|
||||
stats.total_passed += data.get("passed", 0)
|
||||
stats.total_failed += data.get("failed", 0)
|
||||
|
||||
stats.services_count = len(persisted_results)
|
||||
stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0
|
||||
|
||||
return {
|
||||
"summary": {
|
||||
"total_tests": stats.total_tests,
|
||||
"total_passed": stats.total_passed,
|
||||
"total_failed": stats.total_failed,
|
||||
"total_skipped": stats.total_skipped,
|
||||
"services_count": stats.services_count,
|
||||
"overall_pass_rate": round(stats.overall_pass_rate, 1)
|
||||
},
|
||||
"services": list(persisted_results.keys()),
|
||||
"source": "memory"
|
||||
}
|
||||
|
||||
|
||||
@router.post("/migrate")
|
||||
async def trigger_migration():
|
||||
"""
|
||||
Migriert bestehende JSON-Daten nach PostgreSQL.
|
||||
|
||||
Einmalig ausfuehren um historische Daten zu uebernehmen.
|
||||
"""
|
||||
if not is_postgres_available():
|
||||
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
|
||||
|
||||
count = migrate_json_to_postgres()
|
||||
return {
|
||||
"migrated_services": count,
|
||||
"message": f"{count} Services von JSON nach PostgreSQL migriert"
|
||||
}
|
||||
295
backend/api/tests/registry/routes/ci.py
Normal file
295
backend/api/tests/registry/routes/ci.py
Normal file
@@ -0,0 +1,295 @@
|
||||
"""
|
||||
Test Registry - CI/CD Integration Endpoints
|
||||
|
||||
Endpoints for receiving results from CI/CD pipelines.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Dict
|
||||
|
||||
from fastapi import APIRouter, BackgroundTasks
|
||||
|
||||
from ...database import get_db_session
|
||||
from ...repository import TestRepository
|
||||
from ..api_models import CIResultRequest
|
||||
from ..config import (
|
||||
get_test_runs,
|
||||
get_persisted_results,
|
||||
is_postgres_available,
|
||||
)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/ci-result")
|
||||
async def receive_ci_result(result: CIResultRequest, background_tasks: BackgroundTasks):
|
||||
"""
|
||||
Empfaengt Test-Ergebnisse von der CI/CD-Pipeline.
|
||||
|
||||
Wird vom report-test-results Step in .woodpecker/main.yml aufgerufen.
|
||||
|
||||
Flow:
|
||||
1. Pipeline fuehrt Tests aus und sammelt JSON-Ergebnisse
|
||||
2. Pipeline sendet detaillierte Ergebnisse pro Service hierher
|
||||
3. Dieser Endpoint speichert in PostgreSQL
|
||||
4. Dashboard zeigt die Daten an
|
||||
|
||||
test_results Format:
|
||||
{
|
||||
"service": "consent-service",
|
||||
"framework": "go",
|
||||
"total": 57,
|
||||
"passed": 57,
|
||||
"failed": 0,
|
||||
"skipped": 0,
|
||||
"coverage": 75.5
|
||||
}
|
||||
"""
|
||||
test_runs = get_test_runs()
|
||||
persisted_results = get_persisted_results()
|
||||
|
||||
# Extrahiere Service-spezifische Daten aus test_results
|
||||
tr = result.test_results or {}
|
||||
service_name = tr.get("service", "ci-pipeline")
|
||||
framework = tr.get("framework", "unknown")
|
||||
total = tr.get("total", 0)
|
||||
passed = tr.get("passed", 0)
|
||||
failed = tr.get("failed", 0)
|
||||
skipped = tr.get("skipped", 0)
|
||||
coverage = tr.get("coverage", 0)
|
||||
|
||||
# Log zur Debugging
|
||||
print(f"[CI-RESULT] Pipeline {result.pipeline_id} - Service: {service_name}")
|
||||
print(f"[CI-RESULT] Tests: {passed}/{total} passed, {failed} failed, {skipped} skipped")
|
||||
print(f"[CI-RESULT] Coverage: {coverage}%, Commit: {result.commit[:8]}")
|
||||
|
||||
# Speichere in PostgreSQL wenn verfuegbar
|
||||
if is_postgres_available():
|
||||
try:
|
||||
with get_db_session() as db:
|
||||
repo = TestRepository(db)
|
||||
|
||||
# Erstelle eindeutige Run-ID pro Service
|
||||
run_id = f"ci-{result.pipeline_id}-{service_name}"
|
||||
|
||||
# Erstelle Test-Run Eintrag
|
||||
run = repo.create_run(
|
||||
run_id=run_id,
|
||||
service=service_name,
|
||||
framework=framework,
|
||||
triggered_by="ci",
|
||||
git_commit=result.commit[:8] if result.commit else None,
|
||||
git_branch=result.branch
|
||||
)
|
||||
|
||||
# Markiere als abgeschlossen mit detaillierten Zahlen
|
||||
status = "passed" if failed == 0 else "failed"
|
||||
repo.complete_run(
|
||||
run_id=run_id,
|
||||
status=status,
|
||||
total_tests=total,
|
||||
passed_tests=passed,
|
||||
failed_tests=failed,
|
||||
skipped_tests=skipped,
|
||||
duration_seconds=0
|
||||
)
|
||||
print(f"[CI-RESULT] Stored as run_id: {run_id}, status: {status}")
|
||||
|
||||
# WICHTIG: Aktualisiere den In-Memory Cache fuer sofortige Frontend-Updates
|
||||
persisted_results[service_name] = {
|
||||
"total": total,
|
||||
"passed": passed,
|
||||
"failed": failed,
|
||||
"last_run": datetime.utcnow().isoformat(),
|
||||
"status": status,
|
||||
"failed_test_ids": []
|
||||
}
|
||||
print(f"[CI-RESULT] Updated cache for {service_name}: {passed}/{total} passed")
|
||||
|
||||
# Bei fehlgeschlagenen Tests: Backlog-Eintrag erstellen
|
||||
if failed > 0:
|
||||
background_tasks.add_task(
|
||||
_create_backlog_entry,
|
||||
service_name,
|
||||
framework,
|
||||
failed,
|
||||
result.pipeline_id,
|
||||
result.commit,
|
||||
result.branch
|
||||
)
|
||||
else:
|
||||
# Alle Tests bestanden: Schließe offene Backlog-Einträge
|
||||
background_tasks.add_task(
|
||||
_close_backlog_entry,
|
||||
service_name,
|
||||
result.pipeline_id,
|
||||
result.commit
|
||||
)
|
||||
|
||||
return {
|
||||
"received": True,
|
||||
"run_id": run_id,
|
||||
"service": service_name,
|
||||
"pipeline_id": result.pipeline_id,
|
||||
"status": status,
|
||||
"tests": {"total": total, "passed": passed, "failed": failed},
|
||||
"stored_in": "postgres"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
print(f"[CI-RESULT] PostgreSQL Error: {e}")
|
||||
# Fallback auf Memory-Storage
|
||||
pass
|
||||
|
||||
# Memory-Fallback
|
||||
ci_run = {
|
||||
"id": f"ci-{result.pipeline_id}",
|
||||
"pipeline_id": result.pipeline_id,
|
||||
"commit": result.commit,
|
||||
"branch": result.branch,
|
||||
"status": result.status,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"test_results": result.test_results
|
||||
}
|
||||
test_runs.append(ci_run)
|
||||
|
||||
return {
|
||||
"received": True,
|
||||
"pipeline_id": result.pipeline_id,
|
||||
"status": result.status,
|
||||
"stored_in": "memory"
|
||||
}
|
||||
|
||||
|
||||
async def _create_backlog_entry(
    service_name: str,
    framework: str,
    failed_count: int,
    pipeline_id: str,
    commit: str,
    branch: str
):
    """
    Background task: create (or refresh) a backlog entry for failed tests.

    Called asynchronously when a CI run reports failures. One aggregated
    entry per service is kept; repeated failures bump failure_count.
    """
    from ...db_models import FailedTestBacklogDB

    print(f"[CI-RESULT] Creating backlog entry for {service_name}: {failed_count} failed tests")

    if is_postgres_available():
        try:
            with get_db_session() as db:
                now = datetime.utcnow()

                # Check whether an open backlog entry for this service exists.
                existing = db.query(FailedTestBacklogDB).filter(
                    FailedTestBacklogDB.service == service_name,
                    FailedTestBacklogDB.status == "open"
                ).first()

                if existing:
                    # Refresh the existing entry.
                    existing.last_failed_at = now
                    existing.failure_count += 1
                    existing.error_message = f"{failed_count} Tests fehlgeschlagen in Pipeline {pipeline_id} (Branch: {branch})"
                    db.commit()
                    print(f"[CI-RESULT] Updated existing backlog entry (ID: {existing.id})")
                else:
                    # Create a new aggregated entry for the service.
                    backlog = FailedTestBacklogDB(
                        test_name=f"{service_name} Tests",
                        test_file=f"{service_name}/",
                        service=service_name,
                        framework=framework,
                        error_message=f"{failed_count} Tests fehlgeschlagen in Pipeline {pipeline_id} (Branch: {branch})",
                        error_type="TEST_FAILURE",
                        first_failed_at=now,
                        last_failed_at=now,
                        failure_count=1,
                        status="open",
                        # Many failures at once are treated as more urgent.
                        priority="high" if failed_count > 5 else "medium"
                    )
                    db.add(backlog)
                    db.commit()
                    print(f"[CI-RESULT] Created new backlog entry (ID: {backlog.id})")

        except Exception as e:
            print(f"[CI-RESULT] Error creating backlog entry: {e}")
|
||||
|
||||
|
||||
async def _close_backlog_entry(
    service_name: str,
    pipeline_id: str,
    commit: str
):
    """
    Background task: close open backlog entries when all tests pass.

    Called asynchronously after a fully green CI run for a service.
    """
    from ...db_models import FailedTestBacklogDB

    print(f"[CI-RESULT] Checking for open backlog entries to close for {service_name}")

    if is_postgres_available():
        try:
            with get_db_session() as db:
                now = datetime.utcnow()

                # Find open backlog entries for this service.
                open_entries = db.query(FailedTestBacklogDB).filter(
                    FailedTestBacklogDB.service == service_name,
                    FailedTestBacklogDB.status == "open"
                ).all()

                for entry in open_entries:
                    # NOTE(review): "resolved" is not part of the status
                    # vocabulary used by the backlog endpoints
                    # (open/in_progress/fixed/wont_fix/flaky) — confirm the
                    # dashboard filters handle it.
                    entry.status = "resolved"
                    entry.resolved_at = now
                    entry.resolution_commit = commit[:8] if commit else None
                    entry.resolution_notes = f"Automatisch geschlossen - alle Tests in Pipeline {pipeline_id} bestanden"
                    print(f"[CI-RESULT] Auto-closed backlog entry (ID: {entry.id}) for {service_name}")

                if open_entries:
                    db.commit()
                    print(f"[CI-RESULT] Closed {len(open_entries)} backlog entries for {service_name}")
                else:
                    print(f"[CI-RESULT] No open backlog entries for {service_name}")

        except Exception as e:
            print(f"[CI-RESULT] Error closing backlog entries: {e}")
|
||||
|
||||
|
||||
async def _fetch_and_store_failed_tests(pipeline_id: str, commit: str, branch: str):
    """
    Legacy background task for generic pipeline failures.

    Records a single coarse-grained backlog entry for a failed pipeline
    when PostgreSQL is available; otherwise does nothing.
    """
    from ...db_models import FailedTestBacklogDB

    print(f"[CI-RESULT] Fetching failed test details for pipeline {pipeline_id}")

    if not is_postgres_available():
        return

    try:
        with get_db_session() as db:
            timestamp = datetime.utcnow()
            entry = FailedTestBacklogDB(
                test_name=f"CI Pipeline {pipeline_id}",
                test_file=".woodpecker/main.yml",
                service="ci-pipeline",
                framework="woodpecker",
                error_message=f"Pipeline {pipeline_id} fehlgeschlagen auf Branch {branch}",
                error_type="CI_FAILURE",
                first_failed_at=timestamp,
                last_failed_at=timestamp,
                failure_count=1,
                status="open",
                priority="high",
            )
            db.add(entry)
            db.commit()
            print(f"[CI-RESULT] Added pipeline failure to backlog (ID: {entry.id})")
    except Exception as e:
        print(f"[CI-RESULT] Error adding to backlog: {e}")
|
||||
335
backend/api/tests/registry/routes/tests.py
Normal file
335
backend/api/tests/registry/routes/tests.py
Normal file
@@ -0,0 +1,335 @@
|
||||
"""
|
||||
Test Registry - Test Endpoints
|
||||
|
||||
Endpoints for test discovery, running, and monitoring.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any
|
||||
|
||||
from fastapi import APIRouter, HTTPException, BackgroundTasks
|
||||
|
||||
from ...models import (
|
||||
TestFramework,
|
||||
TestRegistryStats,
|
||||
SERVICE_DEFINITIONS,
|
||||
)
|
||||
from ..api_models import TestRunResponse, RegistryResponse
|
||||
from ..config import (
|
||||
PROJECT_ROOT,
|
||||
RUN_MODE,
|
||||
check_go_available,
|
||||
check_pytest_available,
|
||||
get_go_version,
|
||||
get_pytest_version,
|
||||
get_test_runs,
|
||||
get_current_runs,
|
||||
get_running_tests,
|
||||
)
|
||||
from ..discovery import (
|
||||
build_service_info,
|
||||
discover_go_tests,
|
||||
discover_python_tests,
|
||||
discover_bqas_tests,
|
||||
)
|
||||
from ..executors import execute_test_run
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/registry", response_model=RegistryResponse)
|
||||
async def get_test_registry():
|
||||
"""
|
||||
Gibt alle registrierten Tests zurueck.
|
||||
Scannt alle Services und aggregiert Test-Informationen.
|
||||
"""
|
||||
services = []
|
||||
stats = TestRegistryStats()
|
||||
|
||||
by_category: Dict[str, int] = {}
|
||||
by_framework: Dict[str, int] = {}
|
||||
|
||||
for service_def in SERVICE_DEFINITIONS:
|
||||
info = build_service_info(service_def)
|
||||
|
||||
services.append({
|
||||
"service": info.service,
|
||||
"display_name": info.display_name,
|
||||
"port": info.port,
|
||||
"language": info.language,
|
||||
"total_tests": info.total_tests,
|
||||
"passed_tests": info.passed_tests,
|
||||
"failed_tests": info.failed_tests,
|
||||
"skipped_tests": info.skipped_tests,
|
||||
"pass_rate": round(info.pass_rate, 1),
|
||||
"coverage_percent": round(info.coverage_percent, 1) if info.coverage_percent else None,
|
||||
"last_run": info.last_run.isoformat() if info.last_run else None,
|
||||
"status": info.status.value,
|
||||
})
|
||||
|
||||
stats.total_tests += info.total_tests
|
||||
stats.total_passed += info.passed_tests
|
||||
stats.total_failed += info.failed_tests
|
||||
stats.total_skipped += info.skipped_tests
|
||||
|
||||
# Framework-Stats
|
||||
framework_name = service_def["framework"].value
|
||||
by_framework[framework_name] = by_framework.get(framework_name, 0) + info.total_tests
|
||||
|
||||
# Category basierend auf Framework
|
||||
if service_def["framework"] in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
|
||||
by_category["bqas"] = by_category.get("bqas", 0) + info.total_tests
|
||||
elif service_def["framework"] == TestFramework.PLAYWRIGHT:
|
||||
by_category["e2e"] = by_category.get("e2e", 0) + info.total_tests
|
||||
else:
|
||||
by_category["unit"] = by_category.get("unit", 0) + info.total_tests
|
||||
|
||||
stats.services_count = len(services)
|
||||
stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0.0
|
||||
stats.by_category = by_category
|
||||
stats.by_framework = by_framework
|
||||
|
||||
return RegistryResponse(
|
||||
services=services,
|
||||
stats={
|
||||
"total_tests": stats.total_tests,
|
||||
"total_passed": stats.total_passed,
|
||||
"total_failed": stats.total_failed,
|
||||
"total_skipped": stats.total_skipped,
|
||||
"overall_pass_rate": round(stats.overall_pass_rate, 1),
|
||||
"services_count": stats.services_count,
|
||||
"by_category": stats.by_category,
|
||||
"by_framework": stats.by_framework,
|
||||
},
|
||||
last_updated=datetime.now().isoformat(),
|
||||
)
|
||||
|
||||
|
||||
@router.get("/registry/{service}")
|
||||
async def get_service_tests(service: str):
|
||||
"""
|
||||
Gibt Tests fuer einen spezifischen Service zurueck.
|
||||
"""
|
||||
service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == service), None)
|
||||
if not service_def:
|
||||
raise HTTPException(status_code=404, detail=f"Service '{service}' nicht gefunden")
|
||||
|
||||
info = build_service_info(service_def)
|
||||
base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
|
||||
framework = service_def["framework"]
|
||||
|
||||
# Test-Discovery
|
||||
if framework == TestFramework.GO_TEST:
|
||||
tests = discover_go_tests(base_path)
|
||||
elif framework == TestFramework.PYTEST:
|
||||
tests = discover_python_tests(base_path)
|
||||
elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
|
||||
test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
|
||||
tests = discover_bqas_tests(base_path, test_type)
|
||||
else:
|
||||
tests = []
|
||||
|
||||
return {
|
||||
"service": info.service,
|
||||
"display_name": info.display_name,
|
||||
"port": info.port,
|
||||
"language": info.language,
|
||||
"total_tests": len(tests),
|
||||
"passed_tests": info.passed_tests,
|
||||
"failed_tests": info.failed_tests,
|
||||
"coverage_percent": info.coverage_percent,
|
||||
"tests": [
|
||||
{
|
||||
"id": t.id,
|
||||
"name": t.name,
|
||||
"file_path": t.file_path,
|
||||
"line_number": t.line_number,
|
||||
"framework": t.framework.value,
|
||||
"status": t.status.value,
|
||||
}
|
||||
for t in tests
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
@router.post("/run/{suite}", response_model=TestRunResponse)
|
||||
async def run_test_suite(suite: str, background_tasks: BackgroundTasks):
|
||||
"""
|
||||
Startet einen Test-Run fuer eine Suite.
|
||||
Fuehrt Tests im Hintergrund aus.
|
||||
"""
|
||||
service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == suite), None)
|
||||
if not service_def:
|
||||
raise HTTPException(status_code=404, detail=f"Suite '{suite}' nicht gefunden")
|
||||
|
||||
run_id = f"run_{suite}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
|
||||
|
||||
# Background Task starten
|
||||
background_tasks.add_task(execute_test_run, run_id, service_def)
|
||||
|
||||
return TestRunResponse(
|
||||
run_id=run_id,
|
||||
status="queued",
|
||||
message=f"Test-Run fuer {service_def['display_name']} gestartet",
|
||||
)
|
||||
|
||||
|
||||
@router.get("/runs")
|
||||
async def get_test_runs_list(limit: int = 20):
|
||||
"""
|
||||
Gibt die Test-Run Historie zurueck.
|
||||
"""
|
||||
test_runs = get_test_runs()
|
||||
# Sortiert nach Startzeit, neueste zuerst
|
||||
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
|
||||
return {"runs": sorted_runs[:limit]}
|
||||
|
||||
|
||||
@router.get("/progress/{service_id}")
|
||||
async def get_test_progress(service_id: str):
|
||||
"""
|
||||
Gibt den Fortschritt eines laufenden Tests zurueck.
|
||||
Wird vom Frontend gepollt um Live-Updates anzuzeigen.
|
||||
"""
|
||||
running_tests = get_running_tests()
|
||||
if service_id in running_tests:
|
||||
return running_tests[service_id]
|
||||
|
||||
# Kein laufender Test - Standard-Antwort
|
||||
return {
|
||||
"current_file": "",
|
||||
"files_done": 0,
|
||||
"files_total": 0,
|
||||
"passed": 0,
|
||||
"failed": 0,
|
||||
"status": "idle"
|
||||
}
|
||||
|
||||
|
||||
@router.get("/progress")
|
||||
async def get_all_progress():
|
||||
"""
|
||||
Gibt den Fortschritt aller laufenden Tests zurueck.
|
||||
"""
|
||||
return get_running_tests()
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}")
|
||||
async def get_test_run(run_id: str):
|
||||
"""
|
||||
Gibt Details zu einem spezifischen Test-Run zurueck.
|
||||
"""
|
||||
current_runs = get_current_runs()
|
||||
test_runs = get_test_runs()
|
||||
|
||||
if run_id in current_runs:
|
||||
run = current_runs[run_id]
|
||||
return {
|
||||
"id": run.id,
|
||||
"suite_id": run.suite_id,
|
||||
"service": run.service,
|
||||
"started_at": run.started_at.isoformat(),
|
||||
"completed_at": run.completed_at.isoformat() if run.completed_at else None,
|
||||
"status": run.status.value,
|
||||
"total_tests": run.total_tests,
|
||||
"passed_tests": run.passed_tests,
|
||||
"failed_tests": run.failed_tests,
|
||||
"duration_seconds": run.duration_seconds,
|
||||
"output": run.output,
|
||||
}
|
||||
|
||||
# In Historie suchen
|
||||
for run in test_runs:
|
||||
if run["id"] == run_id:
|
||||
return run
|
||||
|
||||
raise HTTPException(status_code=404, detail=f"Run '{run_id}' nicht gefunden")
|
||||
|
||||
|
||||
@router.get("/coverage")
|
||||
async def get_coverage():
|
||||
"""
|
||||
Gibt aggregierte Coverage-Informationen zurueck.
|
||||
"""
|
||||
coverage_data = []
|
||||
total_coverage = 0.0
|
||||
count = 0
|
||||
|
||||
for service_def in SERVICE_DEFINITIONS:
|
||||
info = build_service_info(service_def)
|
||||
if info.coverage_percent:
|
||||
coverage_data.append({
|
||||
"service": info.service,
|
||||
"display_name": info.display_name,
|
||||
"coverage_percent": round(info.coverage_percent, 1),
|
||||
"language": info.language,
|
||||
})
|
||||
total_coverage += info.coverage_percent
|
||||
count += 1
|
||||
|
||||
return {
|
||||
"services": coverage_data,
|
||||
"average_coverage": round(total_coverage / count, 1) if count > 0 else 0,
|
||||
"total_services": count,
|
||||
}
|
||||
|
||||
|
||||
@router.get("/health")
|
||||
async def get_test_health():
|
||||
"""
|
||||
Gibt den Status der Test-Infrastruktur zurueck.
|
||||
"""
|
||||
go_available = check_go_available()
|
||||
pytest_available = check_pytest_available()
|
||||
|
||||
return {
|
||||
"status": "healthy",
|
||||
"mode": RUN_MODE, # "docker", "local", oder "demo"
|
||||
"services_monitored": len(SERVICE_DEFINITIONS),
|
||||
"project_root": str(PROJECT_ROOT),
|
||||
"project_root_exists": PROJECT_ROOT.exists(),
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"runners": {
|
||||
"go_test": "available" if go_available else "not_installed",
|
||||
"pytest": "available" if pytest_available else "not_installed",
|
||||
"jest": "available", # TODO: check Node.js
|
||||
"playwright": "available", # TODO: check Playwright
|
||||
"bqas": "available", # BQAS hat seinen eigenen Service
|
||||
},
|
||||
"versions": {
|
||||
"go": get_go_version() if go_available else None,
|
||||
"pytest": get_pytest_version() if pytest_available else None,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@router.get("/db-status")
|
||||
async def get_db_status():
|
||||
"""
|
||||
Gibt den Status der PostgreSQL-Datenbankverbindung zurueck.
|
||||
Wird vom Dashboard ServiceStatus verwendet.
|
||||
"""
|
||||
import time
|
||||
from ...database import check_db_connection, DATABASE_URL
|
||||
|
||||
start_time = time.time()
|
||||
is_connected = check_db_connection()
|
||||
response_time = int((time.time() - start_time) * 1000)
|
||||
|
||||
# Parse host from DATABASE_URL (hide password)
|
||||
try:
|
||||
# postgresql://user:pass@host:port/db -> host:port
|
||||
url_parts = DATABASE_URL.split("@")
|
||||
if len(url_parts) > 1:
|
||||
host_part = url_parts[1].split("/")[0]
|
||||
else:
|
||||
host_part = "unknown"
|
||||
except:
|
||||
host_part = "unknown"
|
||||
|
||||
return {
|
||||
"status": "online" if is_connected else "offline",
|
||||
"host": host_part,
|
||||
"response_time_ms": response_time,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
}
|
||||
23
backend/api/tests/registry/services/__init__.py
Normal file
23
backend/api/tests/registry/services/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""
|
||||
Test Registry Services
|
||||
|
||||
Business logic and helper services.
|
||||
"""
|
||||
|
||||
from .error_handling import (
|
||||
extract_go_error,
|
||||
classify_go_error,
|
||||
suggest_go_fix,
|
||||
extract_pytest_error,
|
||||
classify_pytest_error,
|
||||
suggest_pytest_fix,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"extract_go_error",
|
||||
"classify_go_error",
|
||||
"suggest_go_fix",
|
||||
"extract_pytest_error",
|
||||
"classify_pytest_error",
|
||||
"suggest_pytest_fix",
|
||||
]
|
||||
137
backend/api/tests/registry/services/error_handling.py
Normal file
137
backend/api/tests/registry/services/error_handling.py
Normal file
@@ -0,0 +1,137 @@
|
||||
"""
|
||||
Error Analysis and Classification Helpers
|
||||
|
||||
Provides error extraction, classification, and fix suggestions for Go and Python tests.
|
||||
"""
|
||||
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Go Error Helpers
|
||||
# ==============================================================================
|
||||
|
||||
def extract_go_error(output: str) -> str:
    """Pull the most relevant error lines out of go-test output.

    Collects up to three lines that look like failures (FAIL/Error/panic
    markers, expected/got comparisons, file:line references) and joins
    them with " | ". Falls back to the first 200 characters of the raw
    output when nothing matches. Empty input yields "".
    """
    if not output:
        return ""

    relevant = []
    for raw in output.strip().split("\n"):
        # Marker checks run first, so "--- FAIL: ..." headers are kept
        # before the generic "---" separator skip applies.
        if "Error:" in raw or "FAIL" in raw or "panic:" in raw:
            relevant.append(raw.strip())
        elif raw.strip().startswith("---"):
            continue
        elif "expected" in raw.lower() or "got" in raw.lower():
            relevant.append(raw.strip())
        elif ".go:" in raw:
            relevant.append(raw.strip())

    if not relevant:
        return output[:200]
    return " | ".join(relevant[:3])
|
||||
|
||||
|
||||
def classify_go_error(error_msg: str) -> str:
    """Map a Go test error message to a coarse error category.

    Returns "unknown" for empty input and "logic_error" when no
    category rule matches.
    """
    if not error_msg:
        return "unknown"

    text = error_msg.lower()
    # Ordered rule list: the first matching category wins.
    rules = (
        ("nil_pointer", lambda t: "nil pointer" in t or "panic" in t),
        ("assertion", lambda t: "expected" in t and "got" in t),
        ("timeout", lambda t: "timeout" in t),
        ("network", lambda t: "connection" in t or "dial" in t),
        ("not_found", lambda t: "not found" in t or "does not exist" in t),
        ("permission", lambda t: "permission" in t or "unauthorized" in t),
    )
    for label, matches in rules:
        if matches(text):
            return label
    return "logic_error"
|
||||
|
||||
|
||||
def suggest_go_fix(error_msg: str) -> str:
    """Return a human-readable fix hint for a Go test failure.

    The hint is chosen via classify_go_error; unclassifiable errors get
    the generic stack-trace suggestion.
    """
    hints = {
        "nil_pointer": "Pruefe ob alle Pointer initialisiert sind. Fuege nil-Checks hinzu.",
        "assertion": "Vergleiche die erwarteten mit den tatsaechlichen Werten. Pruefe die Test-Eingabedaten.",
        "timeout": "Erhoehe das Timeout oder optimiere die Funktion. Pruefe Netzwerkverbindungen.",
        "network": "Pruefe ob der Service erreichbar ist. Stelle sicher dass Mocks korrekt konfiguriert sind.",
        "not_found": "Pruefe ob die erwarteten Ressourcen existieren. Aktualisiere Test-Fixtures.",
        "permission": "Pruefe Berechtigungen und Auth-Token im Test-Setup.",
        "logic_error": "Pruefe die Geschaeftslogik und die Test-Annahmen.",
        "unknown": "Analysiere den Stack-Trace fuer mehr Details.",
    }
    fallback = hints["unknown"]
    return hints.get(classify_go_error(error_msg), fallback)
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# Python Error Helpers
|
||||
# ==============================================================================
|
||||
|
||||
def extract_pytest_error(output: str, test_name: str) -> str:
    """Extract the failure details for *test_name* from pytest output.

    Locates the FAILED block belonging to the test and returns the lines
    around the first AssertionError/Error/Exception line (one line of
    context before, two after), joined by " | ". Returns "" when the
    block or an error line cannot be found.
    """
    if not output:
        return ""

    # Capture everything between this test's FAILED marker and the next
    # status marker / summary separator / end of output.
    block_re = rf'FAILED.*{re.escape(test_name)}.*?\n(.*?)(?=FAILED|PASSED|====|$)'
    found = re.search(block_re, output, re.DOTALL)
    if not found:
        return ""

    detail_lines = [l.strip() for l in found.group(1).split("\n") if l.strip()]
    for idx, line in enumerate(detail_lines):
        if "AssertionError" in line or "Error" in line or "Exception" in line:
            start = max(0, idx - 1)
            stop = min(len(detail_lines), idx + 3)
            return " | ".join(detail_lines[start:stop])

    return ""
|
||||
|
||||
|
||||
def classify_pytest_error(error_msg: str) -> str:
    """Map a pytest error message to a coarse error category.

    Returns "unknown" for empty input and "logic_error" when nothing
    matches.
    """
    if not error_msg:
        return "unknown"

    # Exception-name markers; checked in order, first match wins.
    exact_markers = (
        ("AssertionError", "assertion"),
        ("TypeError", "type_error"),
        ("AttributeError", "attribute_error"),
        ("KeyError", "key_error"),
        ("ValueError", "value_error"),
    )
    for marker, category in exact_markers:
        if marker in error_msg:
            return category

    if "ImportError" in error_msg or "ModuleNotFoundError" in error_msg:
        return "import_error"
    if "ConnectionError" in error_msg or "timeout" in error_msg.lower():
        return "network"
    return "logic_error"
|
||||
|
||||
|
||||
def suggest_pytest_fix(error_msg: str) -> str:
    """Return a human-readable fix hint for a Python test failure.

    The hint is chosen via classify_pytest_error; unclassifiable errors
    get the generic stack-trace suggestion.
    """
    hints = {
        "assertion": "Pruefe die erwarteten vs. tatsaechlichen Werte. Sind die Test-Daten aktuell?",
        "type_error": "Pruefe die Typen der uebergebenen Argumente. Evtl. fehlt eine Typkonvertierung.",
        "attribute_error": "Das Objekt hat dieses Attribut nicht. Pruefe die Initialisierung.",
        "key_error": "Der Schluessel existiert nicht im Dict. Pruefe die Test-Daten.",
        "value_error": "Ungueltiger Wert uebergeben. Pruefe die Eingabeparameter.",
        "import_error": "Modul nicht gefunden. Pruefe die Abhaengigkeiten und den Pfad.",
        "network": "Netzwerkfehler. Sind alle Services gestartet? Sind Mocks konfiguriert?",
        "logic_error": "Logikfehler. Pruefe die Geschaeftslogik und Test-Annahmen.",
        "unknown": "Analysiere den Stack-Trace fuer mehr Details.",
    }
    fallback = hints["unknown"]
    return hints.get(classify_pytest_error(error_msg), fallback)
|
||||
500
backend/api/tests/repository.py
Normal file
500
backend/api/tests/repository.py
Normal file
@@ -0,0 +1,500 @@
|
||||
"""
|
||||
Repository fuer Test Registry Datenbank-Operationen.
|
||||
|
||||
Abstrahiert alle DB-Zugriffe fuer:
|
||||
- Test-Runs speichern und abrufen
|
||||
- Test-Ergebnisse verwalten
|
||||
- Backlog-Items verwalten
|
||||
- Service-Statistiken aktualisieren
|
||||
"""
|
||||
from datetime import datetime
|
||||
from typing import List, Optional, Dict, Any
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import func, desc
|
||||
|
||||
from .db_models import (
|
||||
TestRunDB,
|
||||
TestResultDB,
|
||||
FailedTestBacklogDB,
|
||||
TestFixHistoryDB,
|
||||
TestServiceStatsDB
|
||||
)
|
||||
|
||||
|
||||
class TestRepository:
    """Repository for all test-related database operations.

    Wraps a SQLAlchemy session and centralises access to test runs,
    per-test results, the failed-test backlog, the fix history and the
    aggregated per-service statistics.
    """

    def __init__(self, db: Session):
        # The session is owned by the caller (e.g. a FastAPI dependency);
        # the repository commits on it but never opens or closes sessions.
        self.db = db

    # ========================================
    # Test Runs
    # ========================================

    def create_run(
        self,
        run_id: str,
        service: str,
        framework: str,
        triggered_by: str = "manual",
        git_commit: Optional[str] = None,
        git_branch: Optional[str] = None
    ) -> TestRunDB:
        """Create a new test run in "running" state and persist it."""
        run = TestRunDB(
            run_id=run_id,
            service=service,
            framework=framework,
            started_at=datetime.utcnow(),
            status="running",
            triggered_by=triggered_by,
            git_commit=git_commit,
            git_branch=git_branch
        )
        self.db.add(run)
        self.db.commit()
        self.db.refresh(run)
        return run

    def complete_run(
        self,
        run_id: str,
        status: str,
        total_tests: int,
        passed_tests: int,
        failed_tests: int,
        skipped_tests: int = 0,
        duration_seconds: float = 0,
        output: Optional[str] = None
    ) -> Optional[TestRunDB]:
        """Mark a run as finished and refresh the service statistics.

        Returns the updated run, or None when no run with *run_id* exists.
        """
        run = self.db.query(TestRunDB).filter(TestRunDB.run_id == run_id).first()
        if run:
            run.completed_at = datetime.utcnow()
            run.status = status
            run.total_tests = total_tests
            run.passed_tests = passed_tests
            run.failed_tests = failed_tests
            run.skipped_tests = skipped_tests
            run.duration_seconds = duration_seconds
            # Truncate so oversized logs don't bloat the row.
            run.output = output[:10000] if output else None
            self.db.commit()
            self.db.refresh(run)

            # Keep the per-service aggregate in sync with the latest run.
            self._update_service_stats(run)

        return run

    def get_run(self, run_id: str) -> Optional[TestRunDB]:
        """Fetch a single run by its run_id."""
        return self.db.query(TestRunDB).filter(TestRunDB.run_id == run_id).first()

    def get_runs(
        self,
        service: Optional[str] = None,
        limit: int = 50,
        offset: int = 0
    ) -> List[TestRunDB]:
        """Fetch runs, newest first, optionally filtered by service."""
        query = self.db.query(TestRunDB)
        if service:
            query = query.filter(TestRunDB.service == service)
        return query.order_by(desc(TestRunDB.started_at)).offset(offset).limit(limit).all()

    def get_runs_count(self, service: Optional[str] = None) -> int:
        """Count runs, optionally filtered by service."""
        query = self.db.query(func.count(TestRunDB.id))
        if service:
            query = query.filter(TestRunDB.service == service)
        return query.scalar() or 0

    # ========================================
    # Test Results
    # ========================================

    def add_results(self, run_id: str, results: List[Dict[str, Any]]) -> int:
        """Store multiple test results for a run; returns how many were added.

        Failed/errored results are additionally mirrored into the backlog.
        """
        count = 0
        for result in results:
            db_result = TestResultDB(
                run_id=run_id,
                # Result dicts come from different runners, so both key
                # spellings ("name"/"test_name", "file_path"/"test_file")
                # are accepted.
                test_name=result.get("name") or result.get("test_name", "unknown"),
                test_file=result.get("file_path") or result.get("test_file"),
                line_number=result.get("line_number"),
                status=result.get("status", "unknown"),
                duration_ms=result.get("duration_ms"),
                error_message=result.get("error_message"),
                error_type=result.get("error_type"),
                output=result.get("output")
            )
            self.db.add(db_result)
            count += 1

            # Failed tests also feed the failed-test backlog.
            if result.get("status") in ["failed", "error"]:
                self._update_backlog(
                    run_id=run_id,
                    test_name=result.get("name") or result.get("test_name", "unknown"),
                    test_file=result.get("file_path") or result.get("test_file"),
                    error_message=result.get("error_message"),
                    error_type=result.get("error_type"),
                    suggestion=result.get("suggestion")
                )

        self.db.commit()
        return count

    def get_results(self, run_id: str) -> List[TestResultDB]:
        """Fetch all results belonging to a run."""
        return self.db.query(TestResultDB).filter(TestResultDB.run_id == run_id).all()

    def get_failed_results(self, run_id: str) -> List[TestResultDB]:
        """Fetch only the failed/errored results of a run."""
        return self.db.query(TestResultDB).filter(
            TestResultDB.run_id == run_id,
            TestResultDB.status.in_(["failed", "error"])
        ).all()

    # ========================================
    # Backlog
    # ========================================

    def _update_backlog(
        self,
        run_id: str,
        test_name: str,
        test_file: Optional[str],
        error_message: Optional[str],
        error_type: Optional[str],
        suggestion: Optional[str] = None
    ):
        """Create or update the backlog entry for a failed test.

        Does not commit; the caller (add_results) commits the batch.
        """
        # The run supplies service and framework for the backlog entry.
        run = self.db.query(TestRunDB).filter(TestRunDB.run_id == run_id).first()
        if not run:
            return

        # A backlog entry is keyed by (test_name, service).
        backlog = self.db.query(FailedTestBacklogDB).filter(
            FailedTestBacklogDB.test_name == test_name,
            FailedTestBacklogDB.service == run.service
        ).first()

        now = datetime.utcnow()

        if backlog:
            # Entry already exists - bump counters and refresh details.
            backlog.last_failed_at = now
            backlog.failure_count += 1
            backlog.error_message = error_message or backlog.error_message
            backlog.error_type = error_type or backlog.error_type
            if suggestion:
                backlog.fix_suggestion = suggestion
            # A test that fails again after being fixed is reopened.
            if backlog.status == "fixed":
                backlog.status = "open"
                backlog.notes = f"Erneut fehlgeschlagen nach Fix am {now.isoformat()}"
        else:
            # First failure of this test - create a fresh entry.
            backlog = FailedTestBacklogDB(
                test_name=test_name,
                test_file=test_file,
                service=run.service,
                framework=run.framework,
                error_message=error_message,
                error_type=error_type,
                first_failed_at=now,
                last_failed_at=now,
                failure_count=1,
                status="open",
                priority=self._calculate_priority(error_type),
                fix_suggestion=suggestion
            )
            self.db.add(backlog)

    def _calculate_priority(self, error_type: Optional[str]) -> str:
        """Derive a backlog priority ("high"/"medium") from the error type."""
        high_priority = ["nil_pointer", "panic", "security", "critical"]
        medium_priority = ["assertion", "type_error", "value_error"]

        if error_type:
            if any(p in error_type.lower() for p in high_priority):
                return "high"
            if any(p in error_type.lower() for p in medium_priority):
                return "medium"
        # Unknown or unmatched error types default to medium as well.
        return "medium"

    def get_backlog(
        self,
        status: Optional[str] = None,
        service: Optional[str] = None,
        priority: Optional[str] = None,
        limit: int = 100,
        offset: int = 0
    ) -> List[FailedTestBacklogDB]:
        """Fetch backlog entries, most frequent / most recent failures first."""
        query = self.db.query(FailedTestBacklogDB)

        if status:
            query = query.filter(FailedTestBacklogDB.status == status)
        if service:
            query = query.filter(FailedTestBacklogDB.service == service)
        if priority:
            query = query.filter(FailedTestBacklogDB.priority == priority)

        return query.order_by(
            desc(FailedTestBacklogDB.failure_count),
            desc(FailedTestBacklogDB.last_failed_at)
        ).offset(offset).limit(limit).all()

    def get_backlog_count(
        self,
        status: Optional[str] = None,
        service: Optional[str] = None
    ) -> int:
        """Count backlog entries with optional status/service filters."""
        query = self.db.query(func.count(FailedTestBacklogDB.id))
        if status:
            query = query.filter(FailedTestBacklogDB.status == status)
        if service:
            query = query.filter(FailedTestBacklogDB.service == service)
        return query.scalar() or 0

    def get_backlog_item(self, backlog_id: int) -> Optional[FailedTestBacklogDB]:
        """Fetch a single backlog entry by primary key."""
        return self.db.query(FailedTestBacklogDB).filter(FailedTestBacklogDB.id == backlog_id).first()

    def update_backlog_status(
        self,
        backlog_id: int,
        status: str,
        notes: Optional[str] = None,
        assigned_to: Optional[str] = None
    ) -> Optional[FailedTestBacklogDB]:
        """Update status (and optionally notes/assignee) of a backlog entry.

        Returns None when the entry does not exist.
        """
        backlog = self.get_backlog_item(backlog_id)
        if backlog:
            backlog.status = status
            if notes:
                backlog.notes = notes
            if assigned_to:
                backlog.assigned_to = assigned_to
            self.db.commit()
            self.db.refresh(backlog)
        return backlog

    def update_backlog_priority(self, backlog_id: int, priority: str) -> Optional[FailedTestBacklogDB]:
        """Update the priority of a backlog entry; None when it does not exist."""
        backlog = self.get_backlog_item(backlog_id)
        if backlog:
            backlog.priority = priority
            self.db.commit()
            self.db.refresh(backlog)
        return backlog

    # ========================================
    # Fix History
    # ========================================

    def add_fix_attempt(
        self,
        backlog_id: int,
        fix_type: str,
        fix_description: str,
        commit_hash: Optional[str] = None,
        success: bool = False
    ) -> TestFixHistoryDB:
        """Record a fix attempt; a successful one closes the backlog entry."""
        fix = TestFixHistoryDB(
            backlog_id=backlog_id,
            fix_type=fix_type,
            fix_description=fix_description,
            commit_hash=commit_hash,
            success=success
        )
        self.db.add(fix)

        # Successful fixes mark the linked backlog entry as fixed.
        if success:
            backlog = self.get_backlog_item(backlog_id)
            if backlog:
                backlog.status = "fixed"

        self.db.commit()
        self.db.refresh(fix)
        return fix

    def get_fix_history(self, backlog_id: int) -> List[TestFixHistoryDB]:
        """Fetch all fix attempts for a backlog entry, newest first."""
        return self.db.query(TestFixHistoryDB).filter(
            TestFixHistoryDB.backlog_id == backlog_id
        ).order_by(desc(TestFixHistoryDB.created_at)).all()

    # ========================================
    # Service Statistics
    # ========================================

    def _update_service_stats(self, run: TestRunDB):
        """Overwrite the per-service aggregate with the numbers of *run*."""
        stats = self.db.query(TestServiceStatsDB).filter(
            TestServiceStatsDB.service == run.service
        ).first()

        if not stats:
            stats = TestServiceStatsDB(service=run.service)
            self.db.add(stats)

        stats.total_tests = run.total_tests
        stats.passed_tests = run.passed_tests
        stats.failed_tests = run.failed_tests
        stats.skipped_tests = run.skipped_tests
        stats.pass_rate = (run.passed_tests / run.total_tests * 100) if run.total_tests > 0 else 0.0
        stats.last_run_id = run.run_id
        stats.last_run_at = run.completed_at or datetime.utcnow()
        stats.last_status = run.status

        self.db.commit()

    def get_service_stats(self, service: str) -> Optional[TestServiceStatsDB]:
        """Fetch the aggregate statistics for one service."""
        return self.db.query(TestServiceStatsDB).filter(
            TestServiceStatsDB.service == service
        ).first()

    def get_all_service_stats(self) -> List[TestServiceStatsDB]:
        """Fetch the aggregate statistics of every service."""
        return self.db.query(TestServiceStatsDB).all()

    # ========================================
    # History & Trends
    # ========================================

    def get_run_history(
        self,
        service: Optional[str] = None,
        days: int = 30,
        limit: int = 100
    ) -> List[Dict[str, Any]]:
        """
        Return the run history for trend analysis.

        Aggregated per (day, service), newest day first, limited to the
        last *days* days.
        """
        from datetime import timedelta
        cutoff = datetime.utcnow() - timedelta(days=days)

        query = self.db.query(
            func.date(TestRunDB.started_at).label('date'),
            TestRunDB.service,
            func.count(TestRunDB.id).label('runs'),
            func.sum(TestRunDB.total_tests).label('total_tests'),
            func.sum(TestRunDB.passed_tests).label('passed'),
            func.sum(TestRunDB.failed_tests).label('failed')
        ).filter(TestRunDB.started_at >= cutoff)

        if service:
            query = query.filter(TestRunDB.service == service)

        results = query.group_by(
            func.date(TestRunDB.started_at),
            TestRunDB.service
        ).order_by(desc(func.date(TestRunDB.started_at))).limit(limit).all()

        return [
            {
                "date": str(r.date),
                "service": r.service,
                "runs": r.runs,
                # SUM() yields NULL for empty groups; coerce to 0.
                "total_tests": r.total_tests or 0,
                "passed": r.passed or 0,
                "failed": r.failed or 0,
                "pass_rate": round((r.passed / r.total_tests * 100) if r.total_tests else 0, 1)
            }
            for r in results
        ]

    def get_summary_stats(self) -> Dict[str, Any]:
        """Return aggregated test statistics across all services."""
        stats = self.db.query(
            func.sum(TestServiceStatsDB.total_tests).label('total_tests'),
            func.sum(TestServiceStatsDB.passed_tests).label('passed'),
            func.sum(TestServiceStatsDB.failed_tests).label('failed'),
            func.sum(TestServiceStatsDB.skipped_tests).label('skipped'),
            func.count(TestServiceStatsDB.id).label('services_count')
        ).first()

        # SUM() is NULL when the table is empty; coerce to 0.
        total = stats.total_tests or 0
        passed = stats.passed or 0

        return {
            "total_tests": total,
            "total_passed": passed,
            "total_failed": stats.failed or 0,
            "total_skipped": stats.skipped or 0,
            "services_count": stats.services_count or 0,
            "overall_pass_rate": round((passed / total * 100) if total > 0 else 0, 1)
        }

    # ========================================
    # Migration Helper
    # ========================================

    def migrate_from_json(self, persisted_results: Dict[str, Dict]) -> int:
        """
        One-off migration of legacy JSON results into the database.

        Run once on upgrade. Returns the number of services migrated.
        """
        count = 0
        for service, data in persisted_results.items():
            # Upsert the service-level statistics.
            stats = self.db.query(TestServiceStatsDB).filter(
                TestServiceStatsDB.service == service
            ).first()

            if not stats:
                stats = TestServiceStatsDB(service=service)
                self.db.add(stats)

            stats.total_tests = data.get("total", 0)
            stats.passed_tests = data.get("passed", 0)
            stats.failed_tests = data.get("failed", 0)
            stats.pass_rate = (stats.passed_tests / stats.total_tests * 100) if stats.total_tests > 0 else 0.0

            last_run = data.get("last_run")
            if last_run:
                try:
                    stats.last_run_at = datetime.fromisoformat(last_run)
                # NOTE(review): bare except keeps the migration best-effort;
                # consider narrowing to ValueError.
                except:
                    stats.last_run_at = datetime.utcnow()

            stats.last_status = data.get("status", "unknown")

            # Move previously failed tests into the backlog.
            for failed in data.get("failed_test_ids", []):
                if isinstance(failed, dict):
                    test_name = failed.get("id") or failed.get("name", "unknown")
                    existing = self.db.query(FailedTestBacklogDB).filter(
                        FailedTestBacklogDB.test_name == test_name,
                        FailedTestBacklogDB.service == service
                    ).first()

                    if not existing:
                        backlog = FailedTestBacklogDB(
                            test_name=test_name,
                            test_file=failed.get("file_path"),
                            service=service,
                            error_message=failed.get("error_message"),
                            error_type=failed.get("error_type"),
                            first_failed_at=stats.last_run_at or datetime.utcnow(),
                            last_failed_at=stats.last_run_at or datetime.utcnow(),
                            failure_count=1,
                            status="open",
                            priority=self._calculate_priority(failed.get("error_type")),
                            fix_suggestion=failed.get("suggestion")
                        )
                        self.db.add(backlog)

            count += 1

        self.db.commit()
        return count
|
||||
11
backend/api/tests/runners/__init__.py
Normal file
11
backend/api/tests/runners/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
"""
|
||||
Test Runners
|
||||
|
||||
Spezialisierte Runner fuer verschiedene Test-Frameworks.
|
||||
"""
|
||||
|
||||
from .go_runner import GoTestRunner
|
||||
from .python_runner import PytestRunner
|
||||
from .bqas_runner import BQASRunner
|
||||
|
||||
__all__ = ["GoTestRunner", "PytestRunner", "BQASRunner"]
|
||||
285
backend/api/tests/runners/bqas_runner.py
Normal file
285
backend/api/tests/runners/bqas_runner.py
Normal file
@@ -0,0 +1,285 @@
|
||||
"""
|
||||
BQAS Test Runner
|
||||
|
||||
Proxy zu den BQAS-Endpoints im Voice-Service.
|
||||
"""
|
||||
|
||||
import httpx
|
||||
from datetime import datetime
|
||||
from typing import Dict, Optional
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
class BQASResult:
    """Result of one BQAS test-suite run."""
    suite_type: str  # one of "golden", "rag", "synthetic"
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    avg_score: float = 0.0
    duration_seconds: float = 0.0
    metrics: Dict = field(default_factory=dict)
    failed_test_ids: list = field(default_factory=list)
    raw_output: str = ""


class BQASRunner:
    """
    Runner for BQAS test suites.

    Thin proxy that forwards suite runs and metric queries to the
    voice service (port 8091 by default) and falls back to canned
    demo data whenever the service cannot be reached.
    """

    VOICE_SERVICE_URL = "http://localhost:8091"

    def __init__(self, api_base: Optional[str] = None):
        # Allow overriding the voice-service base URL (e.g. in tests).
        self.api_base = api_base or self.VOICE_SERVICE_URL

    async def run_golden(self, timeout: int = 120) -> BQASResult:
        """Run the golden test suite.

        Returns:
            BQASResult with all metrics.
        """
        return await self._run_suite("golden", timeout)

    async def run_rag(self, timeout: int = 120) -> BQASResult:
        """Run the RAG test suite.

        Returns:
            BQASResult with all metrics.
        """
        return await self._run_suite("rag", timeout)

    async def run_synthetic(self, timeout: int = 300) -> BQASResult:
        """Run the synthetic test suite (slower due to LLM generation).

        Returns:
            BQASResult with all metrics.
        """
        return await self._run_suite("synthetic", timeout)

    async def _run_suite(self, suite_type: str, timeout: int) -> BQASResult:
        """Execute one suite against the voice service and normalize the reply."""
        started = datetime.now()

        try:
            async with httpx.AsyncClient(timeout=float(timeout)) as http:
                resp = await http.post(
                    f"{self.api_base}/api/v1/bqas/run/{suite_type}",
                )

                if resp.status_code != 200:
                    # Non-200 answer: hand back the raw error, no metrics.
                    return BQASResult(
                        suite_type=suite_type,
                        raw_output=f"HTTP {resp.status_code}: {resp.text}",
                    )

                payload = resp.json()
                m = payload.get("metrics", {})

                return BQASResult(
                    suite_type=suite_type,
                    total_tests=m.get("total_tests", 0),
                    passed_tests=m.get("passed_tests", 0),
                    failed_tests=m.get("failed_tests", 0),
                    avg_score=m.get("avg_composite_score", 0.0),
                    duration_seconds=(datetime.now() - started).total_seconds(),
                    metrics=m,
                    failed_test_ids=m.get("failed_test_ids", []),
                    raw_output=str(payload),
                )

        except httpx.TimeoutException:
            return BQASResult(
                suite_type=suite_type,
                duration_seconds=(datetime.now() - started).total_seconds(),
                raw_output=f"Timeout nach {timeout} Sekunden",
            )

        except httpx.ConnectError:
            # Service unreachable -> canned demo data instead of an error.
            return self._get_demo_result(suite_type)

        except Exception as e:
            return BQASResult(
                suite_type=suite_type,
                duration_seconds=(datetime.now() - started).total_seconds(),
                raw_output=str(e),
            )

    def _get_demo_result(self, suite_type: str) -> BQASResult:
        """Return canned demo results used when the service is unreachable."""
        note = "Demo-Modus: Voice-Service nicht erreichbar"

        if suite_type == "golden":
            return BQASResult(
                suite_type=suite_type,
                total_tests=97,
                passed_tests=89,
                failed_tests=8,
                avg_score=4.15,
                duration_seconds=45.2,
                metrics={
                    "avg_intent_accuracy": 91.7,
                    "avg_faithfulness": 4.2,
                    "avg_relevance": 4.1,
                    "avg_coherence": 4.3,
                    "safety_pass_rate": 0.98,
                },
                failed_test_ids=[
                    "GT-023", "GT-045", "GT-067", "GT-072",
                    "GT-081", "GT-089", "GT-092", "GT-095",
                ],
                raw_output=note,
            )

        if suite_type == "rag":
            return BQASResult(
                suite_type=suite_type,
                total_tests=20,
                passed_tests=18,
                failed_tests=2,
                avg_score=4.25,
                duration_seconds=62.1,
                metrics={
                    "avg_faithfulness": 4.3,
                    "avg_relevance": 4.2,
                    "citation_accuracy": 0.92,
                },
                failed_test_ids=["RAG-EH-003", "RAG-HAL-002"],
                raw_output=note,
            )

        # Anything else is treated as the synthetic suite.
        return BQASResult(
            suite_type=suite_type,
            total_tests=50,
            passed_tests=45,
            failed_tests=5,
            avg_score=3.95,
            duration_seconds=180.5,
            metrics={
                "avg_robustness": 3.8,
                "avg_coherence": 4.1,
            },
            failed_test_ids=["SYN-001", "SYN-015", "SYN-023", "SYN-041", "SYN-048"],
            raw_output=note,
        )

    async def _get_json(self, path: str, params: Optional[Dict] = None) -> Optional[Dict]:
        """GET a JSON document from the voice service; None on any failure."""
        try:
            async with httpx.AsyncClient(timeout=10.0) as http:
                resp = await http.get(f"{self.api_base}{path}", params=params)
                if resp.status_code == 200:
                    return resp.json()
        except Exception:
            # Deliberate best-effort: callers fall back to demo data.
            pass
        return None

    async def get_latest_metrics(self) -> Optional[Dict]:
        """Fetch the most recent metrics from the voice service.

        Returns:
            Dict with all metrics, or demo data when unreachable.
        """
        live = await self._get_json("/api/v1/bqas/latest-metrics")
        if live is not None:
            return live

        # Demo data
        return {
            "golden": {
                "total_tests": 97,
                "passed_tests": 89,
                "failed_tests": 8,
                "avg_composite_score": 4.15,
                "last_run": datetime.now().isoformat(),
            },
            "rag": {
                "total_tests": 20,
                "passed_tests": 18,
                "failed_tests": 2,
                "avg_composite_score": 4.25,
                "last_run": datetime.now().isoformat(),
            },
            "synthetic": None,
        }

    async def get_trend(self, days: int = 30) -> Optional[Dict]:
        """Fetch trend data.

        Args:
            days: Number of days to cover.

        Returns:
            Dict with trend data, or demo data when unreachable.
        """
        live = await self._get_json("/api/v1/bqas/trend", params={"days": days})
        if live is not None:
            return live

        # Demo data
        return {
            "dates": ["2026-01-02", "2026-01-09", "2026-01-16", "2026-01-23", "2026-01-30"],
            "scores": [3.9, 4.0, 4.1, 4.15, 4.15],
            "trend": "improving",
        }

    async def get_runs(self, limit: int = 20) -> list:
        """Fetch the most recent test runs.

        Args:
            limit: Maximum number of runs.

        Returns:
            List of run dicts, or demo data when unreachable.
        """
        data = await self._get_json("/api/v1/bqas/runs", params={"limit": limit})
        if data is not None:
            return data.get("runs", [])

        # Demo data
        return [
            {
                "id": 1,
                "timestamp": "2026-01-30T07:00:00Z",
                "git_commit": "abc1234",
                "golden_score": 4.15,
                "total_tests": 97,
                "passed_tests": 89,
                "failed_tests": 8,
                "duration_seconds": 45.2,
            },
            {
                "id": 2,
                "timestamp": "2026-01-29T07:00:00Z",
                "git_commit": "def5678",
                "golden_score": 4.12,
                "total_tests": 97,
                "passed_tests": 88,
                "failed_tests": 9,
                "duration_seconds": 44.8,
            },
        ]
|
||||
229
backend/api/tests/runners/go_runner.py
Normal file
229
backend/api/tests/runners/go_runner.py
Normal file
@@ -0,0 +1,229 @@
|
||||
"""
|
||||
Go Test Runner
|
||||
|
||||
Fuehrt Go-Tests aus und parsed die Ergebnisse.
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
class GoTestResult:
    """Result of a single Go test."""
    package: str  # import path of the package the test belongs to
    test_name: str  # e.g. "TestMyFunction"
    passed: bool
    duration_seconds: float
    output: str = ""  # concatenated per-test output lines


@dataclass
class GoTestSummary:
    """Summary of one `go test` run."""
    total: int = 0
    passed: int = 0
    failed: int = 0
    skipped: int = 0
    # Longest package duration seen, not the sum across packages.
    duration_seconds: float = 0.0
    coverage_percent: Optional[float] = None  # from a "coverage: NN.N%" line, if present
    results: List[GoTestResult] = field(default_factory=list)
    raw_output: str = ""  # truncated raw output, for debugging


class GoTestRunner:
    """
    Runner for Go tests.

    Uses `go test -json` for structured, line-delimited event output.
    """

    def __init__(self, base_path: Path):
        # Directory containing the Go module / packages under test.
        self.base_path = base_path

    async def run(self, with_coverage: bool = True, timeout: int = 300) -> GoTestSummary:
        """
        Run `go test ./...` in the base path.

        Args:
            with_coverage: Also collect coverage (`-cover -coverprofile=coverage.out`)
            timeout: Timeout in seconds for the whole run

        Returns:
            GoTestSummary with all results; on error an empty summary whose
            `raw_output` describes the problem.
        """
        if not self.base_path.exists():
            return GoTestSummary(raw_output="Pfad existiert nicht")

        cmd = ["go", "test", "-v", "-json"]
        if with_coverage:
            cmd.extend(["-cover", "-coverprofile=coverage.out"])
        cmd.append("./...")

        try:
            result = subprocess.run(
                cmd,
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                timeout=timeout,
            )

            return self._parse_output(result.stdout, result.stderr)

        except subprocess.TimeoutExpired:
            return GoTestSummary(raw_output=f"Timeout nach {timeout} Sekunden")
        except FileNotFoundError:
            return GoTestSummary(raw_output="Go nicht installiert")
        except Exception as e:
            return GoTestSummary(raw_output=str(e))

    def _parse_output(self, stdout: str, stderr: str) -> GoTestSummary:
        """Parse the JSON event stream emitted by `go test -json`.

        Each stdout line is one JSON event (Action/Package/Test/Elapsed/Output).
        Non-JSON lines are ignored except for a "coverage: NN.N%" summary line.
        """
        summary = GoTestSummary(raw_output=stdout[:10000] if stdout else stderr[:10000])

        # Per-test output lines, keyed by "package:test".
        test_outputs: Dict[str, List[str]] = {}

        for line in stdout.split("\n"):
            if not line.strip():
                continue

            try:
                event = json.loads(line)
                action = event.get("Action")
                package = event.get("Package", "")
                test = event.get("Test", "")
                elapsed = event.get("Elapsed", 0)
                output = event.get("Output", "")

                # Collect test output (may arrive before the pass/fail event)
                if test and output:
                    key = f"{package}:{test}"
                    if key not in test_outputs:
                        test_outputs[key] = []
                    test_outputs[key].append(output)

                # Per-test result events
                if action == "pass" and test:
                    summary.passed += 1
                    summary.total += 1
                    summary.results.append(GoTestResult(
                        package=package,
                        test_name=test,
                        passed=True,
                        duration_seconds=elapsed,
                        output="".join(test_outputs.get(f"{package}:{test}", [])),
                    ))

                elif action == "fail" and test:
                    summary.failed += 1
                    summary.total += 1
                    summary.results.append(GoTestResult(
                        package=package,
                        test_name=test,
                        passed=False,
                        duration_seconds=elapsed,
                        output="".join(test_outputs.get(f"{package}:{test}", [])),
                    ))

                elif action == "skip" and test:
                    summary.skipped += 1
                    summary.total += 1

                # Package-level result (carries the package's total duration)
                elif action in ["pass", "fail"] and not test and elapsed:
                    summary.duration_seconds = max(summary.duration_seconds, elapsed)

            except json.JSONDecodeError:
                # Ignore non-JSON lines (e.g. coverage output)
                if "coverage:" in line.lower():
                    # e.g. "coverage: 75.2% of statements"
                    try:
                        parts = line.split("coverage:")
                        if len(parts) > 1:
                            percent_str = parts[1].strip().split("%")[0]
                            summary.coverage_percent = float(percent_str)
                    except (ValueError, IndexError):
                        pass

        return summary

    async def run_single_test(self, test_name: str, timeout: int = 60) -> GoTestResult:
        """
        Run a single test.

        Args:
            test_name: Name of the test (e.g. "TestMyFunction")
            timeout: Timeout in seconds

        Returns:
            GoTestResult for that specific test
        """
        cmd = ["go", "test", "-v", "-run", test_name, "./..."]

        try:
            result = subprocess.run(
                cmd,
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                timeout=timeout,
            )

            # NOTE(review): heuristic — any "PASS" in stdout counts as success;
            # confirm this is acceptable when -run matches multiple tests.
            passed = "PASS" in result.stdout
            return GoTestResult(
                package=str(self.base_path),
                test_name=test_name,
                passed=passed,
                duration_seconds=0.0,
                output=result.stdout + result.stderr,
            )

        except Exception as e:
            return GoTestResult(
                package=str(self.base_path),
                test_name=test_name,
                passed=False,
                duration_seconds=0.0,
                output=str(e),
            )

    async def get_coverage_report(self) -> Optional[Dict]:
        """
        Read the coverage report written by a previous `run(with_coverage=True)`.

        Returns:
            Dict with coverage details, or None if unavailable
        """
        coverage_file = self.base_path / "coverage.out"
        if not coverage_file.exists():
            return None

        try:
            result = subprocess.run(
                ["go", "tool", "cover", "-func=coverage.out"],
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                # Bounded like the other subprocess calls so a stuck
                # `go tool cover` cannot hang the caller indefinitely.
                timeout=60,
            )

            # Parse the "total:" line
            for line in result.stdout.split("\n"):
                if "total:" in line:
                    parts = line.split()
                    if len(parts) >= 3:
                        percent_str = parts[-1].replace("%", "")
                        return {
                            "total_coverage": float(percent_str),
                            "raw_output": result.stdout,
                        }

        except Exception:
            pass

        return None
|
||||
266
backend/api/tests/runners/python_runner.py
Normal file
266
backend/api/tests/runners/python_runner.py
Normal file
@@ -0,0 +1,266 @@
|
||||
"""
|
||||
Python Test Runner (pytest)
|
||||
|
||||
Fuehrt Python-Tests aus und parsed die Ergebnisse.
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import json
|
||||
import re
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
class PytestResult:
    """Result of a single pytest test."""
    node_id: str  # pytest node ID, e.g. "tests/test_a.py::test_one"
    test_name: str
    file_path: str
    passed: bool
    duration_seconds: float
    output: str = ""
    error_message: Optional[str] = None


@dataclass
class PytestSummary:
    """Summary of one pytest run."""
    total: int = 0
    passed: int = 0
    failed: int = 0
    skipped: int = 0
    errors: int = 0
    duration_seconds: float = 0.0
    coverage_percent: Optional[float] = None  # from the "TOTAL ... NN%" line, if present
    results: List[PytestResult] = field(default_factory=list)
    raw_output: str = ""  # truncated raw output, for debugging


class PytestRunner:
    """
    Runner for Python tests using pytest.

    Parses pytest's verbose text output into a structured summary.
    """

    def __init__(self, base_path: Path, venv_path: Optional[Path] = None):
        self.base_path = base_path
        self.venv_path = venv_path  # optional virtualenv to run pytest from

    def _get_python_cmd(self) -> str:
        """Return the Python executable (from the venv if one is configured)."""
        if self.venv_path and (self.venv_path / "bin" / "python").exists():
            return str(self.venv_path / "bin" / "python")
        return "python"

    async def run(self, with_coverage: bool = True, timeout: int = 300) -> PytestSummary:
        """
        Run pytest against the base path.

        Args:
            with_coverage: Collect coverage via pytest-cov
            timeout: Timeout in seconds

        Returns:
            PytestSummary with all results; on error an empty summary whose
            `raw_output` describes the problem.
        """
        if not self.base_path.exists():
            return PytestSummary(raw_output="Pfad existiert nicht")

        python_cmd = self._get_python_cmd()
        cmd = [python_cmd, "-m", "pytest", "-v", "--tb=short"]

        if with_coverage:
            cmd.extend(["--cov=.", "--cov-report=term-missing"])

        cmd.append(str(self.base_path))

        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
            )

            return self._parse_output(result.stdout, result.stderr)

        except subprocess.TimeoutExpired:
            return PytestSummary(raw_output=f"Timeout nach {timeout} Sekunden")
        except FileNotFoundError:
            return PytestSummary(raw_output="Python/pytest nicht installiert")
        except Exception as e:
            return PytestSummary(raw_output=str(e))

    def _parse_output(self, stdout: str, stderr: str) -> PytestSummary:
        """Parse pytest's verbose text output.

        Per-test lines give individual results; the final "=== N passed ... ==="
        line, when present, overrides the counters (it is authoritative).
        """
        output = stdout + stderr
        summary = PytestSummary(raw_output=output[:10000])

        # Per-test lines, e.g.:
        #   "test_file.py::test_name PASSED"
        #   "test_file.py::TestClass::test_name FAILED"   (class-based)
        #   "test_file.py::test_name[param-1] SKIPPED"    (parametrized)
        # The name group accepts "::"-joined segments so class-based tests are
        # captured too, and the parametrize bracket is matched non-greedily
        # per-character ([^\]]*) so it cannot overrun.
        test_pattern = re.compile(
            r"([\w/]+\.py)::((?:\w+::)*\w+(?:\[[^\]]*\])?)\s+(PASSED|FAILED|SKIPPED|ERROR)"
        )

        for match in test_pattern.finditer(output):
            file_path, test_name, status = match.groups()

            result = PytestResult(
                node_id=f"{file_path}::{test_name}",
                test_name=test_name,
                file_path=file_path,
                passed=status == "PASSED",
                duration_seconds=0.0,
            )
            summary.results.append(result)

            if status == "PASSED":
                summary.passed += 1
            elif status == "FAILED":
                summary.failed += 1
            elif status == "SKIPPED":
                summary.skipped += 1
            elif status == "ERROR":
                summary.errors += 1

        summary.total = len(summary.results)

        # Summary line, e.g. "=== 5 passed, 2 failed in 3.45s ==="
        summary_pattern = re.compile(
            r"=+\s*(?:(\d+)\s+passed)?[,\s]*(?:(\d+)\s+failed)?[,\s]*(?:(\d+)\s+skipped)?[,\s]*(?:(\d+)\s+error)?.*?in\s+([\d.]+)s"
        )
        match = summary_pattern.search(output)
        if match:
            if match.group(1):
                summary.passed = int(match.group(1))
            if match.group(2):
                summary.failed = int(match.group(2))
            if match.group(3):
                summary.skipped = int(match.group(3))
            if match.group(4):
                summary.errors = int(match.group(4))
            if match.group(5):
                summary.duration_seconds = float(match.group(5))

            summary.total = summary.passed + summary.failed + summary.skipped + summary.errors

        # Coverage line, e.g. "TOTAL    1234    567    54%"
        coverage_pattern = re.compile(r"TOTAL\s+\d+\s+\d+\s+(\d+)%")
        coverage_match = coverage_pattern.search(output)
        if coverage_match:
            summary.coverage_percent = float(coverage_match.group(1))

        return summary

    async def run_single_test(self, test_path: str, timeout: int = 60) -> PytestResult:
        """
        Run a single test.

        Args:
            test_path: Path to the test (e.g. "test_file.py::test_name")
            timeout: Timeout in seconds

        Returns:
            PytestResult for that specific test
        """
        python_cmd = self._get_python_cmd()
        cmd = [python_cmd, "-m", "pytest", "-v", test_path]

        try:
            result = subprocess.run(
                cmd,
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                timeout=timeout,
            )

            # NOTE(review): heuristic — "xfailed" in the summary also contains
            # "failed" and would flip this to False; confirm acceptable.
            passed = "passed" in result.stdout.lower() and "failed" not in result.stdout.lower()

            return PytestResult(
                node_id=test_path,
                test_name=test_path.split("::")[-1] if "::" in test_path else test_path,
                file_path=test_path.split("::")[0] if "::" in test_path else test_path,
                passed=passed,
                duration_seconds=0.0,
                output=result.stdout + result.stderr,
            )

        except Exception as e:
            return PytestResult(
                node_id=test_path,
                test_name=test_path,
                file_path="",
                passed=False,
                duration_seconds=0.0,
                output=str(e),
            )

    async def get_coverage_report(self, format: str = "term") -> Optional[Dict]:
        """
        Generate a coverage report.

        Args:
            format: "term", "html", or "xml"

        Returns:
            Dict with coverage details, or None if unavailable
        """
        python_cmd = self._get_python_cmd()
        cmd = [python_cmd, "-m", "pytest", "--cov=.", f"--cov-report={format}"]

        try:
            result = subprocess.run(
                cmd,
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                timeout=120,
            )

            # Parse the "TOTAL" line
            coverage_pattern = re.compile(r"TOTAL\s+\d+\s+\d+\s+(\d+)%")
            match = coverage_pattern.search(result.stdout)

            if match:
                return {
                    "total_coverage": float(match.group(1)),
                    "format": format,
                    "raw_output": result.stdout,
                }

        except Exception:
            pass

        return None

    async def list_tests(self) -> List[str]:
        """
        List all available tests.

        Returns:
            List of test node IDs
        """
        python_cmd = self._get_python_cmd()
        cmd = [python_cmd, "-m", "pytest", "--collect-only", "-q"]

        try:
            result = subprocess.run(
                cmd,
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                timeout=30,
            )

            tests = []
            for line in result.stdout.split("\n"):
                line = line.strip()
                # Node IDs contain "::"; "<Module ...>"-style collector lines do not count.
                if "::" in line and not line.startswith("<"):
                    tests.append(line)

            return tests

        except Exception:
            return []
|
||||
Reference in New Issue
Block a user