fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
335
backend/api/tests/registry/routes/tests.py
Normal file
335
backend/api/tests/registry/routes/tests.py
Normal file
@@ -0,0 +1,335 @@
|
||||
"""
|
||||
Test Registry - Test Endpoints
|
||||
|
||||
Endpoints for test discovery, running, and monitoring.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any
|
||||
|
||||
from fastapi import APIRouter, HTTPException, BackgroundTasks
|
||||
|
||||
from ...models import (
|
||||
TestFramework,
|
||||
TestRegistryStats,
|
||||
SERVICE_DEFINITIONS,
|
||||
)
|
||||
from ..api_models import TestRunResponse, RegistryResponse
|
||||
from ..config import (
|
||||
PROJECT_ROOT,
|
||||
RUN_MODE,
|
||||
check_go_available,
|
||||
check_pytest_available,
|
||||
get_go_version,
|
||||
get_pytest_version,
|
||||
get_test_runs,
|
||||
get_current_runs,
|
||||
get_running_tests,
|
||||
)
|
||||
from ..discovery import (
|
||||
build_service_info,
|
||||
discover_go_tests,
|
||||
discover_python_tests,
|
||||
discover_bqas_tests,
|
||||
)
|
||||
from ..executors import execute_test_run
|
||||
|
||||
# Shared router for all test-registry endpoints; mounted by the parent API app.
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/registry", response_model=RegistryResponse)
async def get_test_registry():
    """
    Return all registered tests.

    Scans every service definition and aggregates the test information
    into per-service entries plus overall registry statistics.
    """
    stats = TestRegistryStats()
    category_counts: Dict[str, int] = {}
    framework_counts: Dict[str, int] = {}
    services = []

    for definition in SERVICE_DEFINITIONS:
        svc = build_service_info(definition)

        services.append({
            "service": svc.service,
            "display_name": svc.display_name,
            "port": svc.port,
            "language": svc.language,
            "total_tests": svc.total_tests,
            "passed_tests": svc.passed_tests,
            "failed_tests": svc.failed_tests,
            "skipped_tests": svc.skipped_tests,
            "pass_rate": round(svc.pass_rate, 1),
            "coverage_percent": round(svc.coverage_percent, 1) if svc.coverage_percent else None,
            "last_run": svc.last_run.isoformat() if svc.last_run else None,
            "status": svc.status.value,
        })

        stats.total_tests += svc.total_tests
        stats.total_passed += svc.passed_tests
        stats.total_failed += svc.failed_tests
        stats.total_skipped += svc.skipped_tests

        # Tally tests per framework.
        framework = definition["framework"]
        framework_counts[framework.value] = (
            framework_counts.get(framework.value, 0) + svc.total_tests
        )

        # Derive the category bucket from the framework.
        if framework in (TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG):
            category = "bqas"
        elif framework == TestFramework.PLAYWRIGHT:
            category = "e2e"
        else:
            category = "unit"
        category_counts[category] = category_counts.get(category, 0) + svc.total_tests

    stats.services_count = len(services)
    if stats.total_tests > 0:
        stats.overall_pass_rate = stats.total_passed / stats.total_tests * 100
    else:
        stats.overall_pass_rate = 0.0
    stats.by_category = category_counts
    stats.by_framework = framework_counts

    return RegistryResponse(
        services=services,
        stats={
            "total_tests": stats.total_tests,
            "total_passed": stats.total_passed,
            "total_failed": stats.total_failed,
            "total_skipped": stats.total_skipped,
            "overall_pass_rate": round(stats.overall_pass_rate, 1),
            "services_count": stats.services_count,
            "by_category": stats.by_category,
            "by_framework": stats.by_framework,
        },
        last_updated=datetime.now().isoformat(),
    )
|
||||
|
||||
|
||||
@router.get("/registry/{service}")
async def get_service_tests(service: str):
    """
    Return the tests of a single, specific service.

    Raises:
        HTTPException: 404 when the service name is unknown.
    """
    definition = next(
        (entry for entry in SERVICE_DEFINITIONS if entry["service"] == service),
        None,
    )
    if not definition:
        raise HTTPException(status_code=404, detail=f"Service '{service}' nicht gefunden")

    svc = build_service_info(definition)
    root = PROJECT_ROOT / definition["base_path"].lstrip("/")
    framework = definition["framework"]

    # Pick the discovery strategy matching the service's test framework.
    if framework == TestFramework.GO_TEST:
        discovered = discover_go_tests(root)
    elif framework == TestFramework.PYTEST:
        discovered = discover_python_tests(root)
    elif framework in (TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG):
        bqas_kind = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
        discovered = discover_bqas_tests(root, bqas_kind)
    else:
        discovered = []

    return {
        "service": svc.service,
        "display_name": svc.display_name,
        "port": svc.port,
        "language": svc.language,
        "total_tests": len(discovered),
        "passed_tests": svc.passed_tests,
        "failed_tests": svc.failed_tests,
        "coverage_percent": svc.coverage_percent,
        "tests": [
            {
                "id": item.id,
                "name": item.name,
                "file_path": item.file_path,
                "line_number": item.line_number,
                "framework": item.framework.value,
                "status": item.status.value,
            }
            for item in discovered
        ],
    }
|
||||
|
||||
|
||||
@router.post("/run/{suite}", response_model=TestRunResponse)
async def run_test_suite(suite: str, background_tasks: BackgroundTasks):
    """
    Start a test run for a suite.

    The tests execute in a background task; the response is returned
    immediately with status "queued" and the generated run id.

    Raises:
        HTTPException: 404 when the suite name is unknown.
    """
    definition = next(
        (entry for entry in SERVICE_DEFINITIONS if entry["service"] == suite),
        None,
    )
    if not definition:
        raise HTTPException(status_code=404, detail=f"Suite '{suite}' nicht gefunden")

    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    run_id = f"run_{suite}_{timestamp}"

    # Kick off the actual execution without blocking this request.
    background_tasks.add_task(execute_test_run, run_id, definition)

    return TestRunResponse(
        run_id=run_id,
        status="queued",
        message=f"Test-Run fuer {definition['display_name']} gestartet",
    )
|
||||
|
||||
|
||||
@router.get("/runs")
async def get_test_runs_list(limit: int = 20):
    """
    Return the test-run history, newest first, capped at *limit* entries.
    """
    history = sorted(
        get_test_runs(),
        key=lambda entry: entry["started_at"],
        reverse=True,
    )
    return {"runs": history[:limit]}
|
||||
|
||||
|
||||
@router.get("/progress/{service_id}")
async def get_test_progress(service_id: str):
    """
    Return the progress of a currently running test.

    Polled by the frontend to show live updates; when no test is
    running for the given service, a default idle payload is returned.
    """
    try:
        return get_running_tests()[service_id]
    except KeyError:
        pass

    # No active run for this service — report an idle state.
    return {
        "current_file": "",
        "files_done": 0,
        "files_total": 0,
        "passed": 0,
        "failed": 0,
        "status": "idle"
    }
|
||||
|
||||
|
||||
@router.get("/progress")
async def get_all_progress():
    """
    Return the progress of every currently running test.
    """
    snapshot = get_running_tests()
    return snapshot
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}")
async def get_test_run(run_id: str):
    """
    Return the details of a specific test run.

    Active runs are checked first, then the persisted history.

    Raises:
        HTTPException: 404 when the run id is unknown in both places.
    """
    active = get_current_runs()
    history = get_test_runs()

    if run_id in active:
        run = active[run_id]
        return {
            "id": run.id,
            "suite_id": run.suite_id,
            "service": run.service,
            "started_at": run.started_at.isoformat(),
            "completed_at": run.completed_at.isoformat() if run.completed_at else None,
            "status": run.status.value,
            "total_tests": run.total_tests,
            "passed_tests": run.passed_tests,
            "failed_tests": run.failed_tests,
            "duration_seconds": run.duration_seconds,
            "output": run.output,
        }

    # Fall back to the run history.
    match = next((entry for entry in history if entry["id"] == run_id), None)
    if match is not None:
        return match

    raise HTTPException(status_code=404, detail=f"Run '{run_id}' nicht gefunden")
|
||||
|
||||
|
||||
@router.get("/coverage")
async def get_coverage():
    """
    Return aggregated coverage information across all services.

    Services without coverage data (``coverage_percent is None``) are
    excluded from both the list and the average.
    """
    coverage_data = []
    total_coverage = 0.0
    count = 0

    for service_def in SERVICE_DEFINITIONS:
        info = build_service_info(service_def)
        # Use an explicit None check instead of truthiness: a service
        # with a genuinely measured coverage of 0.0% must still appear
        # in the report and count toward the average.
        if info.coverage_percent is not None:
            coverage_data.append({
                "service": info.service,
                "display_name": info.display_name,
                "coverage_percent": round(info.coverage_percent, 1),
                "language": info.language,
            })
            total_coverage += info.coverage_percent
            count += 1

    return {
        "services": coverage_data,
        "average_coverage": round(total_coverage / count, 1) if count > 0 else 0,
        "total_services": count,
    }
|
||||
|
||||
|
||||
@router.get("/health")
async def get_test_health():
    """
    Return the status of the test infrastructure.

    Reports the run mode, the number of monitored services, which test
    runners are available on this host, and their versions.
    """
    has_go = check_go_available()
    has_pytest = check_pytest_available()

    runner_status = {
        "go_test": "available" if has_go else "not_installed",
        "pytest": "available" if has_pytest else "not_installed",
        "jest": "available",  # TODO: check Node.js
        "playwright": "available",  # TODO: check Playwright
        "bqas": "available",  # BQAS runs as its own service
    }
    runner_versions = {
        "go": get_go_version() if has_go else None,
        "pytest": get_pytest_version() if has_pytest else None,
    }

    return {
        "status": "healthy",
        "mode": RUN_MODE,  # "docker", "local", or "demo"
        "services_monitored": len(SERVICE_DEFINITIONS),
        "project_root": str(PROJECT_ROOT),
        "project_root_exists": PROJECT_ROOT.exists(),
        "timestamp": datetime.now().isoformat(),
        "runners": runner_status,
        "versions": runner_versions,
    }
|
||||
|
||||
|
||||
@router.get("/db-status")
async def get_db_status():
    """
    Return the status of the PostgreSQL database connection.

    Used by the dashboard ServiceStatus widget. Measures the round trip
    of a connectivity check and reports the database host without ever
    exposing credentials.
    """
    import time
    from ...database import check_db_connection, DATABASE_URL

    start_time = time.time()
    is_connected = check_db_connection()
    response_time = int((time.time() - start_time) * 1000)

    # Extract host:port from DATABASE_URL without leaking the password:
    # postgresql://user:pass@host:port/db -> host:port
    try:
        url_parts = DATABASE_URL.split("@")
        if len(url_parts) > 1:
            host_part = url_parts[1].split("/")[0]
        else:
            host_part = "unknown"
    except Exception:
        # Narrowed from a bare `except:`, which would also have swallowed
        # SystemExit/KeyboardInterrupt; any parse failure falls back here.
        host_part = "unknown"

    return {
        "status": "online" if is_connected else "offline",
        "host": host_part,
        "response_time_ms": response_time,
        "timestamp": datetime.now().isoformat(),
    }
|
||||
Reference in New Issue
Block a user