fix: Restore all files lost during destructive rebase

A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-02-09 09:51:32 +01:00
parent f7487ee240
commit bfdaf63ba9
2009 changed files with 749983 additions and 1731 deletions

View File

@@ -0,0 +1,21 @@
"""
Test Registry Routes Module
All API endpoints for the test registry.
"""
from fastapi import APIRouter
from .tests import router as tests_router
from .backlog import router as backlog_router
from .ci import router as ci_router
# Aggregate every endpoint group under one API router.
router = APIRouter(prefix="/api/tests", tags=["Test Registry"])

# Mount the sub-routers (tests, backlog, CI integration).
for _sub_router in (tests_router, backlog_router, ci_router):
    router.include_router(_sub_router)

__all__ = ["router"]

View File

@@ -0,0 +1,580 @@
"""
Test Registry - Backlog Endpoints
Endpoints for failed test backlog management.
"""
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, HTTPException, Query
from ...database import get_db_session
from ...repository import TestRepository
from ..api_models import (
BacklogStatusUpdate,
BacklogPriorityUpdate,
FixAttempt,
ManualBacklogEntry,
)
from ..config import (
get_test_runs,
get_persisted_results,
is_postgres_available,
migrate_json_to_postgres,
)
router = APIRouter()


@router.get("/failed")
async def get_failed_tests():
    """
    Return all failed tests from the persisted results.

    Used for backlog management with human-readable error descriptions.
    """
    persisted_results = get_persisted_results()

    collected = []
    # Gather failed tests from the persisted per-service results.
    for service, data in persisted_results.items():
        run_time = data.get("last_run", "")
        run_id = f"persisted_{service}"

        for failed in data.get("failed_test_ids", []):
            if isinstance(failed, dict):
                entry = {
                    "id": failed.get("id", ""),
                    "name": failed.get("name", ""),
                    "service": service,
                    "file_path": failed.get("file_path", ""),
                    "line_number": failed.get("line_number"),
                    "error_message": failed.get("error_message", "Keine Fehlermeldung verfuegbar"),
                    "error_type": failed.get("error_type", "unknown"),
                    "suggestion": failed.get("suggestion", ""),
                    "run_id": run_id,
                    "last_failed": run_time,
                    "status": "open",  # open, in_progress, fixed
                }
            elif isinstance(failed, str):
                # Legacy format: the entry is just the test ID as a string.
                entry = {
                    "id": failed,
                    "name": failed,
                    "service": service,
                    "file_path": "",
                    "line_number": None,
                    "error_message": "Keine Details verfuegbar",
                    "error_type": "unknown",
                    "suggestion": "",
                    "run_id": run_id,
                    "last_failed": run_time,
                    "status": "open",
                }
            else:
                continue
            collected.append(entry)

    # Deduplicate by test ID, keeping only the most recent occurrence
    # (ISO timestamps compare correctly as strings).
    newest_by_id = {}
    for test in collected:
        key = test["id"]
        previous = newest_by_id.get(key)
        if previous is None or test["last_failed"] > previous["last_failed"]:
            newest_by_id[key] = test
    unique_failed = list(newest_by_id.values())

    # Group by service for the dashboard view.
    by_service = {}
    for test in unique_failed:
        by_service.setdefault(test["service"], []).append(test)

    return {
        "total_failed": len(unique_failed),
        "by_service": by_service,
        "tests": unique_failed,
        "last_updated": datetime.now().isoformat(),
    }
@router.post("/failed/{test_id}/status")
async def update_failed_test_status(test_id: str, status: str):
    """
    Update the status of a failed test.

    Status: 'open', 'in_progress', 'fixed', 'wont_fix'
    Legacy endpoint - now uses PostgreSQL when available.
    """
    valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
    if status not in valid_statuses:
        raise HTTPException(
            status_code=400,
            detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
        )

    # Try to persist the change in PostgreSQL first.
    if is_postgres_available():
        try:
            with get_db_session() as db:
                repo = TestRepository(db)
                # Match either by test name or by numeric backlog ID.
                for item in repo.get_backlog():
                    if test_id in (item.test_name, str(item.id)):
                        repo.update_backlog_status(item.id, status)
                        return {
                            "test_id": test_id,
                            "backlog_id": item.id,
                            "status": status,
                            "updated_at": datetime.now().isoformat(),
                            "message": f"Test-Status auf '{status}' gesetzt (PostgreSQL)",
                        }
        except Exception as e:
            print(f"PostgreSQL-Fehler: {e}")

    # Fallback: acknowledge the change without persisting it.
    return {
        "test_id": test_id,
        "status": status,
        "updated_at": datetime.now().isoformat(),
        "message": f"Test-Status auf '{status}' gesetzt",
    }
@router.get("/backlog")
async def get_backlog(
    status: Optional[str] = Query(None, description="Filter nach Status: open, in_progress, fixed, wont_fix, flaky"),
    service: Optional[str] = Query(None, description="Filter nach Service"),
    priority: Optional[str] = Query(None, description="Filter nach Prioritaet: critical, high, medium, low"),
    limit: int = Query(100, ge=1, le=500),
    offset: int = Query(0, ge=0)
):
    """
    Return the persistent backlog of failed tests.

    The backlog aggregates failed tests across multiple runs and
    supports status management (open -> in_progress -> fixed).
    """
    if not is_postgres_available():
        # Fall back to the legacy /failed endpoint.
        return await get_failed_tests()

    try:
        with get_db_session() as db:
            repo = TestRepository(db)
            items = repo.get_backlog(
                status=status,
                service=service,
                priority=priority,
                limit=limit,
                offset=offset,
            )
            total = repo.get_backlog_count(status=status, service=service)

            serialized = [item.to_dict() for item in items]

            # Group the serialized entries by service.
            by_service = {}
            for item, payload in zip(items, serialized):
                by_service.setdefault(item.service, []).append(payload)

            return {
                "total": total,
                "items": serialized,
                "by_service": by_service,
                "filters": {
                    "status": status,
                    "service": service,
                    "priority": priority
                },
                "pagination": {
                    "limit": limit,
                    "offset": offset
                }
            }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/backlog/{backlog_id}")
async def get_backlog_item(backlog_id: int):
    """
    Return the details of a single backlog entry, including its fix history.
    """
    if not is_postgres_available():
        raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")

    try:
        with get_db_session() as db:
            repo = TestRepository(db)
            item = repo.get_backlog_item(backlog_id)
            if not item:
                raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")

            # Attach the fix history to the serialized entry.
            payload = item.to_dict()
            payload["fixes"] = [fix.to_dict() for fix in repo.get_fix_history(backlog_id)]
            return payload
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/status")
async def update_backlog_item_status(backlog_id: int, update: BacklogStatusUpdate):
    """
    Update the status of a backlog entry.

    Possible statuses:
    - open: not yet worked on
    - in_progress: currently being worked on
    - fixed: the test has been fixed
    - wont_fix: will not be fixed (with justification)
    - flaky: flaky test, handled separately
    """
    valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
    if update.status not in valid_statuses:
        raise HTTPException(
            status_code=400,
            detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
        )
    if not is_postgres_available():
        raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")

    try:
        with get_db_session() as db:
            updated = TestRepository(db).update_backlog_status(
                backlog_id=backlog_id,
                status=update.status,
                notes=update.notes,
                assigned_to=update.assigned_to,
            )
            if not updated:
                raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
            return updated.to_dict()
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/priority")
async def update_backlog_item_priority(backlog_id: int, update: BacklogPriorityUpdate):
    """
    Update the priority of a backlog entry.

    Possible priorities:
    - critical: fix immediately
    - high: fix soon
    - medium: fix when convenient
    - low: fix eventually
    """
    valid_priorities = ["critical", "high", "medium", "low"]
    if update.priority not in valid_priorities:
        raise HTTPException(
            status_code=400,
            detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
        )
    if not is_postgres_available():
        raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")

    try:
        with get_db_session() as db:
            updated = TestRepository(db).update_backlog_priority(backlog_id, update.priority)
            if not updated:
                raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
            return updated.to_dict()
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/fix")
async def add_fix_attempt(backlog_id: int, fix: FixAttempt):
    """
    Add a fix attempt to the history.

    When success=True the backlog status is automatically set to 'fixed'.
    """
    if not is_postgres_available():
        raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")

    valid_fix_types = ["manual", "auto_claude", "auto_script"]
    if fix.fix_type not in valid_fix_types:
        raise HTTPException(
            status_code=400,
            detail=f"Ungueltiger Fix-Typ. Erlaubt: {', '.join(valid_fix_types)}"
        )

    try:
        with get_db_session() as db:
            repo = TestRepository(db)
            # Make sure the backlog entry exists before recording the attempt.
            item = repo.get_backlog_item(backlog_id)
            if not item:
                raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")

            fix_record = repo.add_fix_attempt(
                backlog_id=backlog_id,
                fix_type=fix.fix_type,
                fix_description=fix.fix_description,
                commit_hash=fix.commit_hash,
                success=fix.success,
            )
            return {
                "fix": fix_record.to_dict(),
                "backlog_status": "fixed" if fix.success else item.status,
            }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog")
async def create_backlog_entry(entry: ManualBacklogEntry):
    """
    Create a manual backlog entry.

    Useful for:
    - Non-integrated features (xfail tests)
    - Known problems that still need fixing
    - Feature requests from the test context

    If an open entry already exists for the same test/service pair it is
    updated in place instead of creating a duplicate.
    """
    from ...db_models import FailedTestBacklogDB

    if not is_postgres_available():
        raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")

    valid_priorities = ["critical", "high", "medium", "low"]
    if entry.priority not in valid_priorities:
        raise HTTPException(
            status_code=400,
            detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
        )

    try:
        with get_db_session() as db:
            now = datetime.utcnow()

            # Reuse an existing open entry for this test/service if present.
            existing = db.query(FailedTestBacklogDB).filter(
                FailedTestBacklogDB.test_name == entry.test_name,
                FailedTestBacklogDB.service == entry.service,
                FailedTestBacklogDB.status == "open"
            ).first()
            if existing:
                existing.error_message = entry.error_message
                existing.priority = entry.priority
                existing.fix_suggestion = entry.fix_suggestion
                existing.last_failed_at = now
                db.commit()
                return {
                    "id": existing.id,
                    "status": "updated",
                    # fix: was an f-string without placeholders (ruff F541)
                    "message": "Existierender Backlog-Eintrag aktualisiert"
                }

            # No open entry yet -> create a fresh one.
            backlog = FailedTestBacklogDB(
                test_name=entry.test_name,
                test_file=f"{entry.service}/",
                service=entry.service,
                framework="manual",
                error_message=entry.error_message,
                error_type="feature_not_integrated",
                status="open",
                priority=entry.priority,
                fix_suggestion=entry.fix_suggestion,
                first_failed_at=now,
                last_failed_at=now,
                failure_count=1
            )
            db.add(backlog)
            db.commit()
            db.refresh(backlog)  # populate the generated primary key
            return {
                "id": backlog.id,
                "status": "created",
                "message": f"Backlog-Eintrag erstellt: {entry.test_name}"
            }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/history")
async def get_test_history(
    service: Optional[str] = Query(None, description="Filter nach Service"),
    days: int = Query(30, ge=1, le=365, description="Anzahl Tage zurueck"),
    limit: int = Query(100, ge=1, le=1000)
):
    """
    Return the test-run history for trend analysis.

    Data is aggregated by day and service.
    """
    test_runs = get_test_runs()

    def _memory_fallback(error: Optional[str] = None):
        # In-memory history, newest first, capped at `limit`.
        runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)[:limit]
        payload = {"runs": runs, "source": "memory"}
        if error is not None:
            payload["error"] = error
        return payload

    if not is_postgres_available():
        return _memory_fallback()

    try:
        with get_db_session() as db:
            history = TestRepository(db).get_run_history(
                service=service,
                days=days,
                limit=limit
            )
            return {
                "history": history,
                "days": days,
                "service": service,
                "source": "postgresql"
            }
    except Exception as e:
        # Degrade gracefully to the in-memory history.
        return _memory_fallback(error=str(e))
@router.get("/trends")
async def get_test_trends(
    service: Optional[str] = Query(None, description="Filter nach Service"),
    days: int = Query(14, ge=1, le=90, description="Anzahl Tage")
):
    """
    Return trend data for visualizations.

    Shows pass rate and test counts over time.
    """
    if not is_postgres_available():
        raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar fuer Trends")

    try:
        with get_db_session() as db:
            repo = TestRepository(db)
            history = repo.get_run_history(service=service, days=days, limit=days * 20)

            # Aggregate the run entries per calendar day.
            by_date = {}
            for entry in history:
                bucket = by_date.setdefault(entry["date"], {
                    "date": entry["date"],
                    "total_tests": 0,
                    "passed": 0,
                    "failed": 0,
                    "runs": 0
                })
                for field in ("total_tests", "passed", "failed", "runs"):
                    bucket[field] += entry[field]

            # Derive the per-day pass rate, sorted chronologically.
            trends = []
            for _date, data in sorted(by_date.items()):
                total = data["total_tests"]
                data["pass_rate"] = round((data["passed"] / total * 100) if total > 0 else 0, 1)
                trends.append(data)

            return {
                "trends": trends,
                "days": days,
                "service": service
            }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/stats")
async def get_aggregated_stats():
    """
    Return aggregated statistics across all services.

    Prefers PostgreSQL data; falls back to the in-memory legacy results
    when the database is unavailable or errors out.
    """
    from ...models import TestRegistryStats

    persisted_results = get_persisted_results()

    if is_postgres_available():
        try:
            with get_db_session() as db:
                repo = TestRepository(db)
                summary = repo.get_summary_stats()
                service_stats = repo.get_all_service_stats()
                return {
                    "summary": summary,
                    "services": [s.to_dict() for s in service_stats],
                    "source": "postgresql"
                }
        except Exception as e:
            print(f"PostgreSQL-Fehler: {e}")

    # Fallback: aggregate the legacy in-memory results.
    stats = TestRegistryStats()
    # fix: iterate values() - the keys were never used in the loop body
    for data in persisted_results.values():
        stats.total_tests += data.get("total", 0)
        stats.total_passed += data.get("passed", 0)
        stats.total_failed += data.get("failed", 0)
        # fix: total_skipped was reported below but never accumulated
        stats.total_skipped += data.get("skipped", 0)
    stats.services_count = len(persisted_results)
    stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0
    return {
        "summary": {
            "total_tests": stats.total_tests,
            "total_passed": stats.total_passed,
            "total_failed": stats.total_failed,
            "total_skipped": stats.total_skipped,
            "services_count": stats.services_count,
            "overall_pass_rate": round(stats.overall_pass_rate, 1)
        },
        "services": list(persisted_results.keys()),
        "source": "memory"
    }
@router.post("/migrate")
async def trigger_migration():
    """
    Migrate existing JSON data into PostgreSQL.

    Run once to import historical data.
    """
    if not is_postgres_available():
        raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")

    migrated = migrate_json_to_postgres()
    return {
        "migrated_services": migrated,
        "message": f"{migrated} Services von JSON nach PostgreSQL migriert"
    }

View File

@@ -0,0 +1,295 @@
"""
Test Registry - CI/CD Integration Endpoints
Endpoints for receiving results from CI/CD pipelines.
"""
from datetime import datetime
from typing import Dict
from fastapi import APIRouter, BackgroundTasks
from ...database import get_db_session
from ...repository import TestRepository
from ..api_models import CIResultRequest
from ..config import (
get_test_runs,
get_persisted_results,
is_postgres_available,
)
router = APIRouter()


@router.post("/ci-result")
async def receive_ci_result(result: CIResultRequest, background_tasks: BackgroundTasks):
    """
    Receive test results from the CI/CD pipeline.

    Called by the report-test-results step in .woodpecker/main.yml.

    Flow:
    1. The pipeline runs the tests and collects JSON results
    2. The pipeline posts detailed per-service results to this endpoint
    3. This endpoint stores them in PostgreSQL
    4. The dashboard displays the data

    test_results format:
    {
        "service": "consent-service",
        "framework": "go",
        "total": 57,
        "passed": 57,
        "failed": 0,
        "skipped": 0,
        "coverage": 75.5
    }
    """
    test_runs = get_test_runs()
    persisted_results = get_persisted_results()

    # Extract the service-specific numbers from test_results.
    tr = result.test_results or {}
    service_name = tr.get("service", "ci-pipeline")
    framework = tr.get("framework", "unknown")
    total = tr.get("total", 0)
    passed = tr.get("passed", 0)
    failed = tr.get("failed", 0)
    skipped = tr.get("skipped", 0)
    coverage = tr.get("coverage", 0)

    # Fix: result.commit may be None/empty (the guard below already assumed
    # so); slicing None crashed the log line that follows.
    short_commit = result.commit[:8] if result.commit else None

    # Debug logging
    print(f"[CI-RESULT] Pipeline {result.pipeline_id} - Service: {service_name}")
    print(f"[CI-RESULT] Tests: {passed}/{total} passed, {failed} failed, {skipped} skipped")
    print(f"[CI-RESULT] Coverage: {coverage}%, Commit: {short_commit}")

    # Store in PostgreSQL when available.
    if is_postgres_available():
        try:
            with get_db_session() as db:
                repo = TestRepository(db)
                # Unique run ID per pipeline and service.
                run_id = f"ci-{result.pipeline_id}-{service_name}"
                # Create the test-run record.
                repo.create_run(
                    run_id=run_id,
                    service=service_name,
                    framework=framework,
                    triggered_by="ci",
                    git_commit=short_commit,
                    git_branch=result.branch
                )
                # Mark it completed with the detailed numbers.
                status = "passed" if failed == 0 else "failed"
                repo.complete_run(
                    run_id=run_id,
                    status=status,
                    total_tests=total,
                    passed_tests=passed,
                    failed_tests=failed,
                    skipped_tests=skipped,
                    duration_seconds=0
                )
                print(f"[CI-RESULT] Stored as run_id: {run_id}, status: {status}")

                # IMPORTANT: refresh the in-memory cache so the frontend
                # sees the result immediately.
                persisted_results[service_name] = {
                    "total": total,
                    "passed": passed,
                    "failed": failed,
                    "last_run": datetime.utcnow().isoformat(),
                    "status": status,
                    "failed_test_ids": []
                }
                print(f"[CI-RESULT] Updated cache for {service_name}: {passed}/{total} passed")

                if failed > 0:
                    # Failures: create/refresh a backlog entry in the background.
                    background_tasks.add_task(
                        _create_backlog_entry,
                        service_name,
                        framework,
                        failed,
                        result.pipeline_id,
                        result.commit,
                        result.branch
                    )
                else:
                    # All tests passed: auto-close open backlog entries.
                    background_tasks.add_task(
                        _close_backlog_entry,
                        service_name,
                        result.pipeline_id,
                        result.commit
                    )

                return {
                    "received": True,
                    "run_id": run_id,
                    "service": service_name,
                    "pipeline_id": result.pipeline_id,
                    "status": status,
                    "tests": {"total": total, "passed": passed, "failed": failed},
                    "stored_in": "postgres"
                }
        except Exception as e:
            print(f"[CI-RESULT] PostgreSQL Error: {e}")
            # Fall through to memory storage.

    # Memory fallback.
    ci_run = {
        "id": f"ci-{result.pipeline_id}",
        "pipeline_id": result.pipeline_id,
        "commit": result.commit,
        "branch": result.branch,
        "status": result.status,
        "timestamp": datetime.now().isoformat(),
        "test_results": result.test_results
    }
    test_runs.append(ci_run)
    return {
        "received": True,
        "pipeline_id": result.pipeline_id,
        "status": result.status,
        "stored_in": "memory"
    }
async def _create_backlog_entry(
    service_name: str,
    framework: str,
    failed_count: int,
    pipeline_id: str,
    commit: str,
    branch: str
):
    """
    Background task: create backlog entries for failed tests.

    Invoked asynchronously whenever a CI run reports failures.
    """
    from ...db_models import FailedTestBacklogDB

    print(f"[CI-RESULT] Creating backlog entry for {service_name}: {failed_count} failed tests")
    if not is_postgres_available():
        return

    try:
        with get_db_session() as db:
            now = datetime.utcnow()
            error_text = f"{failed_count} Tests fehlgeschlagen in Pipeline {pipeline_id} (Branch: {branch})"

            # Look for an already-open backlog entry for this service.
            existing = db.query(FailedTestBacklogDB).filter(
                FailedTestBacklogDB.service == service_name,
                FailedTestBacklogDB.status == "open"
            ).first()

            if existing:
                # Bump the existing entry instead of creating a duplicate.
                existing.last_failed_at = now
                existing.failure_count += 1
                existing.error_message = error_text
                db.commit()
                print(f"[CI-RESULT] Updated existing backlog entry (ID: {existing.id})")
                return

            record = FailedTestBacklogDB(
                test_name=f"{service_name} Tests",
                test_file=f"{service_name}/",
                service=service_name,
                framework=framework,
                error_message=error_text,
                error_type="TEST_FAILURE",
                first_failed_at=now,
                last_failed_at=now,
                failure_count=1,
                status="open",
                priority="high" if failed_count > 5 else "medium"
            )
            db.add(record)
            db.commit()
            print(f"[CI-RESULT] Created new backlog entry (ID: {record.id})")
    except Exception as e:
        print(f"[CI-RESULT] Error creating backlog entry: {e}")
async def _close_backlog_entry(
    service_name: str,
    pipeline_id: str,
    commit: str
):
    """
    Background task: close backlog entries once all tests pass.

    Invoked asynchronously after a fully green CI run.
    """
    from ...db_models import FailedTestBacklogDB

    print(f"[CI-RESULT] Checking for open backlog entries to close for {service_name}")
    if not is_postgres_available():
        return

    try:
        with get_db_session() as db:
            now = datetime.utcnow()
            # Collect the open entries for this service.
            open_entries = db.query(FailedTestBacklogDB).filter(
                FailedTestBacklogDB.service == service_name,
                FailedTestBacklogDB.status == "open"
            ).all()

            if not open_entries:
                print(f"[CI-RESULT] No open backlog entries for {service_name}")
                return

            for entry in open_entries:
                entry.status = "resolved"
                entry.resolved_at = now
                entry.resolution_commit = commit[:8] if commit else None
                entry.resolution_notes = f"Automatisch geschlossen - alle Tests in Pipeline {pipeline_id} bestanden"
                print(f"[CI-RESULT] Auto-closed backlog entry (ID: {entry.id}) for {service_name}")

            db.commit()
            print(f"[CI-RESULT] Closed {len(open_entries)} backlog entries for {service_name}")
    except Exception as e:
        print(f"[CI-RESULT] Error closing backlog entries: {e}")
async def _fetch_and_store_failed_tests(pipeline_id: str, commit: str, branch: str):
    """
    Legacy background task for generic pipeline failures.
    """
    from ...db_models import FailedTestBacklogDB

    print(f"[CI-RESULT] Fetching failed test details for pipeline {pipeline_id}")
    if not is_postgres_available():
        return

    try:
        with get_db_session() as db:
            now = datetime.utcnow()
            # Record the pipeline failure itself as a backlog item.
            record = FailedTestBacklogDB(
                test_name=f"CI Pipeline {pipeline_id}",
                test_file=".woodpecker/main.yml",
                service="ci-pipeline",
                framework="woodpecker",
                error_message=f"Pipeline {pipeline_id} fehlgeschlagen auf Branch {branch}",
                error_type="CI_FAILURE",
                first_failed_at=now,
                last_failed_at=now,
                failure_count=1,
                status="open",
                priority="high"
            )
            db.add(record)
            db.commit()
            print(f"[CI-RESULT] Added pipeline failure to backlog (ID: {record.id})")
    except Exception as e:
        print(f"[CI-RESULT] Error adding to backlog: {e}")

View File

@@ -0,0 +1,335 @@
"""
Test Registry - Test Endpoints
Endpoints for test discovery, running, and monitoring.
"""
from datetime import datetime
from typing import Dict, Any
from fastapi import APIRouter, HTTPException, BackgroundTasks
from ...models import (
TestFramework,
TestRegistryStats,
SERVICE_DEFINITIONS,
)
from ..api_models import TestRunResponse, RegistryResponse
from ..config import (
PROJECT_ROOT,
RUN_MODE,
check_go_available,
check_pytest_available,
get_go_version,
get_pytest_version,
get_test_runs,
get_current_runs,
get_running_tests,
)
from ..discovery import (
build_service_info,
discover_go_tests,
discover_python_tests,
discover_bqas_tests,
)
from ..executors import execute_test_run
router = APIRouter()


@router.get("/registry", response_model=RegistryResponse)
async def get_test_registry():
    """
    Return every registered test.

    Scans all services and aggregates their test information.
    """
    services = []
    stats = TestRegistryStats()
    by_category: Dict[str, int] = {}
    by_framework: Dict[str, int] = {}

    for service_def in SERVICE_DEFINITIONS:
        info = build_service_info(service_def)
        services.append({
            "service": info.service,
            "display_name": info.display_name,
            "port": info.port,
            "language": info.language,
            "total_tests": info.total_tests,
            "passed_tests": info.passed_tests,
            "failed_tests": info.failed_tests,
            "skipped_tests": info.skipped_tests,
            "pass_rate": round(info.pass_rate, 1),
            "coverage_percent": round(info.coverage_percent, 1) if info.coverage_percent else None,
            "last_run": info.last_run.isoformat() if info.last_run else None,
            "status": info.status.value,
        })

        stats.total_tests += info.total_tests
        stats.total_passed += info.passed_tests
        stats.total_failed += info.failed_tests
        stats.total_skipped += info.skipped_tests

        framework = service_def["framework"]

        # Per-framework totals.
        fw_name = framework.value
        by_framework[fw_name] = by_framework.get(fw_name, 0) + info.total_tests

        # Category derived from the framework type.
        if framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
            category = "bqas"
        elif framework == TestFramework.PLAYWRIGHT:
            category = "e2e"
        else:
            category = "unit"
        by_category[category] = by_category.get(category, 0) + info.total_tests

    stats.services_count = len(services)
    stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0.0
    stats.by_category = by_category
    stats.by_framework = by_framework

    return RegistryResponse(
        services=services,
        stats={
            "total_tests": stats.total_tests,
            "total_passed": stats.total_passed,
            "total_failed": stats.total_failed,
            "total_skipped": stats.total_skipped,
            "overall_pass_rate": round(stats.overall_pass_rate, 1),
            "services_count": stats.services_count,
            "by_category": stats.by_category,
            "by_framework": stats.by_framework,
        },
        last_updated=datetime.now().isoformat(),
    )
@router.get("/registry/{service}")
async def get_service_tests(service: str):
    """
    Return the tests for a single service.
    """
    service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == service), None)
    if service_def is None:
        raise HTTPException(status_code=404, detail=f"Service '{service}' nicht gefunden")

    info = build_service_info(service_def)
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
    framework = service_def["framework"]

    # Run framework-specific test discovery.
    if framework == TestFramework.GO_TEST:
        tests = discover_go_tests(base_path)
    elif framework == TestFramework.PYTEST:
        tests = discover_python_tests(base_path)
    elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
        bqas_kind = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
        tests = discover_bqas_tests(base_path, bqas_kind)
    else:
        tests = []

    return {
        "service": info.service,
        "display_name": info.display_name,
        "port": info.port,
        "language": info.language,
        "total_tests": len(tests),
        "passed_tests": info.passed_tests,
        "failed_tests": info.failed_tests,
        "coverage_percent": info.coverage_percent,
        "tests": [
            {
                "id": t.id,
                "name": t.name,
                "file_path": t.file_path,
                "line_number": t.line_number,
                "framework": t.framework.value,
                "status": t.status.value,
            }
            for t in tests
        ],
    }
@router.post("/run/{suite}", response_model=TestRunResponse)
async def run_test_suite(suite: str, background_tasks: BackgroundTasks):
    """
    Start a test run for a suite; the tests execute in the background.
    """
    service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == suite), None)
    if service_def is None:
        raise HTTPException(status_code=404, detail=f"Suite '{suite}' nicht gefunden")

    # Timestamped run ID keeps repeated runs distinguishable.
    run_id = f"run_{suite}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    background_tasks.add_task(execute_test_run, run_id, service_def)

    return TestRunResponse(
        run_id=run_id,
        status="queued",
        message=f"Test-Run fuer {service_def['display_name']} gestartet",
    )
@router.get("/runs")
async def get_test_runs_list(limit: int = 20):
    """
    Return the test-run history, newest first.
    """
    recent = sorted(get_test_runs(), key=lambda r: r["started_at"], reverse=True)
    return {"runs": recent[:limit]}
@router.get("/progress/{service_id}")
async def get_test_progress(service_id: str):
    """
    Return the progress of a running test.

    Polled by the frontend for live updates.
    """
    # Default payload returned when no test is currently running.
    idle = {
        "current_file": "",
        "files_done": 0,
        "files_total": 0,
        "passed": 0,
        "failed": 0,
        "status": "idle"
    }
    return get_running_tests().get(service_id, idle)
@router.get("/progress")
async def get_all_progress():
    """
    Return progress information for every currently running test.
    """
    # The running-tests map already has the response shape.
    return get_running_tests()
@router.get("/runs/{run_id}")
async def get_test_run(run_id: str):
    """
    Return the details of a single test run.
    """
    current_runs = get_current_runs()

    # Active runs are kept as objects and serialized on demand.
    active = current_runs.get(run_id)
    if active is not None:
        return {
            "id": active.id,
            "suite_id": active.suite_id,
            "service": active.service,
            "started_at": active.started_at.isoformat(),
            "completed_at": active.completed_at.isoformat() if active.completed_at else None,
            "status": active.status.value,
            "total_tests": active.total_tests,
            "passed_tests": active.passed_tests,
            "failed_tests": active.failed_tests,
            "duration_seconds": active.duration_seconds,
            "output": active.output,
        }

    # Otherwise search the already-serialized history.
    for run in get_test_runs():
        if run["id"] == run_id:
            return run

    raise HTTPException(status_code=404, detail=f"Run '{run_id}' nicht gefunden")
@router.get("/coverage")
async def get_coverage():
    """
    Return aggregated coverage information across services.
    """
    covered = []
    coverage_sum = 0.0

    for service_def in SERVICE_DEFINITIONS:
        info = build_service_info(service_def)
        # Services without coverage data are excluded from the average.
        if not info.coverage_percent:
            continue
        covered.append({
            "service": info.service,
            "display_name": info.display_name,
            "coverage_percent": round(info.coverage_percent, 1),
            "language": info.language,
        })
        coverage_sum += info.coverage_percent

    count = len(covered)
    return {
        "services": covered,
        "average_coverage": round(coverage_sum / count, 1) if count > 0 else 0,
        "total_services": count,
    }
@router.get("/health")
async def get_test_health():
    """
    Return the status of the test infrastructure.
    """
    go_available = check_go_available()
    pytest_available = check_pytest_available()

    def _availability(flag: bool) -> str:
        # Map a boolean probe result to the API's status string.
        return "available" if flag else "not_installed"

    return {
        "status": "healthy",
        "mode": RUN_MODE,  # "docker", "local", or "demo"
        "services_monitored": len(SERVICE_DEFINITIONS),
        "project_root": str(PROJECT_ROOT),
        "project_root_exists": PROJECT_ROOT.exists(),
        "timestamp": datetime.now().isoformat(),
        "runners": {
            "go_test": _availability(go_available),
            "pytest": _availability(pytest_available),
            "jest": "available",  # TODO: check Node.js
            "playwright": "available",  # TODO: check Playwright
            "bqas": "available",  # BQAS runs as its own service
        },
        "versions": {
            "go": get_go_version() if go_available else None,
            "pytest": get_pytest_version() if pytest_available else None,
        },
    }
@router.get("/db-status")
async def get_db_status():
    """
    Return the status of the PostgreSQL database connection.

    Used by the dashboard's ServiceStatus widget.
    """
    import time
    from ...database import check_db_connection, DATABASE_URL

    # Measure the connection-check round trip in milliseconds.
    start_time = time.time()
    is_connected = check_db_connection()
    response_time = int((time.time() - start_time) * 1000)

    # Parse host:port from DATABASE_URL without leaking credentials.
    # Format: postgresql://user:pass@host:port/db -> host:port
    try:
        url_parts = DATABASE_URL.split("@")
        if len(url_parts) > 1:
            host_part = url_parts[1].split("/")[0]
        else:
            host_part = "unknown"
    except Exception:  # fix: bare `except:` also caught SystemExit/KeyboardInterrupt
        host_part = "unknown"

    return {
        "status": "online" if is_connected else "offline",
        "host": host_part,
        "response_time_ms": response_time,
        "timestamp": datetime.now().isoformat(),
    }