fix: Restore all files lost during destructive rebase

A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Author: Benjamin Admin
Date:   2026-02-09 09:51:32 +01:00
Commit: bfdaf63ba9 (parent: f7487ee240)
2009 changed files with 749983 additions and 1731 deletions

__init__.py

@@ -0,0 +1,129 @@
"""
Test Registry Module
Zentrale API fuer das Test-Dashboard.
Entdeckt, registriert und fuehrt Tests aus allen Services aus.
Phase 1 Update (2026-02-02):
- PostgreSQL-Integration fuer persistente Speicherung
- Backlog-Management mit Status-Workflow
- Historie und Trends ueber Zeit
Modular Refactoring (2026-02-03):
- Split into sub-modules for maintainability
"""
# Re-export the router for FastAPI
from .routes import router
# Re-export config for external access
from .config import (
PROJECT_ROOT,
RUN_MODE,
DATA_DIR,
RESULTS_FILE,
check_go_available,
check_pytest_available,
get_go_version,
get_pytest_version,
load_persisted_results,
save_persisted_results,
migrate_json_to_postgres,
is_postgres_available,
get_persisted_results,
get_test_runs,
get_current_runs,
get_running_tests,
)
# Re-export API models
from .api_models import (
TestRunRequest,
TestRunResponse,
RegistryResponse,
BacklogStatusUpdate,
BacklogPriorityUpdate,
FixAttempt,
ManualBacklogEntry,
CIResultRequest,
)
# Re-export discovery functions
from .discovery import (
discover_go_tests,
discover_python_tests,
discover_bqas_tests,
build_service_info,
)
# Re-export executors
from .executors import (
run_go_tests,
run_python_tests,
run_bqas_tests,
run_jest_tests,
run_playwright_tests,
run_tests_in_container,
execute_test_run,
)
# Re-export services
from .services import (
extract_go_error,
classify_go_error,
suggest_go_fix,
extract_pytest_error,
classify_pytest_error,
suggest_pytest_fix,
)
__all__ = [
# Router
"router",
# Config
"PROJECT_ROOT",
"RUN_MODE",
"DATA_DIR",
"RESULTS_FILE",
"check_go_available",
"check_pytest_available",
"get_go_version",
"get_pytest_version",
"load_persisted_results",
"save_persisted_results",
"migrate_json_to_postgres",
"is_postgres_available",
"get_persisted_results",
"get_test_runs",
"get_current_runs",
"get_running_tests",
# API Models
"TestRunRequest",
"TestRunResponse",
"RegistryResponse",
"BacklogStatusUpdate",
"BacklogPriorityUpdate",
"FixAttempt",
"ManualBacklogEntry",
"CIResultRequest",
# Discovery
"discover_go_tests",
"discover_python_tests",
"discover_bqas_tests",
"build_service_info",
# Executors
"run_go_tests",
"run_python_tests",
"run_bqas_tests",
"run_jest_tests",
"run_playwright_tests",
"run_tests_in_container",
"execute_test_run",
# Services
"extract_go_error",
"classify_go_error",
"suggest_go_fix",
"extract_pytest_error",
"classify_pytest_error",
"suggest_pytest_fix",
]
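
A minimal usage sketch (not part of the restored file): how the re-exported router can be mounted in a FastAPI app. The import path "test_registry" stands in for the real package path, which this diff does not show.

# sketch.py (illustrative only; assumed package path)
from fastapi import FastAPI
from test_registry import router

app = FastAPI()
app.include_router(router)  # serves the registry under /api/tests (see routes/__init__.py)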

api_models.py

@@ -0,0 +1,73 @@
"""
Test Registry API Models
Pydantic models for API requests and responses.
"""
from typing import Optional, Dict, Any, List
from pydantic import BaseModel
# ==============================================================================
# Test Run Models
# ==============================================================================
class TestRunRequest(BaseModel):
suite_id: str
service: Optional[str] = None
triggered_by: str = "manual"
class TestRunResponse(BaseModel):
run_id: str
status: str
message: str
class RegistryResponse(BaseModel):
services: List[Dict[str, Any]]
stats: Dict[str, Any]
last_updated: str
# ==============================================================================
# Backlog Models
# ==============================================================================
class BacklogStatusUpdate(BaseModel):
status: str
notes: Optional[str] = None
assigned_to: Optional[str] = None
class BacklogPriorityUpdate(BaseModel):
priority: str
class FixAttempt(BaseModel):
fix_type: str # manual, auto_claude, auto_script
fix_description: str
commit_hash: Optional[str] = None
success: bool = False
class ManualBacklogEntry(BaseModel):
"""Manueller Backlog-Eintrag fuer nicht-integrierte Features."""
test_name: str
service: str
error_message: str
priority: str = "medium" # critical, high, medium, low
fix_suggestion: Optional[str] = None
# ==============================================================================
# CI/CD Models
# ==============================================================================
class CIResultRequest(BaseModel):
"""Daten von der CI/CD-Pipeline (Woodpecker)"""
pipeline_id: str
commit: str
branch: str
status: str # "completed", "failed", "success"
    test_results: Optional[Dict[str, Any]] = None  # detailed per-service results
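
For illustration, these models are plain pydantic models; a request/response pair might be constructed like this (all field values invented):

# Illustrative payloads (values invented)
req = TestRunRequest(suite_id="backend", triggered_by="ci")
assert req.service is None  # optional field defaults to None
resp = TestRunResponse(run_id="run-123", status="started", message="Test run queued")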

config.py

@@ -0,0 +1,230 @@
"""
Test Registry Configuration
Project paths, environment setup, and global state management.
"""
import os
import json
import subprocess
from pathlib import Path
from typing import List, Dict, Any, Optional
from datetime import datetime
# Project base directory - several candidate paths are checked:
# 1. Docker with volume mount: /app/project
# 2. Local development: /Users/benjaminadmin/Projekte/breakpilot-pwa
# 3. Fallback: demo mode
DOCKER_PROJECT_PATH = Path("/app/project")
LOCAL_PROJECT_PATH = Path("/Users/benjaminadmin/Projekte/breakpilot-pwa")
if DOCKER_PROJECT_PATH.exists():
PROJECT_ROOT = DOCKER_PROJECT_PATH
RUN_MODE = "docker"
elif LOCAL_PROJECT_PATH.exists():
PROJECT_ROOT = LOCAL_PROJECT_PATH
RUN_MODE = "local"
else:
    PROJECT_ROOT = LOCAL_PROJECT_PATH  # fallback for demo mode
RUN_MODE = "demo"
# Path for persisted results (legacy JSON - still used as a fallback)
DATA_DIR = Path("/app/data")
RESULTS_FILE = DATA_DIR / "test_results.json"
# Disabled - we ALWAYS want real tests when the tools are available
IS_DOCKER = False  # never use demo mode
# Flag for PostgreSQL availability
_use_postgres = True
# ==============================================================================
# In-Memory Storage
# ==============================================================================
# In-memory storage (maintained alongside PostgreSQL for backwards compatibility)
_test_runs: List[Dict] = []
_current_runs: Dict[str, Any] = {}
_running_tests: Dict[str, Dict] = {}  # progress tracking for running tests
_persisted_results: Dict[str, Dict] = {}  # persisted test results (legacy)
def get_test_runs() -> List[Dict]:
"""Get all test runs."""
return _test_runs
def get_current_runs() -> Dict[str, Any]:
"""Get currently running tests."""
return _current_runs
def get_running_tests() -> Dict[str, Dict]:
"""Get running test progress."""
return _running_tests
def get_persisted_results() -> Dict[str, Dict]:
"""Get persisted test results."""
return _persisted_results
def set_persisted_results(results: Dict[str, Dict]):
"""Set persisted test results."""
global _persisted_results
_persisted_results = results
def is_postgres_available() -> bool:
"""Check if PostgreSQL is available."""
return _use_postgres
def set_postgres_available(available: bool):
"""Set PostgreSQL availability flag."""
global _use_postgres
_use_postgres = available
# ==============================================================================
# Tool Availability Checks
# ==============================================================================
def check_go_available() -> bool:
    """Checks whether Go is installed."""
    try:
        result = subprocess.run(["go", "version"], capture_output=True, timeout=5)
        return result.returncode == 0
    except Exception:
        return False
def check_pytest_available() -> bool:
    """Checks whether pytest is installed."""
    pytest_paths = ["/opt/venv/bin/pytest", "pytest"]
    for path in pytest_paths:
        try:
            result = subprocess.run(path.split() + ["--version"], capture_output=True, timeout=5)
            if result.returncode == 0:
                return True
        except Exception:
            continue
    return False
def get_go_version() -> Optional[str]:
    """Returns the installed Go version."""
    try:
        result = subprocess.run(["go", "version"], capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            # "go version go1.23.5 linux/arm64" -> "1.23.5"
            parts = result.stdout.strip().split()
            if len(parts) >= 3:
                return parts[2].replace("go", "")
    except Exception:
        pass
    return None
def get_pytest_version() -> Optional[str]:
    """Returns the installed pytest version."""
    try:
        result = subprocess.run(["/opt/venv/bin/pytest", "--version"], capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            # "pytest 8.x.x" -> "8.x.x"
            return result.stdout.strip().split()[1] if result.stdout else None
    except Exception:
        pass
    return None
# ==============================================================================
# Persistence Functions
# ==============================================================================
def check_postgres_available() -> bool:
"""Prueft ob PostgreSQL verfuegbar ist."""
global _use_postgres
try:
from ..database import check_db_connection
_use_postgres = check_db_connection()
except Exception:
_use_postgres = False
return _use_postgres
def load_persisted_results():
"""Laedt persistierte Testergebnisse beim Start - erst aus DB, dann JSON als Fallback"""
global _persisted_results
# Versuche zuerst aus PostgreSQL zu laden
if check_postgres_available():
try:
from ..database import get_db_session
from ..repository import TestRepository
with get_db_session() as db:
repo = TestRepository(db)
stats = repo.get_all_service_stats()
for stat in stats:
_persisted_results[stat.service] = {
"total": stat.total_tests,
"passed": stat.passed_tests,
"failed": stat.failed_tests,
"last_run": stat.last_run_at.isoformat() if stat.last_run_at else None,
"status": stat.last_status or "unknown",
"failed_test_ids": [] # Wird spaeter nachgeladen
}
print(f"Test-Ergebnisse aus PostgreSQL geladen: {len(stats)} Services")
return
except Exception as e:
print(f"Fehler beim Laden aus PostgreSQL: {e}")
    # Fallback: JSON file
if RESULTS_FILE.exists():
try:
with open(RESULTS_FILE, "r") as f:
_persisted_results = json.load(f)
print(f"Test-Ergebnisse aus JSON geladen: {len(_persisted_results)} Services")
except Exception as e:
print(f"Fehler beim Laden der Testergebnisse: {e}")
_persisted_results = {}
def save_persisted_results():
"""Speichert Testergebnisse - in PostgreSQL und JSON als Backup"""
# JSON als Backup speichern
try:
DATA_DIR.mkdir(parents=True, exist_ok=True)
with open(RESULTS_FILE, "w") as f:
json.dump(_persisted_results, f, indent=2, default=str)
except Exception as e:
print(f"Fehler beim Speichern der JSON-Testergebnisse: {e}")
def migrate_json_to_postgres() -> int:
"""Migriert bestehende JSON-Daten nach PostgreSQL (einmalig)."""
if not _use_postgres:
return 0
if not _persisted_results:
return 0
try:
from ..database import get_db_session
from ..repository import TestRepository
with get_db_session() as db:
repo = TestRepository(db)
count = repo.migrate_from_json(_persisted_results)
print(f"Migration abgeschlossen: {count} Services migriert")
return count
except Exception as e:
print(f"Fehler bei Migration: {e}")
return 0
# Load persisted results at import time
load_persisted_results()
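
For reference, a sketch of the legacy JSON file that save_persisted_results() writes, using the keys read back above; the service name and numbers are invented:

# Illustrative contents of /app/data/test_results.json
# {
#   "backend": {
#     "total": 57, "passed": 55, "failed": 2,
#     "failed_test_ids": ["tests/test_api.py::test_health"],
#     "last_run": "2026-02-03T12:00:00",
#     "status": "failed"
#   }
# }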

discovery/__init__.py

@@ -0,0 +1,16 @@
"""
Test Discovery Module
Functions for discovering tests in various frameworks.
"""
from .go_discovery import discover_go_tests
from .python_discovery import discover_python_tests, discover_bqas_tests
from .service_builder import build_service_info
__all__ = [
"discover_go_tests",
"discover_python_tests",
"discover_bqas_tests",
"build_service_info",
]

discovery/go_discovery.py

@@ -0,0 +1,45 @@
"""
Go Test Discovery
Functions for discovering Go tests in a codebase.
"""
from pathlib import Path
from typing import List
from ...models import TestCase, TestFramework, TestCategory
from ..config import PROJECT_ROOT
def discover_go_tests(base_path: Path) -> List[TestCase]:
"""Entdeckt Go-Tests in einem Verzeichnis"""
tests = []
if not base_path.exists():
return tests
    # Look for *_test.go files
test_files = list(base_path.rglob("*_test.go"))
for test_file in test_files:
        # Parse the test functions from the file
try:
content = test_file.read_text()
for i, line in enumerate(content.split("\n"), 1):
if line.strip().startswith("func Test"):
                    # Extract the function name
name_start = line.find("Test")
name_end = line.find("(", name_start)
if name_end > name_start:
func_name = line[name_start:name_end]
tests.append(TestCase(
id=f"{test_file.stem}_{func_name}",
name=func_name,
file_path=str(test_file.relative_to(PROJECT_ROOT)),
line_number=i,
framework=TestFramework.GO_TEST,
category=TestCategory.UNIT,
))
except Exception:
pass
return tests

discovery/python_discovery.py

@@ -0,0 +1,86 @@
"""
Python Test Discovery
Functions for discovering Python and BQAS tests in a codebase.
"""
import json
from pathlib import Path
from typing import List
from ...models import TestCase, TestFramework, TestCategory
from ..config import PROJECT_ROOT
def discover_python_tests(base_path: Path) -> List[TestCase]:
"""Entdeckt Python-Tests in einem Verzeichnis"""
tests = []
if not base_path.exists():
return tests
    # Look for test_*.py files
test_files = list(base_path.rglob("test_*.py"))
for test_file in test_files:
try:
content = test_file.read_text()
for i, line in enumerate(content.split("\n"), 1):
stripped = line.strip()
                # Test functions
if stripped.startswith("def test_"):
name_end = stripped.find("(")
if name_end > 4:
func_name = stripped[4:name_end]
tests.append(TestCase(
id=f"{test_file.stem}_{func_name}",
name=func_name,
file_path=str(test_file.relative_to(PROJECT_ROOT)),
line_number=i,
framework=TestFramework.PYTEST,
category=TestCategory.UNIT,
))
                # Async test functions
elif stripped.startswith("async def test_"):
name_end = stripped.find("(")
if name_end > 10:
func_name = stripped[10:name_end]
tests.append(TestCase(
id=f"{test_file.stem}_{func_name}",
name=func_name,
file_path=str(test_file.relative_to(PROJECT_ROOT)),
line_number=i,
framework=TestFramework.PYTEST,
category=TestCategory.UNIT,
))
except Exception:
pass
return tests
def discover_bqas_tests(base_path: Path, test_type: str) -> List[TestCase]:
"""Entdeckt BQAS-Tests (Golden/RAG)"""
tests = []
if not base_path.exists():
return tests
    # Look for JSON files
test_files = list(base_path.rglob("*.json"))
for test_file in test_files:
try:
content = json.loads(test_file.read_text())
if isinstance(content, list):
for i, test_case in enumerate(content):
test_id = test_case.get("id", f"{test_file.stem}_{i}")
tests.append(TestCase(
id=test_id,
name=test_case.get("name", test_id),
file_path=str(test_file.relative_to(PROJECT_ROOT)),
framework=TestFramework.BQAS_GOLDEN if test_type == "golden" else TestFramework.BQAS_RAG,
category=TestCategory.BQAS,
))
except Exception:
pass
return tests
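
The BQAS discovery above expects JSON files holding a list of test cases and reads only the "id" and "name" fields; a minimal illustrative file (contents invented):

# golden_cases.json (illustrative)
# [
#   {"id": "golden_001", "name": "Simple factual question"},
#   {"id": "golden_002", "name": "Multi-hop reasoning"}
# ]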

discovery/service_builder.py

@@ -0,0 +1,115 @@
"""
Service Info Builder
Builds ServiceTestInfo from service definitions.
"""
from datetime import datetime
from pathlib import Path
from typing import Dict
from ...models import ServiceTestInfo, TestStatus, TestFramework
from ..config import PROJECT_ROOT, get_persisted_results
from .go_discovery import discover_go_tests
from .python_discovery import discover_python_tests, discover_bqas_tests
def build_service_info(service_def: Dict) -> ServiceTestInfo:
"""Erstellt ServiceTestInfo aus einer Service-Definition"""
service_id = service_def["service"]
persisted_results = get_persisted_results()
    # Check whether the service is disabled
if service_def.get("disabled", False):
return ServiceTestInfo(
service=service_def["service"],
display_name=f"{service_def['display_name']} (deaktiviert)",
port=service_def.get("port"),
language=service_def["language"],
total_tests=0,
passed_tests=0,
failed_tests=0,
skipped_tests=0,
pass_rate=0.0,
coverage_percent=None,
last_run=None,
status=TestStatus.SKIPPED,
)
    # Check the persisted results first
if service_id in persisted_results:
persisted = persisted_results[service_id]
total = persisted.get("total", 0)
passed = persisted.get("passed", 0)
failed = persisted.get("failed", 0)
skipped = max(0, total - passed - failed)
pass_rate = (passed / total * 100) if total > 0 else 0.0
last_run_str = persisted.get("last_run")
last_run = datetime.fromisoformat(last_run_str) if last_run_str else None
return ServiceTestInfo(
service=service_def["service"],
display_name=service_def["display_name"],
port=service_def.get("port"),
language=service_def["language"],
total_tests=total,
passed_tests=passed,
failed_tests=failed,
skipped_tests=skipped,
pass_rate=pass_rate,
coverage_percent=None,
last_run=last_run,
status=TestStatus.PASSED if failed == 0 and total > 0 else TestStatus.FAILED if failed > 0 else TestStatus.PENDING,
)
    # No persisted results: fall back to test discovery
base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
framework = service_def["framework"]
    # Container-based services: no local discovery possible
if service_def.get("run_in_container", False):
        # No local tests - wait for an actual run
return ServiceTestInfo(
service=service_def["service"],
display_name=service_def["display_name"],
port=service_def.get("port"),
language=service_def["language"],
total_tests=0,
passed_tests=0,
failed_tests=0,
skipped_tests=0,
pass_rate=0.0,
coverage_percent=None,
last_run=None,
status=TestStatus.PENDING,
)
    # Test discovery based on the framework
if framework == TestFramework.GO_TEST:
tests = discover_go_tests(base_path)
elif framework == TestFramework.PYTEST:
tests = discover_python_tests(base_path)
elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
tests = discover_bqas_tests(base_path, test_type)
else:
tests = []
total = len(tests)
    # Without persisted results: tests were discovered but not yet executed.
    # Report only the number of discovered tests; all other values stay 0.
return ServiceTestInfo(
service=service_def["service"],
display_name=service_def["display_name"],
port=service_def.get("port"),
language=service_def["language"],
total_tests=total,
passed_tests=0,
failed_tests=0,
skipped_tests=0,
pass_rate=0.0,
coverage_percent=None,
last_run=None,
status=TestStatus.PENDING,
)
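
build_service_info() consumes service definitions shaped as below; the keys match the lookups above, the values are invented:

# Illustrative service definition (values invented)
example_service_def = {
    "service": "backend",
    "display_name": "Backend API",
    "port": 8080,
    "language": "python",
    "framework": TestFramework.PYTEST,
    "base_path": "/backend/tests",
    # optional flags consumed above:
    # "disabled": True,
    # "run_in_container": True,
}
info = build_service_info(example_service_def)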

executors/__init__.py

@@ -0,0 +1,23 @@
"""
Test Executors Module
Functions for running tests in various frameworks.
"""
from .go_executor import run_go_tests
from .python_executor import run_python_tests
from .bqas_executor import run_bqas_tests
from .jest_executor import run_jest_tests
from .playwright_executor import run_playwright_tests
from .container_executor import run_tests_in_container
from .test_runner import execute_test_run
__all__ = [
"run_go_tests",
"run_python_tests",
"run_bqas_tests",
"run_jest_tests",
"run_playwright_tests",
"run_tests_in_container",
"execute_test_run",
]

executors/bqas_executor.py

@@ -0,0 +1,44 @@
"""
BQAS Test Executor
Executes BQAS tests via API proxy.
"""
import json
from typing import Dict
import httpx
from ...models import TestFramework
async def run_bqas_tests(service_def: Dict) -> Dict:
"""Proxy zu BQAS API im Voice-Service"""
test_type = "golden" if service_def["framework"] == TestFramework.BQAS_GOLDEN else "rag"
try:
async with httpx.AsyncClient(timeout=120.0) as client:
response = await client.post(
f"http://localhost:8091/api/v1/bqas/run/{test_type}",
)
if response.status_code == 200:
data = response.json()
metrics = data.get("metrics", {})
return {
"passed": metrics.get("passed_tests", 0),
"failed": metrics.get("failed_tests", 0),
"total": metrics.get("total_tests", 0),
"output": json.dumps(data, indent=2)[:5000],
"failed_test_ids": [],
}
    except Exception:
        pass
    # Error path when the API is unreachable - deliberately NO demo data
return {
"passed": 0,
"failed": 0,
"total": 0,
"output": f"BQAS API nicht erreichbar. Nutze docker exec fuer {test_type} Tests.",
"failed_test_ids": [],
}

executors/container_executor.py

@@ -0,0 +1,106 @@
"""
Container Test Executor
Executes tests inside Docker containers via docker exec.
"""
import re
import asyncio
import subprocess
from typing import Dict
from ..config import get_running_tests
async def run_tests_in_container(
container_name: str,
framework: str,
base_path: str,
service_id: str,
pytest_args: str = ""
) -> Dict:
"""Fuehrt Tests in einem anderen Docker-Container aus via docker exec"""
running_tests = get_running_tests()
running_tests[service_id] = {
"current_file": "",
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "running"
}
try:
if framework == "pytest":
cmd = ["docker", "exec", container_name, "python", "-m", "pytest", base_path, "-v", "--tb=short", "-q"]
            # Append additional pytest arguments
if pytest_args:
cmd.extend(pytest_args.split())
else:
cmd = ["docker", "exec", container_name, "go", "test", "-v", "./..."]
def run_docker_exec():
return subprocess.run(cmd, capture_output=True, text=True, timeout=600)
result = await asyncio.to_thread(run_docker_exec)
output = result.stdout + result.stderr
passed = 0
failed = 0
failed_test_ids = []
if framework == "pytest":
# Parse pytest output
for line in output.split("\n"):
if "passed" in line.lower():
match = re.search(r"(\d+)\s+passed", line)
if match:
passed = int(match.group(1))
if "failed" in line.lower():
match = re.search(r"(\d+)\s+failed", line)
if match:
failed = int(match.group(1))
if line.startswith("FAILED"):
test_name = line.replace("FAILED", "").strip()
failed_test_ids.append(test_name)
else:
# Parse go test output
for line in output.split("\n"):
if line.startswith("--- PASS:"):
passed += 1
elif line.startswith("--- FAIL:"):
failed += 1
match = re.search(r"--- FAIL: (\S+)", line)
if match:
failed_test_ids.append(match.group(1))
total = passed + failed
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "completed"
}
return {
"passed": passed,
"failed": failed,
"total": total,
"output": output,
"failed_test_ids": failed_test_ids
}
except Exception as e:
running_tests[service_id] = {
"current_file": str(e),
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "error"
}
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}

executors/go_executor.py

@@ -0,0 +1,137 @@
"""
Go Test Executor
Executes Go tests and parses results.
"""
import os
import json
import subprocess
import asyncio
from pathlib import Path
from typing import Dict
from ..config import check_go_available, get_running_tests
from ..services.error_handling import extract_go_error, classify_go_error, suggest_go_fix
async def run_go_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Go-Tests aus (Thread-basiert, blockiert nicht den Event Loop)"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
    # Check whether Go is installed
go_available = check_go_available()
if not go_available:
return {"passed": 0, "failed": 0, "total": 0, "output": "Go nicht installiert - Tests koennen nicht ausgefuehrt werden", "failed_test_ids": []}
    # Initial progress state
if service_id:
running_tests[service_id] = {
"current_file": "Starte Go-Tests...",
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "running"
}
def run_go_tests_sync():
"""Laeuft in separatem Thread"""
try:
env = os.environ.copy()
env["GOPATH"] = "/tmp/go"
env["GOCACHE"] = "/tmp/go-cache"
env["CGO_ENABLED"] = "0"
result = subprocess.run(
["go", "test", "-v", "-json", "./..."],
cwd=str(base_path),
capture_output=True,
text=True,
timeout=300,
env=env,
)
passed = failed = 0
failed_test_ids = []
test_outputs = {}
for line in result.stdout.split("\n"):
if line.strip():
try:
event = json.loads(line)
action = event.get("Action")
test_name = event.get("Test", "")
pkg = event.get("Package", "")
if action == "pass" and test_name:
passed += 1
if service_id:
running_tests[service_id] = {
"current_file": f"{test_name}",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "running"
}
elif action == "fail" and test_name:
failed += 1
test_key = f"{pkg}::{test_name}"
error_output = test_outputs.get(test_key, "")
error_message = extract_go_error(error_output)
failed_test_ids.append({
"id": test_key,
"name": test_name,
"package": pkg,
"file_path": pkg.replace("github.com/", ""),
"error_message": error_message or "Test fehlgeschlagen - keine Details",
"error_type": classify_go_error(error_message),
"suggestion": suggest_go_fix(error_message),
})
if service_id:
running_tests[service_id] = {
"current_file": f"{test_name}",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "running"
}
elif action == "output" and test_name:
test_key = f"{pkg}::{test_name}"
test_outputs[test_key] = test_outputs.get(test_key, "") + event.get("Output", "")
except json.JSONDecodeError:
pass
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": result.stdout[:5000] if result.stdout else result.stderr[:5000],
"failed_test_ids": failed_test_ids,
}
except subprocess.TimeoutExpired:
return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
except Exception as e:
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, run_go_tests_sync)
    # Final progress state
if service_id:
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": 1,
"files_total": 1,
"passed": result.get("passed", 0),
"failed": result.get("failed", 0),
"status": "completed"
}
return result
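
For reference, `go test -json` emits one JSON object per line; a trimmed sample limited to the fields the parser above reads (package and test names invented):

# {"Action":"run","Package":"example.com/api","Test":"TestCreateUser"}
# {"Action":"output","Package":"example.com/api","Test":"TestCreateUser","Output":"--- FAIL: TestCreateUser\n"}
# {"Action":"fail","Package":"example.com/api","Test":"TestCreateUser","Elapsed":0.01}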

executors/jest_executor.py

@@ -0,0 +1,130 @@
"""
Jest Test Executor
Executes Jest tests for JavaScript/TypeScript projects.
"""
import os
import re
import json
import subprocess
from pathlib import Path
from typing import Dict
from ..config import get_running_tests
async def run_jest_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Jest-Tests aus"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
running_tests[service_id] = {
"current_file": "Starte Jest...",
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "running"
}
    # Check whether Node.js is available
    try:
        node_check = subprocess.run(["node", "--version"], capture_output=True, timeout=5)
        if node_check.returncode != 0:
            return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht installiert", "failed_test_ids": []}
    except Exception:
        return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht verfuegbar", "failed_test_ids": []}
try:
        # Change into the project directory and run Jest
env = os.environ.copy()
env["CI"] = "true" # Nicht-interaktiver Modus
result = subprocess.run(
["npm", "test", "--", "--json", "--passWithNoTests"],
cwd=str(base_path),
capture_output=True,
text=True,
timeout=300,
env=env,
)
output = result.stdout + result.stderr
passed = 0
failed = 0
failed_test_ids = []
        # Try to parse the JSON output
try:
            # Jest's JSON starts with {"num... - find where it begins
json_start = output.find('{"num')
if json_start == -1:
json_start = output.rfind('{"')
if json_start != -1:
json_str = output[json_start:]
                # Try to parse the JSON with json.JSONDecoder
decoder = json.JSONDecoder()
try:
jest_result, _ = decoder.raw_decode(json_str)
passed = jest_result.get("numPassedTests", 0)
failed = jest_result.get("numFailedTests", 0)
                    # Extract the failed tests
for test_result in jest_result.get("testResults", []):
for assertion in test_result.get("assertionResults", []):
if assertion.get("status") == "failed":
failed_test_ids.append({
"id": f"{test_result.get('name', '')}::{assertion.get('fullName', '')}",
"name": assertion.get("fullName", ""),
"file_path": test_result.get("name", ""),
"error_message": " ".join(assertion.get("failureMessages", []))[:500],
"error_type": "assertion",
"suggestion": "Pruefe die Test-Assertions und erwarteten Werte",
})
except json.JSONDecodeError:
                    # Fallback: parse the text output with regexes
for line in output.split("\n"):
if "passed" in line.lower():
match = re.search(r"(\d+)\s+passed", line)
if match:
passed = int(match.group(1))
if "failed" in line.lower():
match = re.search(r"(\d+)\s+failed", line)
if match:
failed = int(match.group(1))
except Exception:
            # General fallback: parse the text output
for line in output.split("\n"):
if "passed" in line.lower():
match = re.search(r"(\d+)\s+passed", line)
if match:
passed = int(match.group(1))
if "failed" in line.lower():
match = re.search(r"(\d+)\s+failed", line)
if match:
failed = int(match.group(1))
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "completed"
}
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": output[:5000],
"failed_test_ids": failed_test_ids,
}
except subprocess.TimeoutExpired:
return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
except Exception as e:
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}

executors/playwright_executor.py

@@ -0,0 +1,101 @@
"""
Playwright Test Executor
Executes Playwright E2E tests.
"""
import os
import re
import json
import subprocess
from pathlib import Path
from typing import Dict
from ..config import get_running_tests
async def run_playwright_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Playwright E2E-Tests aus"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
running_tests[service_id] = {
"current_file": "Starte Playwright...",
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "running"
}
try:
env = os.environ.copy()
env["CI"] = "true"
result = subprocess.run(
["npx", "playwright", "test", "--reporter=json"],
cwd=str(base_path),
capture_output=True,
text=True,
            timeout=600,  # E2E tests take longer
env=env,
)
output = result.stdout + result.stderr
passed = 0
failed = 0
failed_test_ids = []
        # Parse the Playwright JSON output
try:
pw_result = json.loads(output)
for suite in pw_result.get("suites", []):
for spec in suite.get("specs", []):
for test in spec.get("tests", []):
for result_item in test.get("results", []):
if result_item.get("status") == "passed":
passed += 1
elif result_item.get("status") == "failed":
failed += 1
failed_test_ids.append({
"id": spec.get("title", ""),
"name": spec.get("title", ""),
"file_path": spec.get("file", ""),
"error_message": result_item.get("error", {}).get("message", "")[:500],
"error_type": "e2e",
"suggestion": "Pruefe den E2E-Test und die Anwendung",
})
except json.JSONDecodeError:
            # Fallback: parse the text output
for line in output.split("\n"):
if "passed" in line.lower():
match = re.search(r"(\d+)\s+passed", line)
if match:
passed = int(match.group(1))
if "failed" in line.lower():
match = re.search(r"(\d+)\s+failed", line)
if match:
failed = int(match.group(1))
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "completed"
}
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": output[:5000],
"failed_test_ids": failed_test_ids,
}
except subprocess.TimeoutExpired:
return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 10 Minuten", "failed_test_ids": []}
except Exception as e:
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}

executors/python_executor.py

@@ -0,0 +1,187 @@
"""
Python Test Executor
Executes pytest tests with live progress updates.
"""
import os
import re
import subprocess
import asyncio
from pathlib import Path
from typing import Dict
from ..config import get_running_tests
from ..services.error_handling import extract_pytest_error, classify_pytest_error, suggest_pytest_fix
async def run_python_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Python-Tests aus mit Live-Progress-Updates (Thread-basiert)"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
    # Try several pytest locations
pytest_paths = [
"/opt/venv/bin/pytest", # Docker venv
"pytest", # System pytest
"python -m pytest", # Als Modul
]
pytest_cmd = None
for path in pytest_paths:
try:
check = subprocess.run(
path.split() + ["--version"],
capture_output=True,
timeout=5,
)
if check.returncode == 0:
pytest_cmd = path.split()
break
        except Exception:
continue
if not pytest_cmd:
return {"passed": 0, "failed": 0, "total": 0, "output": "pytest nicht gefunden", "failed_test_ids": []}
    # Count all test files first
test_files = []
test_dir = base_path if base_path.is_dir() else base_path.parent
for f in test_dir.rglob("test_*.py"):
test_files.append(f.name)
total_files = len(test_files) if test_files else 1
    # Initial progress state
running_tests[service_id] = {
"current_file": "Starte Tests...",
"files_done": 0,
"files_total": total_files,
"passed": 0,
"failed": 0,
"status": "running"
}
    # Result container
result_container = {
"output_lines": [],
"passed": 0,
"failed": 0,
"files_seen": set(),
"current_file": "",
"done": False,
"error": None
}
def run_pytest_with_progress():
"""Laeuft in separatem Thread - blockiert nicht den Event Loop"""
try:
cwd = str(base_path.parent) if base_path.is_file() else str(base_path)
            # Unbuffered output for real-time progress
env = os.environ.copy()
env["PYTHONUNBUFFERED"] = "1"
process = subprocess.Popen(
pytest_cmd + ["-v", "-s", "--tb=short", str(base_path)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
cwd=cwd,
bufsize=1,
env=env,
)
for line in iter(process.stdout.readline, ''):
if not line:
break
result_container["output_lines"].append(line)
line_stripped = line.strip()
                # Parse test results
match = re.match(r'(\S+\.py)::(\S+)\s+(PASSED|FAILED|SKIPPED|ERROR)', line_stripped)
if match:
file_path = match.group(1)
status = match.group(3)
file_name = Path(file_path).name
if file_name not in result_container["files_seen"]:
result_container["files_seen"].add(file_name)
result_container["current_file"] = file_name
if status == "PASSED":
result_container["passed"] += 1
elif status in ("FAILED", "ERROR"):
result_container["failed"] += 1
                    # Update progress
running_tests[service_id] = {
"current_file": result_container["current_file"],
"files_done": len(result_container["files_seen"]),
"files_total": max(total_files, len(result_container["files_seen"])),
"passed": result_container["passed"],
"failed": result_container["failed"],
"status": "running"
}
process.wait()
result_container["done"] = True
except Exception as e:
result_container["error"] = str(e)
result_container["done"] = True
    # Start the test execution in a separate thread
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, run_pytest_with_progress)
full_output = "".join(result_container["output_lines"])
passed = result_container["passed"]
failed = result_container["failed"]
files_seen = result_container["files_seen"]
if result_container["error"]:
running_tests[service_id] = {
"current_file": result_container["error"],
"files_done": 0,
"files_total": total_files,
"passed": 0,
"failed": 0,
"status": "error"
}
return {"passed": 0, "failed": 0, "total": 0, "output": result_container["error"], "failed_test_ids": []}
    # Parse the failed tests from the output
failed_test_ids = []
for match in re.finditer(r'FAILED\s+(\S+)::(\S+)', full_output):
file_path = match.group(1)
test_name = match.group(2)
error_msg = extract_pytest_error(full_output, test_name)
failed_test_ids.append({
"id": f"{file_path}::{test_name}",
"name": test_name,
"file_path": file_path,
"error_message": error_msg or "Test fehlgeschlagen - keine Details",
"error_type": classify_pytest_error(error_msg),
"suggestion": suggest_pytest_fix(error_msg),
})
    # Final progress state
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": len(files_seen),
"files_total": len(files_seen),
"passed": passed,
"failed": failed,
"status": "completed"
}
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": full_output[:5000],
"failed_test_ids": failed_test_ids,
}
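
For reference, the progress regex above matches `pytest -v` result lines, and the failure parser matches the short-summary lines; illustrative samples:

# tests/test_api.py::test_health PASSED
# tests/test_api.py::test_create_user FAILED
# FAILED tests/test_api.py::test_create_user - AssertionError: ...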

executors/test_runner.py

@@ -0,0 +1,192 @@
"""
Test Runner
Orchestrates test execution and persists results.
"""
from datetime import datetime
from typing import Dict
from ...models import TestRun, RunStatus, TestFramework
from ..config import (
PROJECT_ROOT,
get_test_runs,
get_current_runs,
get_persisted_results,
save_persisted_results,
is_postgres_available,
)
from .go_executor import run_go_tests
from .python_executor import run_python_tests
from .bqas_executor import run_bqas_tests
from .jest_executor import run_jest_tests
from .playwright_executor import run_playwright_tests
from .container_executor import run_tests_in_container
async def execute_test_run(run_id: str, service_def: Dict):
"""Fuehrt einen Test-Run im Hintergrund aus"""
test_runs = get_test_runs()
current_runs = get_current_runs()
persisted_results = get_persisted_results()
framework = service_def["framework"]
service_id = service_def["service"]
base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
    # Check whether the service is disabled
if service_def.get("disabled", False):
reason = service_def.get("disabled_reason", "Service deaktiviert")
run = TestRun(
id=run_id,
suite_id=service_id,
service=service_id,
started_at=datetime.now(),
completed_at=datetime.now(),
status=RunStatus.COMPLETED,
output=f"Service deaktiviert: {reason}",
)
current_runs[run_id] = run
test_runs.append({
"id": run.id,
"suite_id": run.suite_id,
"service": run.service,
"started_at": run.started_at.isoformat(),
"completed_at": run.completed_at.isoformat(),
"status": run.status.value,
"total_tests": 0,
"passed_tests": 0,
"failed_tests": 0,
"failed_test_ids": [],
"duration_seconds": 0,
})
return
    # Check whether the tests should run in another container
run_in_container = service_def.get("run_in_container", False)
container_name = service_def.get("container_name", "")
run = TestRun(
id=run_id,
suite_id=service_id,
service=service_id,
started_at=datetime.now(),
status=RunStatus.RUNNING,
)
current_runs[run_id] = run
try:
        # Real test execution based on the framework
        if run_in_container and container_name:
            # Run the tests in the external container
framework_str = "pytest" if framework == TestFramework.PYTEST else "go"
container_base_path = service_def.get("base_path", "/app/tests")
pytest_args = service_def.get("pytest_args", "")
result = await run_tests_in_container(container_name, framework_str, container_base_path, service_id, pytest_args)
elif framework == TestFramework.GO_TEST:
result = await run_go_tests(base_path, service_id=service_id)
elif framework == TestFramework.PYTEST:
result = await run_python_tests(base_path, service_id=service_id)
elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
result = await run_bqas_tests(service_def)
elif framework == TestFramework.JEST:
result = await run_jest_tests(base_path, service_id=service_id)
elif framework == TestFramework.PLAYWRIGHT:
result = await run_playwright_tests(base_path, service_id=service_id)
else:
result = {"passed": 0, "failed": 0, "total": 0, "output": "Framework nicht unterstuetzt"}
run.completed_at = datetime.now()
run.status = RunStatus.COMPLETED if result.get("failed", 0) == 0 else RunStatus.FAILED
run.total_tests = result.get("total", 0)
run.passed_tests = result.get("passed", 0)
run.failed_tests = result.get("failed", 0)
run.failed_test_ids = result.get("failed_test_ids", [])
run.duration_seconds = (run.completed_at - run.started_at).total_seconds()
run.output = result.get("output", "")
except Exception as e:
run.completed_at = datetime.now()
run.status = RunStatus.FAILED
run.output = str(e)
    # Save to the history (in-memory)
test_runs.append({
"id": run.id,
"suite_id": run.suite_id,
"service": run.service,
"started_at": run.started_at.isoformat(),
"completed_at": run.completed_at.isoformat() if run.completed_at else None,
"status": run.status.value,
"total_tests": run.total_tests,
"passed_tests": run.passed_tests,
"failed_tests": run.failed_tests,
"failed_test_ids": run.failed_test_ids,
"duration_seconds": run.duration_seconds,
})
    # Persist results (legacy in-memory dict)
persisted_results[service_id] = {
"total": run.total_tests,
"passed": run.passed_tests,
"failed": run.failed_tests,
"failed_test_ids": run.failed_test_ids,
"last_run": run.completed_at.isoformat() if run.completed_at else datetime.now().isoformat(),
"status": run.status.value,
}
save_persisted_results()
    # PostgreSQL persistence
if is_postgres_available():
try:
from ...database import get_db_session
from ...repository import TestRepository
with get_db_session() as db:
repo = TestRepository(db)
                # Create the run if it does not exist yet
db_run = repo.get_run(run.id)
if not db_run:
db_run = repo.create_run(
run_id=run.id,
service=service_id,
framework=framework.value,
triggered_by="manual"
)
                # Finalize the run
repo.complete_run(
run_id=run.id,
status=run.status.value,
total_tests=run.total_tests,
passed_tests=run.passed_tests,
failed_tests=run.failed_tests,
skipped_tests=0,
duration_seconds=run.duration_seconds,
output=run.output
)
                # Store individual test results (the failed tests)
if run.failed_test_ids:
results_to_add = []
for failed in run.failed_test_ids:
if isinstance(failed, dict):
results_to_add.append({
"name": failed.get("name") or failed.get("id", "unknown"),
"file_path": failed.get("file_path"),
"status": "failed",
"error_message": failed.get("error_message"),
"error_type": failed.get("error_type"),
"suggestion": failed.get("suggestion")
})
elif isinstance(failed, str):
results_to_add.append({
"name": failed,
"status": "failed"
})
if results_to_add:
repo.add_results(run.id, results_to_add)
except Exception as e:
print(f"Fehler beim PostgreSQL-Speichern: {e}")

routes/__init__.py

@@ -0,0 +1,21 @@
"""
Test Registry Routes Module
All API endpoints for the test registry.
"""
from fastapi import APIRouter
from .tests import router as tests_router
from .backlog import router as backlog_router
from .ci import router as ci_router
# Create main router
router = APIRouter(prefix="/api/tests", tags=["Test Registry"])
# Include sub-routers
router.include_router(tests_router)
router.include_router(backlog_router)
router.include_router(ci_router)
__all__ = ["router"]

routes/backlog.py

@@ -0,0 +1,580 @@
"""
Test Registry - Backlog Endpoints
Endpoints for failed test backlog management.
"""
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, HTTPException, Query
from ...database import get_db_session
from ...repository import TestRepository
from ..api_models import (
BacklogStatusUpdate,
BacklogPriorityUpdate,
FixAttempt,
ManualBacklogEntry,
)
from ..config import (
get_test_runs,
get_persisted_results,
is_postgres_available,
migrate_json_to_postgres,
)
router = APIRouter()
@router.get("/failed")
async def get_failed_tests():
"""
    Returns all failed tests from the persisted results.
    Used for backlog management, with human-readable error descriptions.
"""
persisted_results = get_persisted_results()
failed_tests = []
    # Collect failed tests from the persisted results
for service, data in persisted_results.items():
run_time = data.get("last_run", "")
run_id = f"persisted_{service}"
        # Fetch the failed test IDs
for failed in data.get("failed_test_ids", []):
if isinstance(failed, dict):
failed_tests.append({
"id": failed.get("id", ""),
"name": failed.get("name", ""),
"service": service,
"file_path": failed.get("file_path", ""),
"line_number": failed.get("line_number"),
"error_message": failed.get("error_message", "Keine Fehlermeldung verfuegbar"),
"error_type": failed.get("error_type", "unknown"),
"suggestion": failed.get("suggestion", ""),
"run_id": run_id,
"last_failed": run_time,
"status": "open", # open, in_progress, fixed
})
elif isinstance(failed, str):
                # Legacy format: only the test ID as a string
failed_tests.append({
"id": failed,
"name": failed,
"service": service,
"file_path": "",
"line_number": None,
"error_message": "Keine Details verfuegbar",
"error_type": "unknown",
"suggestion": "",
"run_id": run_id,
"last_failed": run_time,
"status": "open",
})
    # Deduplicate by test ID (keep only the newest entry)
seen = {}
for test in failed_tests:
test_id = test["id"]
if test_id not in seen or test["last_failed"] > seen[test_id]["last_failed"]:
seen[test_id] = test
unique_failed = list(seen.values())
    # Group by service
by_service = {}
for test in unique_failed:
service = test["service"]
if service not in by_service:
by_service[service] = []
by_service[service].append(test)
return {
"total_failed": len(unique_failed),
"by_service": by_service,
"tests": unique_failed,
"last_updated": datetime.now().isoformat(),
}
@router.post("/failed/{test_id}/status")
async def update_failed_test_status(test_id: str, status: str):
"""
    Updates the status of a failed test.
    Status: 'open', 'in_progress', 'fixed', 'wont_fix'
    Legacy endpoint - now uses PostgreSQL when available.
"""
valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
if status not in valid_statuses:
raise HTTPException(
status_code=400,
detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
)
    # Try to store the status in PostgreSQL
if is_postgres_available():
try:
with get_db_session() as db:
repo = TestRepository(db)
                # Look up the backlog item by test_id
backlog_items = repo.get_backlog()
for item in backlog_items:
if item.test_name == test_id or str(item.id) == test_id:
repo.update_backlog_status(item.id, status)
return {
"test_id": test_id,
"backlog_id": item.id,
"status": status,
"updated_at": datetime.now().isoformat(),
"message": f"Test-Status auf '{status}' gesetzt (PostgreSQL)",
}
except Exception as e:
print(f"PostgreSQL-Fehler: {e}")
    # Fallback: just return a confirmation
return {
"test_id": test_id,
"status": status,
"updated_at": datetime.now().isoformat(),
"message": f"Test-Status auf '{status}' gesetzt",
}
@router.get("/backlog")
async def get_backlog(
status: Optional[str] = Query(None, description="Filter nach Status: open, in_progress, fixed, wont_fix, flaky"),
service: Optional[str] = Query(None, description="Filter nach Service"),
priority: Optional[str] = Query(None, description="Filter nach Prioritaet: critical, high, medium, low"),
limit: int = Query(100, ge=1, le=500),
offset: int = Query(0, ge=0)
):
"""
    Returns the persistent backlog of failed tests.
    The backlog aggregates failed tests across multiple runs
    and enables status management (open -> in_progress -> fixed).
"""
if not is_postgres_available():
        # Fall back to the legacy /failed endpoint
return await get_failed_tests()
try:
with get_db_session() as db:
repo = TestRepository(db)
items = repo.get_backlog(
status=status,
service=service,
priority=priority,
limit=limit,
offset=offset
)
total = repo.get_backlog_count(status=status, service=service)
            # Group by service
by_service = {}
for item in items:
svc = item.service
if svc not in by_service:
by_service[svc] = []
by_service[svc].append(item.to_dict())
return {
"total": total,
"items": [item.to_dict() for item in items],
"by_service": by_service,
"filters": {
"status": status,
"service": service,
"priority": priority
},
"pagination": {
"limit": limit,
"offset": offset
}
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/backlog/{backlog_id}")
async def get_backlog_item(backlog_id: int):
"""
    Returns details for a single backlog entry,
    including its fix history.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
try:
with get_db_session() as db:
repo = TestRepository(db)
item = repo.get_backlog_item(backlog_id)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
            # Fetch the fix history
fixes = repo.get_fix_history(backlog_id)
result = item.to_dict()
result["fixes"] = [fix.to_dict() for fix in fixes]
return result
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/status")
async def update_backlog_item_status(backlog_id: int, update: BacklogStatusUpdate):
"""
    Updates the status of a backlog entry.
    Possible statuses:
    - open: not yet worked on
    - in_progress: currently being worked on
    - fixed: the test has been fixed
    - wont_fix: will not be fixed (with justification)
    - flaky: flaky test, handled separately
"""
valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
if update.status not in valid_statuses:
raise HTTPException(
status_code=400,
detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
)
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
try:
with get_db_session() as db:
repo = TestRepository(db)
item = repo.update_backlog_status(
backlog_id=backlog_id,
status=update.status,
notes=update.notes,
assigned_to=update.assigned_to
)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
return item.to_dict()
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/priority")
async def update_backlog_item_priority(backlog_id: int, update: BacklogPriorityUpdate):
"""
    Updates the priority of a backlog entry.
    Possible priorities:
    - critical: critical - fix immediately
    - high: high - fix soon
    - medium: medium - when convenient
    - low: low - eventually
"""
valid_priorities = ["critical", "high", "medium", "low"]
if update.priority not in valid_priorities:
raise HTTPException(
status_code=400,
detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
)
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
try:
with get_db_session() as db:
repo = TestRepository(db)
item = repo.update_backlog_priority(backlog_id, update.priority)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
return item.to_dict()
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/fix")
async def add_fix_attempt(backlog_id: int, fix: FixAttempt):
"""
    Adds a fix attempt to the history.
    If success=True, the backlog status is automatically set to 'fixed'.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
valid_fix_types = ["manual", "auto_claude", "auto_script"]
if fix.fix_type not in valid_fix_types:
raise HTTPException(
status_code=400,
detail=f"Ungueltiger Fix-Typ. Erlaubt: {', '.join(valid_fix_types)}"
)
try:
with get_db_session() as db:
repo = TestRepository(db)
            # Check whether the backlog item exists
item = repo.get_backlog_item(backlog_id)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
            # Add the fix attempt
fix_record = repo.add_fix_attempt(
backlog_id=backlog_id,
fix_type=fix.fix_type,
fix_description=fix.fix_description,
commit_hash=fix.commit_hash,
success=fix.success
)
return {
"fix": fix_record.to_dict(),
"backlog_status": "fixed" if fix.success else item.status
}
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog")
async def create_backlog_entry(entry: ManualBacklogEntry):
"""
    Creates a manual backlog entry.
    Useful for:
    - features that are not yet integrated (xfail tests)
    - known issues that still need to be fixed
    - feature requests arising from the test context
"""
from ...db_models import FailedTestBacklogDB
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
valid_priorities = ["critical", "high", "medium", "low"]
if entry.priority not in valid_priorities:
raise HTTPException(
status_code=400,
detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
)
try:
with get_db_session() as db:
now = datetime.utcnow()
            # Check whether an open entry already exists
existing = db.query(FailedTestBacklogDB).filter(
FailedTestBacklogDB.test_name == entry.test_name,
FailedTestBacklogDB.service == entry.service,
FailedTestBacklogDB.status == "open"
).first()
if existing:
                # Update the existing entry
existing.error_message = entry.error_message
existing.priority = entry.priority
existing.fix_suggestion = entry.fix_suggestion
existing.last_failed_at = now
db.commit()
return {
"id": existing.id,
"status": "updated",
"message": f"Existierender Backlog-Eintrag aktualisiert"
}
            # Create a new entry
backlog = FailedTestBacklogDB(
test_name=entry.test_name,
test_file=f"{entry.service}/",
service=entry.service,
framework="manual",
error_message=entry.error_message,
error_type="feature_not_integrated",
status="open",
priority=entry.priority,
fix_suggestion=entry.fix_suggestion,
first_failed_at=now,
last_failed_at=now,
failure_count=1
)
db.add(backlog)
db.commit()
db.refresh(backlog)
return {
"id": backlog.id,
"status": "created",
"message": f"Backlog-Eintrag erstellt: {entry.test_name}"
}
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/history")
async def get_test_history(
service: Optional[str] = Query(None, description="Filter nach Service"),
days: int = Query(30, ge=1, le=365, description="Anzahl Tage zurueck"),
limit: int = Query(100, ge=1, le=1000)
):
"""
    Returns the test run history for trend analyses.
    Aggregates data by day and service.
"""
test_runs = get_test_runs()
if not is_postgres_available():
        # Fall back to the in-memory history
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
return {"runs": sorted_runs[:limit], "source": "memory"}
try:
with get_db_session() as db:
repo = TestRepository(db)
history = repo.get_run_history(
service=service,
days=days,
limit=limit
)
return {
"history": history,
"days": days,
"service": service,
"source": "postgresql"
}
except Exception as e:
        # Fall back to in-memory data
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
return {"runs": sorted_runs[:limit], "source": "memory", "error": str(e)}
@router.get("/trends")
async def get_test_trends(
service: Optional[str] = Query(None, description="Filter nach Service"),
days: int = Query(14, ge=1, le=90, description="Anzahl Tage")
):
"""
    Returns trend data for visualizations.
    Shows pass rate and test counts over time.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar fuer Trends")
try:
with get_db_session() as db:
repo = TestRepository(db)
history = repo.get_run_history(service=service, days=days, limit=days * 20)
            # Aggregate by day
by_date = {}
for entry in history:
date = entry["date"]
if date not in by_date:
by_date[date] = {
"date": date,
"total_tests": 0,
"passed": 0,
"failed": 0,
"runs": 0
}
by_date[date]["total_tests"] += entry["total_tests"]
by_date[date]["passed"] += entry["passed"]
by_date[date]["failed"] += entry["failed"]
by_date[date]["runs"] += entry["runs"]
            # Compute the per-day pass rate
trends = []
for date, data in sorted(by_date.items()):
total = data["total_tests"]
data["pass_rate"] = round((data["passed"] / total * 100) if total > 0 else 0, 1)
trends.append(data)
return {
"trends": trends,
"days": days,
"service": service
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/stats")
async def get_aggregated_stats():
"""
    Returns aggregated statistics across all services.
    Combines data from PostgreSQL and the service definitions.
"""
from ...models import TestRegistryStats
persisted_results = get_persisted_results()
if is_postgres_available():
try:
with get_db_session() as db:
repo = TestRepository(db)
summary = repo.get_summary_stats()
service_stats = repo.get_all_service_stats()
return {
"summary": summary,
"services": [s.to_dict() for s in service_stats],
"source": "postgresql"
}
except Exception as e:
print(f"PostgreSQL-Fehler: {e}")
# Fallback auf Legacy-Daten
stats = TestRegistryStats()
for service, data in persisted_results.items():
stats.total_tests += data.get("total", 0)
stats.total_passed += data.get("passed", 0)
stats.total_failed += data.get("failed", 0)
stats.services_count = len(persisted_results)
stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0
return {
"summary": {
"total_tests": stats.total_tests,
"total_passed": stats.total_passed,
"total_failed": stats.total_failed,
"total_skipped": stats.total_skipped,
"services_count": stats.services_count,
"overall_pass_rate": round(stats.overall_pass_rate, 1)
},
"services": list(persisted_results.keys()),
"source": "memory"
}
@router.post("/migrate")
async def trigger_migration():
"""
Migriert bestehende JSON-Daten nach PostgreSQL.
Einmalig ausfuehren um historische Daten zu uebernehmen.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL not available")
count = migrate_json_to_postgres()
return {
"migrated_services": count,
"message": f"{count} Services von JSON nach PostgreSQL migriert"
}
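# One-time migration sketch (assumption: this router is mounted under a
# /tests prefix on localhost:8000; adjust to the real deployment):
#
#     curl -X POST http://localhost:8000/tests/migrate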

View File

@@ -0,0 +1,295 @@
"""
Test Registry - CI/CD Integration Endpoints
Endpoints for receiving results from CI/CD pipelines.
"""
from datetime import datetime
from typing import Dict
from fastapi import APIRouter, BackgroundTasks
from ...database import get_db_session
from ...repository import TestRepository
from ..api_models import CIResultRequest
from ..config import (
get_test_runs,
get_persisted_results,
is_postgres_available,
)
router = APIRouter()
@router.post("/ci-result")
async def receive_ci_result(result: CIResultRequest, background_tasks: BackgroundTasks):
"""
Receives test results from the CI/CD pipeline.
Called by the report-test-results step in .woodpecker/main.yml.
Flow:
1. The pipeline runs the tests and collects JSON results
2. The pipeline sends detailed per-service results here
3. This endpoint stores them in PostgreSQL
4. The dashboard displays the data
test_results format:
{
"service": "consent-service",
"framework": "go",
"total": 57,
"passed": 57,
"failed": 0,
"skipped": 0,
"coverage": 75.5
}
"""
test_runs = get_test_runs()
persisted_results = get_persisted_results()
# Extract the service-specific data from test_results
tr = result.test_results or {}
service_name = tr.get("service", "ci-pipeline")
framework = tr.get("framework", "unknown")
total = tr.get("total", 0)
passed = tr.get("passed", 0)
failed = tr.get("failed", 0)
skipped = tr.get("skipped", 0)
coverage = tr.get("coverage", 0)
# Debug logging
print(f"[CI-RESULT] Pipeline {result.pipeline_id} - Service: {service_name}")
print(f"[CI-RESULT] Tests: {passed}/{total} passed, {failed} failed, {skipped} skipped")
print(f"[CI-RESULT] Coverage: {coverage}%, Commit: {result.commit[:8]}")
# Store in PostgreSQL if available
if is_postgres_available():
try:
with get_db_session() as db:
repo = TestRepository(db)
# Create a unique run ID per service
run_id = f"ci-{result.pipeline_id}-{service_name}"
# Create the test-run entry
repo.create_run(
run_id=run_id,
service=service_name,
framework=framework,
triggered_by="ci",
git_commit=result.commit[:8] if result.commit else None,
git_branch=result.branch
)
# Mark as completed with detailed counts
status = "passed" if failed == 0 else "failed"
repo.complete_run(
run_id=run_id,
status=status,
total_tests=total,
passed_tests=passed,
failed_tests=failed,
skipped_tests=skipped,
duration_seconds=0
)
print(f"[CI-RESULT] Stored as run_id: {run_id}, status: {status}")
# IMPORTANT: update the in-memory cache so the frontend updates immediately
persisted_results[service_name] = {
"total": total,
"passed": passed,
"failed": failed,
"last_run": datetime.utcnow().isoformat(),
"status": status,
"failed_test_ids": []
}
print(f"[CI-RESULT] Updated cache for {service_name}: {passed}/{total} passed")
# On failed tests: create a backlog entry
if failed > 0:
background_tasks.add_task(
_create_backlog_entry,
service_name,
framework,
failed,
result.pipeline_id,
result.commit,
result.branch
)
else:
# All tests passed: close open backlog entries
background_tasks.add_task(
_close_backlog_entry,
service_name,
result.pipeline_id,
result.commit
)
return {
"received": True,
"run_id": run_id,
"service": service_name,
"pipeline_id": result.pipeline_id,
"status": status,
"tests": {"total": total, "passed": passed, "failed": failed},
"stored_in": "postgres"
}
except Exception as e:
print(f"[CI-RESULT] PostgreSQL Error: {e}")
# Fall back to memory storage
pass
# Memory fallback
ci_run = {
"id": f"ci-{result.pipeline_id}",
"pipeline_id": result.pipeline_id,
"commit": result.commit,
"branch": result.branch,
"status": result.status,
"timestamp": datetime.now().isoformat(),
"test_results": result.test_results
}
test_runs.append(ci_run)
return {
"received": True,
"pipeline_id": result.pipeline_id,
"status": result.status,
"stored_in": "memory"
}
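# Sketch of the pipeline-side call (the report-test-results step). The URL
# and the env variable are assumptions; the payload mirrors CIResultRequest
# and the test_results format documented above:
#
#     curl -X POST "$TEST_REGISTRY_URL/ci-result" \
#          -H "Content-Type: application/json" \
#          -d '{
#                "pipeline_id": "4711",
#                "commit": "98933f5e",
#                "branch": "main",
#                "status": "success",
#                "test_results": {"service": "consent-service", "framework": "go",
#                                 "total": 57, "passed": 57, "failed": 0,
#                                 "skipped": 0, "coverage": 75.5}
#              }'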
async def _create_backlog_entry(
service_name: str,
framework: str,
failed_count: int,
pipeline_id: str,
commit: str,
branch: str
):
"""
Background-Task: Erstellt Backlog-Eintraege fuer fehlgeschlagene Tests.
Wird asynchron aufgerufen wenn Tests fehlgeschlagen sind.
"""
from ...db_models import FailedTestBacklogDB
print(f"[CI-RESULT] Creating backlog entry for {service_name}: {failed_count} failed tests")
if is_postgres_available():
try:
with get_db_session() as db:
now = datetime.utcnow()
# Check whether an open backlog entry already exists for this service
existing = db.query(FailedTestBacklogDB).filter(
FailedTestBacklogDB.service == service_name,
FailedTestBacklogDB.status == "open"
).first()
if existing:
# Update the existing entry
existing.last_failed_at = now
existing.failure_count += 1
existing.error_message = f"{failed_count} Tests fehlgeschlagen in Pipeline {pipeline_id} (Branch: {branch})"
db.commit()
print(f"[CI-RESULT] Updated existing backlog entry (ID: {existing.id})")
else:
# Create a new entry
backlog = FailedTestBacklogDB(
test_name=f"{service_name} Tests",
test_file=f"{service_name}/",
service=service_name,
framework=framework,
error_message=f"{failed_count} Tests fehlgeschlagen in Pipeline {pipeline_id} (Branch: {branch})",
error_type="TEST_FAILURE",
first_failed_at=now,
last_failed_at=now,
failure_count=1,
status="open",
priority="high" if failed_count > 5 else "medium"
)
db.add(backlog)
db.commit()
print(f"[CI-RESULT] Created new backlog entry (ID: {backlog.id})")
except Exception as e:
print(f"[CI-RESULT] Error creating backlog entry: {e}")
async def _close_backlog_entry(
service_name: str,
pipeline_id: str,
commit: str
):
"""
Background-Task: Schließt Backlog-Einträge wenn alle Tests bestanden.
Wird asynchron aufgerufen wenn Tests erfolgreich waren.
"""
from ...db_models import FailedTestBacklogDB
print(f"[CI-RESULT] Checking for open backlog entries to close for {service_name}")
if is_postgres_available():
try:
with get_db_session() as db:
now = datetime.utcnow()
# Find open backlog entries for this service
open_entries = db.query(FailedTestBacklogDB).filter(
FailedTestBacklogDB.service == service_name,
FailedTestBacklogDB.status == "open"
).all()
for entry in open_entries:
entry.status = "resolved"
entry.resolved_at = now
entry.resolution_commit = commit[:8] if commit else None
entry.resolution_notes = f"Auto-closed - all tests in pipeline {pipeline_id} passed"
print(f"[CI-RESULT] Auto-closed backlog entry (ID: {entry.id}) for {service_name}")
if open_entries:
db.commit()
print(f"[CI-RESULT] Closed {len(open_entries)} backlog entries for {service_name}")
else:
print(f"[CI-RESULT] No open backlog entries for {service_name}")
except Exception as e:
print(f"[CI-RESULT] Error closing backlog entries: {e}")
async def _fetch_and_store_failed_tests(pipeline_id: str, commit: str, branch: str):
"""
Legacy Background-Task fuer generische Pipeline-Fehler.
"""
from ...db_models import FailedTestBacklogDB
print(f"[CI-RESULT] Fetching failed test details for pipeline {pipeline_id}")
if is_postgres_available():
try:
with get_db_session() as db:
now = datetime.utcnow()
backlog = FailedTestBacklogDB(
test_name=f"CI Pipeline {pipeline_id}",
test_file=".woodpecker/main.yml",
service="ci-pipeline",
framework="woodpecker",
error_message=f"Pipeline {pipeline_id} fehlgeschlagen auf Branch {branch}",
error_type="CI_FAILURE",
first_failed_at=now,
last_failed_at=now,
failure_count=1,
status="open",
priority="high"
)
db.add(backlog)
db.commit()
print(f"[CI-RESULT] Added pipeline failure to backlog (ID: {backlog.id})")
except Exception as e:
print(f"[CI-RESULT] Error adding to backlog: {e}")

View File

@@ -0,0 +1,335 @@
"""
Test Registry - Test Endpoints
Endpoints for test discovery, running, and monitoring.
"""
from datetime import datetime
from typing import Dict, Any
from fastapi import APIRouter, HTTPException, BackgroundTasks
from ...models import (
TestFramework,
TestRegistryStats,
SERVICE_DEFINITIONS,
)
from ..api_models import TestRunResponse, RegistryResponse
from ..config import (
PROJECT_ROOT,
RUN_MODE,
check_go_available,
check_pytest_available,
get_go_version,
get_pytest_version,
get_test_runs,
get_current_runs,
get_running_tests,
)
from ..discovery import (
build_service_info,
discover_go_tests,
discover_python_tests,
discover_bqas_tests,
)
from ..executors import execute_test_run
router = APIRouter()
@router.get("/registry", response_model=RegistryResponse)
async def get_test_registry():
"""
Gibt alle registrierten Tests zurueck.
Scannt alle Services und aggregiert Test-Informationen.
"""
services = []
stats = TestRegistryStats()
by_category: Dict[str, int] = {}
by_framework: Dict[str, int] = {}
for service_def in SERVICE_DEFINITIONS:
info = build_service_info(service_def)
services.append({
"service": info.service,
"display_name": info.display_name,
"port": info.port,
"language": info.language,
"total_tests": info.total_tests,
"passed_tests": info.passed_tests,
"failed_tests": info.failed_tests,
"skipped_tests": info.skipped_tests,
"pass_rate": round(info.pass_rate, 1),
"coverage_percent": round(info.coverage_percent, 1) if info.coverage_percent else None,
"last_run": info.last_run.isoformat() if info.last_run else None,
"status": info.status.value,
})
stats.total_tests += info.total_tests
stats.total_passed += info.passed_tests
stats.total_failed += info.failed_tests
stats.total_skipped += info.skipped_tests
# Framework stats
framework_name = service_def["framework"].value
by_framework[framework_name] = by_framework.get(framework_name, 0) + info.total_tests
# Category based on framework
if service_def["framework"] in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
by_category["bqas"] = by_category.get("bqas", 0) + info.total_tests
elif service_def["framework"] == TestFramework.PLAYWRIGHT:
by_category["e2e"] = by_category.get("e2e", 0) + info.total_tests
else:
by_category["unit"] = by_category.get("unit", 0) + info.total_tests
stats.services_count = len(services)
stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0.0
stats.by_category = by_category
stats.by_framework = by_framework
return RegistryResponse(
services=services,
stats={
"total_tests": stats.total_tests,
"total_passed": stats.total_passed,
"total_failed": stats.total_failed,
"total_skipped": stats.total_skipped,
"overall_pass_rate": round(stats.overall_pass_rate, 1),
"services_count": stats.services_count,
"by_category": stats.by_category,
"by_framework": stats.by_framework,
},
last_updated=datetime.now().isoformat(),
)
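# Shape of the /registry response (illustrative; the framework keys come
# from the TestFramework enum values):
#
#     {
#       "services": [{"service": "consent-service", "total_tests": 57,
#                     "pass_rate": 100.0, "status": "...", ...}],
#       "stats": {"total_tests": ..., "overall_pass_rate": ...,
#                 "by_category": {"unit": ..., "e2e": ..., "bqas": ...},
#                 "by_framework": {...}},
#       "last_updated": "2026-02-09T09:51:32"
#     }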
@router.get("/registry/{service}")
async def get_service_tests(service: str):
"""
Gibt Tests fuer einen spezifischen Service zurueck.
"""
service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == service), None)
if not service_def:
raise HTTPException(status_code=404, detail=f"Service '{service}' not found")
info = build_service_info(service_def)
base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
framework = service_def["framework"]
# Test discovery
if framework == TestFramework.GO_TEST:
tests = discover_go_tests(base_path)
elif framework == TestFramework.PYTEST:
tests = discover_python_tests(base_path)
elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
tests = discover_bqas_tests(base_path, test_type)
else:
tests = []
return {
"service": info.service,
"display_name": info.display_name,
"port": info.port,
"language": info.language,
"total_tests": len(tests),
"passed_tests": info.passed_tests,
"failed_tests": info.failed_tests,
"coverage_percent": info.coverage_percent,
"tests": [
{
"id": t.id,
"name": t.name,
"file_path": t.file_path,
"line_number": t.line_number,
"framework": t.framework.value,
"status": t.status.value,
}
for t in tests
],
}
@router.post("/run/{suite}", response_model=TestRunResponse)
async def run_test_suite(suite: str, background_tasks: BackgroundTasks):
"""
Startet einen Test-Run fuer eine Suite.
Fuehrt Tests im Hintergrund aus.
"""
service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == suite), None)
if not service_def:
raise HTTPException(status_code=404, detail=f"Suite '{suite}' not found")
run_id = f"run_{suite}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
# Start the background task
background_tasks.add_task(execute_test_run, run_id, service_def)
return TestRunResponse(
run_id=run_id,
status="queued",
message=f"Test-Run fuer {service_def['display_name']} gestartet",
)
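# Client flow sketch (the base URL is an assumption, and it is assumed the
# executor reports status "running" while active; the progress endpoint may
# report "idle" before the background task has registered itself, so a real
# client should also check /runs/{run_id}):
#
#     import time
#     import httpx
#     base = "http://localhost:8000/tests"
#     run_id = httpx.post(f"{base}/run/consent-service").json()["run_id"]
#     time.sleep(1)
#     while httpx.get(f"{base}/progress/consent-service").json()["status"] == "running":
#         time.sleep(2)
#     print(httpx.get(f"{base}/runs/{run_id}").json())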
@router.get("/runs")
async def get_test_runs_list(limit: int = 20):
"""
Gibt die Test-Run Historie zurueck.
"""
test_runs = get_test_runs()
# Sorted by start time, newest first
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
return {"runs": sorted_runs[:limit]}
@router.get("/progress/{service_id}")
async def get_test_progress(service_id: str):
"""
Gibt den Fortschritt eines laufenden Tests zurueck.
Wird vom Frontend gepollt um Live-Updates anzuzeigen.
"""
running_tests = get_running_tests()
if service_id in running_tests:
return running_tests[service_id]
# No test currently running - default response
return {
"current_file": "",
"files_done": 0,
"files_total": 0,
"passed": 0,
"failed": 0,
"status": "idle"
}
@router.get("/progress")
async def get_all_progress():
"""
Gibt den Fortschritt aller laufenden Tests zurueck.
"""
return get_running_tests()
@router.get("/runs/{run_id}")
async def get_test_run(run_id: str):
"""
Gibt Details zu einem spezifischen Test-Run zurueck.
"""
current_runs = get_current_runs()
test_runs = get_test_runs()
if run_id in current_runs:
run = current_runs[run_id]
return {
"id": run.id,
"suite_id": run.suite_id,
"service": run.service,
"started_at": run.started_at.isoformat(),
"completed_at": run.completed_at.isoformat() if run.completed_at else None,
"status": run.status.value,
"total_tests": run.total_tests,
"passed_tests": run.passed_tests,
"failed_tests": run.failed_tests,
"duration_seconds": run.duration_seconds,
"output": run.output,
}
# Search the history
for run in test_runs:
if run["id"] == run_id:
return run
raise HTTPException(status_code=404, detail=f"Run '{run_id}' not found")
@router.get("/coverage")
async def get_coverage():
"""
Gibt aggregierte Coverage-Informationen zurueck.
"""
coverage_data = []
total_coverage = 0.0
count = 0
for service_def in SERVICE_DEFINITIONS:
info = build_service_info(service_def)
if info.coverage_percent:
coverage_data.append({
"service": info.service,
"display_name": info.display_name,
"coverage_percent": round(info.coverage_percent, 1),
"language": info.language,
})
total_coverage += info.coverage_percent
count += 1
return {
"services": coverage_data,
"average_coverage": round(total_coverage / count, 1) if count > 0 else 0,
"total_services": count,
}
@router.get("/health")
async def get_test_health():
"""
Gibt den Status der Test-Infrastruktur zurueck.
"""
go_available = check_go_available()
pytest_available = check_pytest_available()
return {
"status": "healthy",
"mode": RUN_MODE, # "docker", "local", oder "demo"
"services_monitored": len(SERVICE_DEFINITIONS),
"project_root": str(PROJECT_ROOT),
"project_root_exists": PROJECT_ROOT.exists(),
"timestamp": datetime.now().isoformat(),
"runners": {
"go_test": "available" if go_available else "not_installed",
"pytest": "available" if pytest_available else "not_installed",
"jest": "available", # TODO: check Node.js
"playwright": "available", # TODO: check Playwright
"bqas": "available", # BQAS hat seinen eigenen Service
},
"versions": {
"go": get_go_version() if go_available else None,
"pytest": get_pytest_version() if pytest_available else None,
},
}
@router.get("/db-status")
async def get_db_status():
"""
Gibt den Status der PostgreSQL-Datenbankverbindung zurueck.
Wird vom Dashboard ServiceStatus verwendet.
"""
import time
from ...database import check_db_connection, DATABASE_URL
start_time = time.time()
is_connected = check_db_connection()
response_time = int((time.time() - start_time) * 1000)
# Parse host from DATABASE_URL (hide password)
try:
# postgresql://user:pass@host:port/db -> host:port
url_parts = DATABASE_URL.split("@")
if len(url_parts) > 1:
host_part = url_parts[1].split("/")[0]
else:
host_part = "unknown"
except Exception:
host_part = "unknown"
return {
"status": "online" if is_connected else "offline",
"host": host_part,
"response_time_ms": response_time,
"timestamp": datetime.now().isoformat(),
}
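# Worked example of the host extraction above (the password never reaches
# the response):
#     "postgresql://user:secret@db:5432/tests" -> host_part == "db:5432"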

View File

@@ -0,0 +1,23 @@
"""
Test Registry Services
Business logic and helper services.
"""
from .error_handling import (
extract_go_error,
classify_go_error,
suggest_go_fix,
extract_pytest_error,
classify_pytest_error,
suggest_pytest_fix,
)
__all__ = [
"extract_go_error",
"classify_go_error",
"suggest_go_fix",
"extract_pytest_error",
"classify_pytest_error",
"suggest_pytest_fix",
]

View File

@@ -0,0 +1,137 @@
"""
Error Analysis and Classification Helpers
Provides error extraction, classification, and fix suggestions for Go and Python tests.
"""
import re
from typing import Optional
# ==============================================================================
# Go Error Helpers
# ==============================================================================
def extract_go_error(output: str) -> str:
"""Extrahiert die Fehlermeldung aus Go-Test-Output"""
if not output:
return ""
lines = output.strip().split("\n")
error_lines = []
for line in lines:
# Typical Go error patterns
if "Error:" in line or "FAIL" in line or "panic:" in line:
error_lines.append(line.strip())
elif line.strip().startswith("---"):
continue
elif "expected" in line.lower() or "got" in line.lower():
error_lines.append(line.strip())
elif ".go:" in line:
error_lines.append(line.strip())
return " | ".join(error_lines[:3]) if error_lines else output[:200]
def classify_go_error(error_msg: str) -> str:
"""Klassifiziert einen Go-Fehler"""
if not error_msg:
return "unknown"
error_lower = error_msg.lower()
if "nil pointer" in error_lower or "panic" in error_lower:
return "nil_pointer"
elif "expected" in error_lower and "got" in error_lower:
return "assertion"
elif "timeout" in error_lower:
return "timeout"
elif "connection" in error_lower or "dial" in error_lower:
return "network"
elif "not found" in error_lower or "does not exist" in error_lower:
return "not_found"
elif "permission" in error_lower or "unauthorized" in error_lower:
return "permission"
return "logic_error"
def suggest_go_fix(error_msg: str) -> str:
"""Gibt einen Loesungsvorschlag fuer Go-Fehler"""
error_type = classify_go_error(error_msg)
suggestions = {
"nil_pointer": "Pruefe ob alle Pointer initialisiert sind. Fuege nil-Checks hinzu.",
"assertion": "Vergleiche die erwarteten mit den tatsaechlichen Werten. Pruefe die Test-Eingabedaten.",
"timeout": "Erhoehe das Timeout oder optimiere die Funktion. Pruefe Netzwerkverbindungen.",
"network": "Pruefe ob der Service erreichbar ist. Stelle sicher dass Mocks korrekt konfiguriert sind.",
"not_found": "Pruefe ob die erwarteten Ressourcen existieren. Aktualisiere Test-Fixtures.",
"permission": "Pruefe Berechtigungen und Auth-Token im Test-Setup.",
"logic_error": "Pruefe die Geschaeftslogik und die Test-Annahmen.",
"unknown": "Analysiere den Stack-Trace fuer mehr Details.",
}
return suggestions.get(error_type, suggestions["unknown"])
# ==============================================================================
# Python Error Helpers
# ==============================================================================
def extract_pytest_error(output: str, test_name: str) -> str:
"""Extrahiert die Fehlermeldung aus pytest-Output"""
if not output:
return ""
# Look for the failure block for this test
pattern = rf'FAILED.*{re.escape(test_name)}.*?\n(.*?)(?=FAILED|PASSED|====|$)'
match = re.search(pattern, output, re.DOTALL)
if match:
error_block = match.group(1)
# Extract the relevant lines
lines = [l.strip() for l in error_block.split("\n") if l.strip()]
# Look for an AssertionError or exception
for i, line in enumerate(lines):
if "AssertionError" in line or "Error" in line or "Exception" in line:
return " | ".join(lines[max(0, i-1):min(len(lines), i+3)])
return ""
def classify_pytest_error(error_msg: str) -> str:
"""Klassifiziert einen Python-Fehler"""
if not error_msg:
return "unknown"
if "AssertionError" in error_msg:
return "assertion"
elif "TypeError" in error_msg:
return "type_error"
elif "AttributeError" in error_msg:
return "attribute_error"
elif "KeyError" in error_msg:
return "key_error"
elif "ValueError" in error_msg:
return "value_error"
elif "ImportError" in error_msg or "ModuleNotFoundError" in error_msg:
return "import_error"
elif "ConnectionError" in error_msg or "timeout" in error_msg.lower():
return "network"
return "logic_error"
def suggest_pytest_fix(error_msg: str) -> str:
"""Gibt einen Loesungsvorschlag fuer Python-Fehler"""
error_type = classify_pytest_error(error_msg)
suggestions = {
"assertion": "Pruefe die erwarteten vs. tatsaechlichen Werte. Sind die Test-Daten aktuell?",
"type_error": "Pruefe die Typen der uebergebenen Argumente. Evtl. fehlt eine Typkonvertierung.",
"attribute_error": "Das Objekt hat dieses Attribut nicht. Pruefe die Initialisierung.",
"key_error": "Der Schluessel existiert nicht im Dict. Pruefe die Test-Daten.",
"value_error": "Ungueltiger Wert uebergeben. Pruefe die Eingabeparameter.",
"import_error": "Modul nicht gefunden. Pruefe die Abhaengigkeiten und den Pfad.",
"network": "Netzwerkfehler. Sind alle Services gestartet? Sind Mocks konfiguriert?",
"logic_error": "Logikfehler. Pruefe die Geschaeftslogik und Test-Annahmen.",
"unknown": "Analysiere den Stack-Trace fuer mehr Details.",
}
return suggestions.get(error_type, suggestions["unknown"])