A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
193 lines
7.4 KiB
Python
"""
|
|
Test Runner
|
|
|
|
Orchestrates test execution and persists results.
|
|
"""
|
|
|
|
from datetime import datetime
|
|
from typing import Dict
|
|
|
|
from ...models import TestRun, RunStatus, TestFramework
|
|
from ..config import (
|
|
PROJECT_ROOT,
|
|
get_test_runs,
|
|
get_current_runs,
|
|
get_persisted_results,
|
|
save_persisted_results,
|
|
is_postgres_available,
|
|
)
|
|
from .go_executor import run_go_tests
|
|
from .python_executor import run_python_tests
|
|
from .bqas_executor import run_bqas_tests
|
|
from .jest_executor import run_jest_tests
|
|
from .playwright_executor import run_playwright_tests
|
|
from .container_executor import run_tests_in_container
|
|
|
|
|
|
async def execute_test_run(run_id: str, service_def: Dict):
|
|
"""Fuehrt einen Test-Run im Hintergrund aus"""
|
|
test_runs = get_test_runs()
|
|
current_runs = get_current_runs()
|
|
persisted_results = get_persisted_results()
|
|
|
|
framework = service_def["framework"]
|
|
service_id = service_def["service"]
|
|
base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
|
|
|
|
# Pruefe ob Service deaktiviert ist
|
|
if service_def.get("disabled", False):
|
|
reason = service_def.get("disabled_reason", "Service deaktiviert")
|
|
run = TestRun(
|
|
id=run_id,
|
|
suite_id=service_id,
|
|
service=service_id,
|
|
started_at=datetime.now(),
|
|
completed_at=datetime.now(),
|
|
status=RunStatus.COMPLETED,
|
|
output=f"Service deaktiviert: {reason}",
|
|
)
|
|
current_runs[run_id] = run
|
|
test_runs.append({
|
|
"id": run.id,
|
|
"suite_id": run.suite_id,
|
|
"service": run.service,
|
|
"started_at": run.started_at.isoformat(),
|
|
"completed_at": run.completed_at.isoformat(),
|
|
"status": run.status.value,
|
|
"total_tests": 0,
|
|
"passed_tests": 0,
|
|
"failed_tests": 0,
|
|
"failed_test_ids": [],
|
|
"duration_seconds": 0,
|
|
})
|
|
return
|
|
|
|
# Pruefe ob Tests in einem anderen Container laufen sollen
|
|
run_in_container = service_def.get("run_in_container", False)
|
|
container_name = service_def.get("container_name", "")
|
|
|
|
run = TestRun(
|
|
id=run_id,
|
|
suite_id=service_id,
|
|
service=service_id,
|
|
started_at=datetime.now(),
|
|
status=RunStatus.RUNNING,
|
|
)
|
|
current_runs[run_id] = run
|
|
|
|
try:
|
|
# Echte Test-Ausführung basierend auf Framework
|
|
if run_in_container and container_name:
|
|
# Tests im externen Container ausfuehren
|
|
framework_str = "pytest" if framework == TestFramework.PYTEST else "go"
|
|
container_base_path = service_def.get("base_path", "/app/tests")
|
|
pytest_args = service_def.get("pytest_args", "")
|
|
result = await run_tests_in_container(container_name, framework_str, container_base_path, service_id, pytest_args)
|
|
elif framework == TestFramework.GO_TEST:
|
|
result = await run_go_tests(base_path, service_id=service_id)
|
|
elif framework == TestFramework.PYTEST:
|
|
result = await run_python_tests(base_path, service_id=service_id)
|
|
elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
|
|
result = await run_bqas_tests(service_def)
|
|
elif framework == TestFramework.JEST:
|
|
result = await run_jest_tests(base_path, service_id=service_id)
|
|
elif framework == TestFramework.PLAYWRIGHT:
|
|
result = await run_playwright_tests(base_path, service_id=service_id)
|
|
else:
|
|
result = {"passed": 0, "failed": 0, "total": 0, "output": "Framework nicht unterstuetzt"}
|
|
|
|
run.completed_at = datetime.now()
|
|
run.status = RunStatus.COMPLETED if result.get("failed", 0) == 0 else RunStatus.FAILED
|
|
run.total_tests = result.get("total", 0)
|
|
run.passed_tests = result.get("passed", 0)
|
|
run.failed_tests = result.get("failed", 0)
|
|
run.failed_test_ids = result.get("failed_test_ids", [])
|
|
run.duration_seconds = (run.completed_at - run.started_at).total_seconds()
|
|
run.output = result.get("output", "")
|
|
|
|
except Exception as e:
|
|
run.completed_at = datetime.now()
|
|
run.status = RunStatus.FAILED
|
|
run.output = str(e)
|
|
|
|
# In Historie speichern (In-Memory)
|
|
test_runs.append({
|
|
"id": run.id,
|
|
"suite_id": run.suite_id,
|
|
"service": run.service,
|
|
"started_at": run.started_at.isoformat(),
|
|
"completed_at": run.completed_at.isoformat() if run.completed_at else None,
|
|
"status": run.status.value,
|
|
"total_tests": run.total_tests,
|
|
"passed_tests": run.passed_tests,
|
|
"failed_tests": run.failed_tests,
|
|
"failed_test_ids": run.failed_test_ids,
|
|
"duration_seconds": run.duration_seconds,
|
|
})
|
|
|
|
# Persistiere Ergebnisse (Legacy In-Memory Dict)
|
|
persisted_results[service_id] = {
|
|
"total": run.total_tests,
|
|
"passed": run.passed_tests,
|
|
"failed": run.failed_tests,
|
|
"failed_test_ids": run.failed_test_ids,
|
|
"last_run": run.completed_at.isoformat() if run.completed_at else datetime.now().isoformat(),
|
|
"status": run.status.value,
|
|
}
|
|
save_persisted_results()
|
|
|
|
# PostgreSQL-Persistierung
|
|
if is_postgres_available():
|
|
try:
|
|
from ...database import get_db_session
|
|
from ...repository import TestRepository
|
|
|
|
with get_db_session() as db:
|
|
repo = TestRepository(db)
|
|
|
|
# Run erstellen falls noch nicht vorhanden
|
|
db_run = repo.get_run(run.id)
|
|
if not db_run:
|
|
db_run = repo.create_run(
|
|
run_id=run.id,
|
|
service=service_id,
|
|
framework=framework.value,
|
|
triggered_by="manual"
|
|
)
|
|
|
|
# Run abschliessen
|
|
repo.complete_run(
|
|
run_id=run.id,
|
|
status=run.status.value,
|
|
total_tests=run.total_tests,
|
|
passed_tests=run.passed_tests,
|
|
failed_tests=run.failed_tests,
|
|
skipped_tests=0,
|
|
duration_seconds=run.duration_seconds,
|
|
output=run.output
|
|
)
|
|
|
|
# Einzelne Test-Ergebnisse speichern (fehlgeschlagene Tests)
|
|
if run.failed_test_ids:
|
|
results_to_add = []
|
|
for failed in run.failed_test_ids:
|
|
if isinstance(failed, dict):
|
|
results_to_add.append({
|
|
"name": failed.get("name") or failed.get("id", "unknown"),
|
|
"file_path": failed.get("file_path"),
|
|
"status": "failed",
|
|
"error_message": failed.get("error_message"),
|
|
"error_type": failed.get("error_type"),
|
|
"suggestion": failed.get("suggestion")
|
|
})
|
|
elif isinstance(failed, str):
|
|
results_to_add.append({
|
|
"name": failed,
|
|
"status": "failed"
|
|
})
|
|
if results_to_add:
|
|
repo.add_results(run.id, results_to_add)
|
|
|
|
except Exception as e:
|
|
print(f"Fehler beim PostgreSQL-Speichern: {e}")
|