This repository was archived on 2026-02-15. You can view and clone its files, but you cannot open issues, create pull requests, or push commits.
Files
breakpilot-pwa/backend/api/tests/registry/executors/test_runner.py
BreakPilot Dev 19855efacc
Some checks failed
Tests / Go Tests (push) Has been cancelled
Tests / Python Tests (push) Has been cancelled
Tests / Integration Tests (push) Has been cancelled
Tests / Go Lint (push) Has been cancelled
Tests / Python Lint (push) Has been cancelled
Tests / Security Scan (push) Has been cancelled
Tests / All Checks Passed (push) Has been cancelled
Security Scanning / Secret Scanning (push) Has been cancelled
Security Scanning / Dependency Vulnerability Scan (push) Has been cancelled
Security Scanning / Go Security Scan (push) Has been cancelled
Security Scanning / Python Security Scan (push) Has been cancelled
Security Scanning / Node.js Security Scan (push) Has been cancelled
Security Scanning / Docker Image Security (push) Has been cancelled
Security Scanning / Security Summary (push) Has been cancelled
CI/CD Pipeline / Go Tests (push) Has been cancelled
CI/CD Pipeline / Python Tests (push) Has been cancelled
CI/CD Pipeline / Website Tests (push) Has been cancelled
CI/CD Pipeline / Linting (push) Has been cancelled
CI/CD Pipeline / Security Scan (push) Has been cancelled
CI/CD Pipeline / Docker Build & Push (push) Has been cancelled
CI/CD Pipeline / Integration Tests (push) Has been cancelled
CI/CD Pipeline / Deploy to Staging (push) Has been cancelled
CI/CD Pipeline / Deploy to Production (push) Has been cancelled
CI/CD Pipeline / CI Summary (push) Has been cancelled
ci/woodpecker/manual/build-ci-image Pipeline was successful
ci/woodpecker/manual/main Pipeline failed
feat: BreakPilot PWA - Full codebase (clean push without large binaries)
All services: admin-v2, studio-v2, website, ai-compliance-sdk,
consent-service, klausur-service, voice-service, and infrastructure.
Large PDFs and compiled binaries excluded via .gitignore.
2026-02-11 13:25:58 +01:00

193 lines
7.4 KiB
Python

"""
Test Runner
Orchestrates test execution and persists results.
"""
from datetime import datetime
from typing import Dict
from ...models import TestRun, RunStatus, TestFramework
from ..config import (
PROJECT_ROOT,
get_test_runs,
get_current_runs,
get_persisted_results,
save_persisted_results,
is_postgres_available,
)
from .go_executor import run_go_tests
from .python_executor import run_python_tests
from .bqas_executor import run_bqas_tests
from .jest_executor import run_jest_tests
from .playwright_executor import run_playwright_tests
from .container_executor import run_tests_in_container
async def execute_test_run(run_id: str, service_def: Dict):
    """Execute a test run in the background.

    Dispatches to the framework-specific executor for the service, then
    records the outcome in three places: the in-memory run history, the
    legacy persisted-results dict (flushed via ``save_persisted_results``),
    and — when available — PostgreSQL.

    Args:
        run_id: Unique identifier for this test run.
        service_def: Registry entry describing the service under test
            (keys used here: ``framework``, ``service``, ``base_path``,
            and optionally ``disabled``, ``disabled_reason``,
            ``run_in_container``, ``container_name``, ``pytest_args``).
    """
    test_runs = get_test_runs()
    current_runs = get_current_runs()
    persisted_results = get_persisted_results()

    framework = service_def["framework"]
    service_id = service_def["service"]
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")

    # Disabled services are recorded as completed no-op runs and skipped.
    if service_def.get("disabled", False):
        reason = service_def.get("disabled_reason", "Service deaktiviert")
        _record_disabled_run(run_id, service_id, reason, test_runs, current_runs)
        return

    run = TestRun(
        id=run_id,
        suite_id=service_id,
        service=service_id,
        started_at=datetime.now(),
        status=RunStatus.RUNNING,
    )
    current_runs[run_id] = run

    try:
        result = await _dispatch_framework(framework, service_def, base_path, service_id)
        run.completed_at = datetime.now()
        run.status = RunStatus.COMPLETED if result.get("failed", 0) == 0 else RunStatus.FAILED
        run.total_tests = result.get("total", 0)
        run.passed_tests = result.get("passed", 0)
        run.failed_tests = result.get("failed", 0)
        run.failed_test_ids = result.get("failed_test_ids", [])
        run.duration_seconds = (run.completed_at - run.started_at).total_seconds()
        run.output = result.get("output", "")
    except Exception as e:
        # Executor crashed: mark the run failed and keep the error text as output.
        # NOTE(review): in this path the counters (total/passed/failed/...) are
        # never assigned and the history entry below reads them off the model —
        # confirm TestRun declares defaults for these fields.
        run.completed_at = datetime.now()
        run.status = RunStatus.FAILED
        run.output = str(e)

    # In-memory history entry.
    test_runs.append({
        "id": run.id,
        "suite_id": run.suite_id,
        "service": run.service,
        "started_at": run.started_at.isoformat(),
        "completed_at": run.completed_at.isoformat() if run.completed_at else None,
        "status": run.status.value,
        "total_tests": run.total_tests,
        "passed_tests": run.passed_tests,
        "failed_tests": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "duration_seconds": run.duration_seconds,
    })

    # Legacy in-memory persistence dict, flushed to its backing store.
    persisted_results[service_id] = {
        "total": run.total_tests,
        "passed": run.passed_tests,
        "failed": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "last_run": run.completed_at.isoformat() if run.completed_at else datetime.now().isoformat(),
        "status": run.status.value,
    }
    save_persisted_results()

    # Optional PostgreSQL persistence; failures are logged, never raised.
    if is_postgres_available():
        _persist_to_postgres(run, service_id, framework)


def _record_disabled_run(run_id: str, service_id: str, reason: str,
                         test_runs, current_runs) -> None:
    """Record an immediately-completed zero-test run for a disabled service."""
    now = datetime.now()
    run = TestRun(
        id=run_id,
        suite_id=service_id,
        service=service_id,
        started_at=now,
        completed_at=now,
        status=RunStatus.COMPLETED,
        output=f"Service deaktiviert: {reason}",
    )
    current_runs[run_id] = run
    test_runs.append({
        "id": run.id,
        "suite_id": run.suite_id,
        "service": run.service,
        "started_at": run.started_at.isoformat(),
        "completed_at": run.completed_at.isoformat(),
        "status": run.status.value,
        "total_tests": 0,
        "passed_tests": 0,
        "failed_tests": 0,
        "failed_test_ids": [],
        "duration_seconds": 0,
    })


async def _dispatch_framework(framework, service_def: Dict, base_path, service_id: str) -> Dict:
    """Run the tests via the executor matching *framework* and return its result dict.

    The result dict carries at least ``passed``/``failed``/``total``/``output``
    and optionally ``failed_test_ids``.
    """
    run_in_container = service_def.get("run_in_container", False)
    container_name = service_def.get("container_name", "")

    if run_in_container and container_name:
        # NOTE(review): every non-pytest framework maps to "go" here — confirm
        # container-based suites are only ever pytest or go.
        framework_str = "pytest" if framework == TestFramework.PYTEST else "go"
        container_base_path = service_def.get("base_path", "/app/tests")
        pytest_args = service_def.get("pytest_args", "")
        return await run_tests_in_container(
            container_name, framework_str, container_base_path, service_id, pytest_args
        )
    if framework == TestFramework.GO_TEST:
        return await run_go_tests(base_path, service_id=service_id)
    if framework == TestFramework.PYTEST:
        return await run_python_tests(base_path, service_id=service_id)
    if framework in (TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG):
        return await run_bqas_tests(service_def)
    if framework == TestFramework.JEST:
        return await run_jest_tests(base_path, service_id=service_id)
    if framework == TestFramework.PLAYWRIGHT:
        return await run_playwright_tests(base_path, service_id=service_id)
    return {"passed": 0, "failed": 0, "total": 0, "output": "Framework nicht unterstuetzt"}


def _persist_to_postgres(run, service_id: str, framework) -> None:
    """Best-effort write of a finished run (and its failed tests) to PostgreSQL."""
    try:
        from ...database import get_db_session
        from ...repository import TestRepository
        with get_db_session() as db:
            repo = TestRepository(db)
            # Create the run row if it does not exist yet.
            if not repo.get_run(run.id):
                repo.create_run(
                    run_id=run.id,
                    service=service_id,
                    framework=framework.value,
                    triggered_by="manual"
                )
            # Close out the run with the aggregated counters.
            repo.complete_run(
                run_id=run.id,
                status=run.status.value,
                total_tests=run.total_tests,
                passed_tests=run.passed_tests,
                failed_tests=run.failed_tests,
                skipped_tests=0,
                duration_seconds=run.duration_seconds,
                output=run.output
            )
            # Store per-test rows for the failures only.
            if run.failed_test_ids:
                results_to_add = _failed_results_payload(run.failed_test_ids)
                if results_to_add:
                    repo.add_results(run.id, results_to_add)
    except Exception as e:
        # Postgres persistence is optional; never let it break the run.
        print(f"Fehler beim PostgreSQL-Speichern: {e}")


def _failed_results_payload(failed_test_ids) -> list:
    """Normalize failed-test entries (dicts or plain names) into repo rows."""
    payload = []
    for failed in failed_test_ids:
        if isinstance(failed, dict):
            payload.append({
                "name": failed.get("name") or failed.get("id", "unknown"),
                "file_path": failed.get("file_path"),
                "status": "failed",
                "error_message": failed.get("error_message"),
                "error_type": failed.get("error_type"),
                "suggestion": failed.get("suggestion")
            })
        elif isinstance(failed, str):
            payload.append({"name": failed, "status": "failed"})
    return payload