fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
23
backend/api/tests/registry/executors/__init__.py
Normal file
23
backend/api/tests/registry/executors/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""
|
||||
Test Executors Module
|
||||
|
||||
Functions for running tests in various frameworks.
|
||||
"""
|
||||
|
||||
from .go_executor import run_go_tests
|
||||
from .python_executor import run_python_tests
|
||||
from .bqas_executor import run_bqas_tests
|
||||
from .jest_executor import run_jest_tests
|
||||
from .playwright_executor import run_playwright_tests
|
||||
from .container_executor import run_tests_in_container
|
||||
from .test_runner import execute_test_run
|
||||
|
||||
__all__ = [
|
||||
"run_go_tests",
|
||||
"run_python_tests",
|
||||
"run_bqas_tests",
|
||||
"run_jest_tests",
|
||||
"run_playwright_tests",
|
||||
"run_tests_in_container",
|
||||
"execute_test_run",
|
||||
]
|
||||
44
backend/api/tests/registry/executors/bqas_executor.py
Normal file
44
backend/api/tests/registry/executors/bqas_executor.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""
|
||||
BQAS Test Executor
|
||||
|
||||
Executes BQAS tests via API proxy.
|
||||
"""
|
||||
|
||||
import json
|
||||
from typing import Dict
|
||||
|
||||
import httpx
|
||||
|
||||
from ...models import TestFramework
|
||||
|
||||
|
||||
async def run_bqas_tests(service_def: Dict) -> Dict:
    """Run BQAS tests by proxying to the BQAS API in the voice-service.

    Args:
        service_def: Service definition dict; its ``framework`` field
            selects between the "golden" and "rag" BQAS suites.

    Returns:
        Dict with ``passed``/``failed``/``total`` counts, a truncated
        ``output`` string, and ``failed_test_ids`` (always empty here —
        the BQAS API does not report individual test IDs).
    """
    test_type = "golden" if service_def["framework"] == TestFramework.BQAS_GOLDEN else "rag"

    try:
        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(
                f"http://localhost:8091/api/v1/bqas/run/{test_type}",
            )
            if response.status_code == 200:
                data = response.json()
                metrics = data.get("metrics", {})
                return {
                    "passed": metrics.get("passed_tests", 0),
                    "failed": metrics.get("failed_tests", 0),
                    "total": metrics.get("total_tests", 0),
                    # Truncate so stored output stays bounded.
                    "output": json.dumps(data, indent=2)[:5000],
                    "failed_test_ids": [],
                }
    except Exception:
        # FIX: previously bound an unused ``e``. Best-effort proxy call:
        # any transport/JSON error (and a non-200 status) falls through
        # to the explicit "API unreachable" result below.
        pass

    # Error when the API is unreachable - deliberately NO demo data.
    return {
        "passed": 0,
        "failed": 0,
        "total": 0,
        "output": f"BQAS API nicht erreichbar. Nutze docker exec fuer {test_type} Tests.",
        "failed_test_ids": [],
    }
|
||||
106
backend/api/tests/registry/executors/container_executor.py
Normal file
106
backend/api/tests/registry/executors/container_executor.py
Normal file
@@ -0,0 +1,106 @@
|
||||
"""
|
||||
Container Test Executor
|
||||
|
||||
Executes tests inside Docker containers via docker exec.
|
||||
"""
|
||||
|
||||
import re
|
||||
import asyncio
|
||||
import subprocess
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
|
||||
|
||||
async def run_tests_in_container(
    container_name: str,
    framework: str,
    base_path: str,
    service_id: str,
    pytest_args: str = ""
) -> Dict:
    """Run tests inside another Docker container via ``docker exec``.

    Progress is published into the shared running-tests registry under
    *service_id*. Supports "pytest" explicitly; any other *framework*
    value is executed as ``go test``.

    Returns:
        Dict with ``passed``/``failed``/``total`` counts, the combined
        stdout/stderr ``output``, and the names of failed tests.
    """
    progress = get_running_tests()

    progress[service_id] = {
        "current_file": "",
        "files_done": 0,
        "files_total": 1,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    try:
        if framework == "pytest":
            exec_cmd = [
                "docker", "exec", container_name,
                "python", "-m", "pytest", base_path, "-v", "--tb=short", "-q",
            ]
            # Append any extra pytest arguments configured for the service.
            if pytest_args:
                exec_cmd += pytest_args.split()
        else:
            exec_cmd = ["docker", "exec", container_name, "go", "test", "-v", "./..."]

        # docker exec is blocking; run it off the event loop.
        completed = await asyncio.to_thread(
            lambda: subprocess.run(exec_cmd, capture_output=True, text=True, timeout=600)
        )

        combined = completed.stdout + completed.stderr
        ok_count = 0
        ko_count = 0
        failing = []

        if framework == "pytest":
            # Summary lines look like "3 passed, 1 failed"; lines starting
            # with "FAILED" name the individual failing tests.
            for raw in combined.split("\n"):
                lowered = raw.lower()
                if "passed" in lowered:
                    m = re.search(r"(\d+)\s+passed", raw)
                    if m:
                        ok_count = int(m.group(1))
                if "failed" in lowered:
                    m = re.search(r"(\d+)\s+failed", raw)
                    if m:
                        ko_count = int(m.group(1))
                if raw.startswith("FAILED"):
                    failing.append(raw.replace("FAILED", "").strip())
        else:
            # go test -v emits one "--- PASS:" / "--- FAIL:" line per test.
            for raw in combined.split("\n"):
                if raw.startswith("--- PASS:"):
                    ok_count += 1
                elif raw.startswith("--- FAIL:"):
                    ko_count += 1
                    m = re.search(r"--- FAIL: (\S+)", raw)
                    if m:
                        failing.append(m.group(1))

        progress[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": ok_count,
            "failed": ko_count,
            "status": "completed"
        }

        return {
            "passed": ok_count,
            "failed": ko_count,
            "total": ok_count + ko_count,
            "output": combined,
            "failed_test_ids": failing
        }

    except Exception as exc:
        progress[service_id] = {
            "current_file": str(exc),
            "files_done": 0,
            "files_total": 1,
            "passed": 0,
            "failed": 0,
            "status": "error"
        }
        return {"passed": 0, "failed": 0, "total": 0, "output": str(exc), "failed_test_ids": []}
|
||||
137
backend/api/tests/registry/executors/go_executor.py
Normal file
137
backend/api/tests/registry/executors/go_executor.py
Normal file
@@ -0,0 +1,137 @@
|
||||
"""
|
||||
Go Test Executor
|
||||
|
||||
Executes Go tests and parses results.
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import subprocess
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import check_go_available, get_running_tests
|
||||
from ..services.error_handling import extract_go_error, classify_go_error, suggest_go_fix
|
||||
|
||||
|
||||
async def run_go_tests(base_path: Path, service_id: str = "") -> Dict:
    """Run Go tests (thread-based, does not block the event loop).

    Invokes ``go test -json`` in *base_path* inside a worker thread and
    parses the streamed test2json events. Progress is published into the
    shared running-tests registry under *service_id* (skipped when empty).

    Returns:
        Dict with ``passed``/``failed``/``total`` counts, truncated
        ``output``, and ``failed_test_ids`` entries enriched with error
        classification and fix suggestions.
    """
    running_tests = get_running_tests()

    if not base_path.exists():
        return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}

    # Check whether the Go toolchain is installed.
    go_available = check_go_available()

    if not go_available:
        return {"passed": 0, "failed": 0, "total": 0, "output": "Go nicht installiert - Tests koennen nicht ausgefuehrt werden", "failed_test_ids": []}

    # Initial progress state.
    if service_id:
        running_tests[service_id] = {
            "current_file": "Starte Go-Tests...",
            "files_done": 0,
            "files_total": 1,
            "passed": 0,
            "failed": 0,
            "status": "running"
        }

    def run_go_tests_sync():
        """Runs in a separate thread."""
        try:
            env = os.environ.copy()
            # Writable Go caches for sandboxed/container environments.
            env["GOPATH"] = "/tmp/go"
            env["GOCACHE"] = "/tmp/go-cache"
            env["CGO_ENABLED"] = "0"

            result = subprocess.run(
                ["go", "test", "-v", "-json", "./..."],
                cwd=str(base_path),
                capture_output=True,
                text=True,
                timeout=300,
                env=env,
            )

            passed = failed = 0
            failed_test_ids = []
            # Accumulated "output" events per test, keyed "pkg::test",
            # so failure details are available when the "fail" event arrives.
            test_outputs = {}

            for line in result.stdout.split("\n"):
                if line.strip():
                    try:
                        event = json.loads(line)
                        action = event.get("Action")
                        test_name = event.get("Test", "")
                        pkg = event.get("Package", "")

                        if action == "pass" and test_name:
                            passed += 1
                            if service_id:
                                running_tests[service_id] = {
                                    "current_file": f"{test_name}",
                                    "files_done": 1,
                                    "files_total": 1,
                                    "passed": passed,
                                    "failed": failed,
                                    "status": "running"
                                }
                        elif action == "fail" and test_name:
                            failed += 1
                            test_key = f"{pkg}::{test_name}"
                            error_output = test_outputs.get(test_key, "")
                            error_message = extract_go_error(error_output)
                            failed_test_ids.append({
                                "id": test_key,
                                "name": test_name,
                                "package": pkg,
                                "file_path": pkg.replace("github.com/", ""),
                                "error_message": error_message or "Test fehlgeschlagen - keine Details",
                                "error_type": classify_go_error(error_message),
                                "suggestion": suggest_go_fix(error_message),
                            })
                            if service_id:
                                running_tests[service_id] = {
                                    "current_file": f"{test_name}",
                                    "files_done": 1,
                                    "files_total": 1,
                                    "passed": passed,
                                    "failed": failed,
                                    "status": "running"
                                }
                        elif action == "output" and test_name:
                            test_key = f"{pkg}::{test_name}"
                            test_outputs[test_key] = test_outputs.get(test_key, "") + event.get("Output", "")
                    except json.JSONDecodeError:
                        # Non-JSON lines (e.g. build diagnostics) are skipped.
                        pass

            return {
                "passed": passed,
                "failed": failed,
                "total": passed + failed,
                "output": result.stdout[:5000] if result.stdout else result.stderr[:5000],
                "failed_test_ids": failed_test_ids,
            }
        except subprocess.TimeoutExpired:
            return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
        except Exception as e:
            return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}

    # FIX: asyncio.get_event_loop() is deprecated when called from a
    # coroutine; use the running loop explicitly.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, run_go_tests_sync)

    # Final status.
    if service_id:
        running_tests[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": result.get("passed", 0),
            "failed": result.get("failed", 0),
            "status": "completed"
        }

    return result
|
||||
130
backend/api/tests/registry/executors/jest_executor.py
Normal file
130
backend/api/tests/registry/executors/jest_executor.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
Jest Test Executor
|
||||
|
||||
Executes Jest tests for JavaScript/TypeScript projects.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
|
||||
|
||||
async def run_jest_tests(base_path: Path, service_id: str = "") -> Dict:
    """Run Jest tests for a JavaScript/TypeScript project.

    Parses Jest's ``--json`` reporter output when possible and falls back
    to regex-scraping the text summary otherwise. Progress is published
    into the shared running-tests registry under *service_id*.

    Returns:
        Dict with ``passed``/``failed``/``total`` counts, truncated
        ``output``, and structured ``failed_test_ids`` entries.
    """
    running_tests = get_running_tests()

    if not base_path.exists():
        return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}

    running_tests[service_id] = {
        "current_file": "Starte Jest...",
        "files_done": 0,
        "files_total": 1,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    # Check that Node.js is available before attempting npm test.
    try:
        node_check = subprocess.run(["node", "--version"], capture_output=True, timeout=5)
        if node_check.returncode != 0:
            return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht installiert", "failed_test_ids": []}
    except Exception:
        # FIX: was a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit.
        return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht verfuegbar", "failed_test_ids": []}

    try:
        # Change into the project directory and run Jest.
        env = os.environ.copy()
        env["CI"] = "true"  # non-interactive mode

        result = subprocess.run(
            ["npm", "test", "--", "--json", "--passWithNoTests"],
            cwd=str(base_path),
            capture_output=True,
            text=True,
            timeout=300,
            env=env,
        )

        output = result.stdout + result.stderr
        passed = 0
        failed = 0
        failed_test_ids = []

        # Try to parse the JSON reporter output.
        try:
            # Jest's JSON begins with {"num... - find its start.
            json_start = output.find('{"num')
            if json_start == -1:
                json_start = output.rfind('{"')

            if json_start != -1:
                json_str = output[json_start:]
                # raw_decode tolerates trailing non-JSON text after the object.
                decoder = json.JSONDecoder()
                try:
                    jest_result, _ = decoder.raw_decode(json_str)
                    passed = jest_result.get("numPassedTests", 0)
                    failed = jest_result.get("numFailedTests", 0)

                    # Extract the failed assertions.
                    for test_result in jest_result.get("testResults", []):
                        for assertion in test_result.get("assertionResults", []):
                            if assertion.get("status") == "failed":
                                failed_test_ids.append({
                                    "id": f"{test_result.get('name', '')}::{assertion.get('fullName', '')}",
                                    "name": assertion.get("fullName", ""),
                                    "file_path": test_result.get("name", ""),
                                    "error_message": " ".join(assertion.get("failureMessages", []))[:500],
                                    "error_type": "assertion",
                                    "suggestion": "Pruefe die Test-Assertions und erwarteten Werte",
                                })
                except json.JSONDecodeError:
                    # Fallback: scrape the text summary with regexes.
                    for line in output.split("\n"):
                        if "passed" in line.lower():
                            match = re.search(r"(\d+)\s+passed", line)
                            if match:
                                passed = int(match.group(1))
                        if "failed" in line.lower():
                            match = re.search(r"(\d+)\s+failed", line)
                            if match:
                                failed = int(match.group(1))
        except Exception:
            # General fallback: scrape the text summary.
            for line in output.split("\n"):
                if "passed" in line.lower():
                    match = re.search(r"(\d+)\s+passed", line)
                    if match:
                        passed = int(match.group(1))
                if "failed" in line.lower():
                    match = re.search(r"(\d+)\s+failed", line)
                    if match:
                        failed = int(match.group(1))

        running_tests[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": passed,
            "failed": failed,
            "status": "completed"
        }

        return {
            "passed": passed,
            "failed": failed,
            "total": passed + failed,
            "output": output[:5000],
            "failed_test_ids": failed_test_ids,
        }
    except subprocess.TimeoutExpired:
        return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
    except Exception as e:
        return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}
|
||||
101
backend/api/tests/registry/executors/playwright_executor.py
Normal file
101
backend/api/tests/registry/executors/playwright_executor.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""
|
||||
Playwright Test Executor
|
||||
|
||||
Executes Playwright E2E tests.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
|
||||
|
||||
async def run_playwright_tests(base_path: Path, service_id: str = "") -> Dict:
    """Execute Playwright end-to-end tests and report aggregate results.

    Prefers Playwright's JSON reporter; falls back to scraping the plain
    text summary. Progress is published into the shared running-tests
    registry under *service_id*.
    """
    progress = get_running_tests()

    if not base_path.exists():
        return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}

    progress[service_id] = {
        "current_file": "Starte Playwright...",
        "files_done": 0,
        "files_total": 1,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    try:
        child_env = os.environ.copy()
        child_env["CI"] = "true"

        proc = subprocess.run(
            ["npx", "playwright", "test", "--reporter=json"],
            cwd=str(base_path),
            capture_output=True,
            text=True,
            timeout=600,  # E2E suites need a longer budget
            env=child_env,
        )

        combined = proc.stdout + proc.stderr
        ok_count = 0
        ko_count = 0
        failures = []

        # Prefer the structured JSON reporter output.
        try:
            report = json.loads(combined)
            for suite in report.get("suites", []):
                for spec in suite.get("specs", []):
                    for case in spec.get("tests", []):
                        for attempt in case.get("results", []):
                            verdict = attempt.get("status")
                            if verdict == "passed":
                                ok_count += 1
                            elif verdict == "failed":
                                ko_count += 1
                                failures.append({
                                    "id": spec.get("title", ""),
                                    "name": spec.get("title", ""),
                                    "file_path": spec.get("file", ""),
                                    "error_message": attempt.get("error", {}).get("message", "")[:500],
                                    "error_type": "e2e",
                                    "suggestion": "Pruefe den E2E-Test und die Anwendung",
                                })
        except json.JSONDecodeError:
            # Fallback: scrape the plain-text summary lines.
            for raw in combined.split("\n"):
                lowered = raw.lower()
                if "passed" in lowered:
                    m = re.search(r"(\d+)\s+passed", raw)
                    if m:
                        ok_count = int(m.group(1))
                if "failed" in lowered:
                    m = re.search(r"(\d+)\s+failed", raw)
                    if m:
                        ko_count = int(m.group(1))

        progress[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": ok_count,
            "failed": ko_count,
            "status": "completed"
        }

        return {
            "passed": ok_count,
            "failed": ko_count,
            "total": ok_count + ko_count,
            "output": combined[:5000],
            "failed_test_ids": failures,
        }
    except subprocess.TimeoutExpired:
        return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 10 Minuten", "failed_test_ids": []}
    except Exception as exc:
        return {"passed": 0, "failed": 0, "total": 0, "output": str(exc), "failed_test_ids": []}
|
||||
187
backend/api/tests/registry/executors/python_executor.py
Normal file
187
backend/api/tests/registry/executors/python_executor.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""
|
||||
Python Test Executor
|
||||
|
||||
Executes pytest tests with live progress updates.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ..config import get_running_tests
|
||||
from ..services.error_handling import extract_pytest_error, classify_pytest_error, suggest_pytest_fix
|
||||
|
||||
|
||||
async def run_python_tests(base_path: Path, service_id: str = "") -> Dict:
    """Run pytest with live progress updates (thread-based).

    Locates a pytest executable, streams the verbose output line by line
    from a worker thread, and publishes per-test progress into the shared
    running-tests registry under *service_id*.

    Returns:
        Dict with ``passed``/``failed``/``total`` counts, truncated
        ``output``, and ``failed_test_ids`` entries enriched with error
        classification and fix suggestions.
    """
    running_tests = get_running_tests()

    if not base_path.exists():
        return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}

    # Try several pytest locations, in order of preference.
    pytest_paths = [
        "/opt/venv/bin/pytest",  # Docker venv
        "pytest",  # system pytest
        "python -m pytest",  # as a module
    ]

    pytest_cmd = None
    for path in pytest_paths:
        try:
            check = subprocess.run(
                path.split() + ["--version"],
                capture_output=True,
                timeout=5,
            )
            if check.returncode == 0:
                pytest_cmd = path.split()
                break
        except Exception:
            # FIX: was a bare ``except:`` that would also swallow
            # KeyboardInterrupt/SystemExit while probing candidates.
            continue

    if not pytest_cmd:
        return {"passed": 0, "failed": 0, "total": 0, "output": "pytest nicht gefunden", "failed_test_ids": []}

    # Count the test files first (progress denominator).
    test_files = []
    test_dir = base_path if base_path.is_dir() else base_path.parent
    for f in test_dir.rglob("test_*.py"):
        test_files.append(f.name)
    total_files = len(test_files) if test_files else 1

    # Initial progress state.
    running_tests[service_id] = {
        "current_file": "Starte Tests...",
        "files_done": 0,
        "files_total": total_files,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }

    # Shared result container mutated by the worker thread.
    result_container = {
        "output_lines": [],
        "passed": 0,
        "failed": 0,
        "files_seen": set(),
        "current_file": "",
        "done": False,
        "error": None
    }

    def run_pytest_with_progress():
        """Runs in a separate thread - does not block the event loop."""
        try:
            cwd = str(base_path.parent) if base_path.is_file() else str(base_path)

            # Unbuffered output for real-time progress.
            env = os.environ.copy()
            env["PYTHONUNBUFFERED"] = "1"

            process = subprocess.Popen(
                pytest_cmd + ["-v", "-s", "--tb=short", str(base_path)],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                cwd=cwd,
                bufsize=1,
                env=env,
            )

            for line in iter(process.stdout.readline, ''):
                if not line:
                    break

                result_container["output_lines"].append(line)
                line_stripped = line.strip()

                # Parse per-test result lines ("file.py::test NAME PASSED").
                match = re.match(r'(\S+\.py)::(\S+)\s+(PASSED|FAILED|SKIPPED|ERROR)', line_stripped)
                if match:
                    file_path = match.group(1)
                    status = match.group(3)

                    file_name = Path(file_path).name
                    if file_name not in result_container["files_seen"]:
                        result_container["files_seen"].add(file_name)
                        result_container["current_file"] = file_name

                    if status == "PASSED":
                        result_container["passed"] += 1
                    elif status in ("FAILED", "ERROR"):
                        result_container["failed"] += 1

                    # Publish progress.
                    running_tests[service_id] = {
                        "current_file": result_container["current_file"],
                        "files_done": len(result_container["files_seen"]),
                        "files_total": max(total_files, len(result_container["files_seen"])),
                        "passed": result_container["passed"],
                        "failed": result_container["failed"],
                        "status": "running"
                    }

            process.wait()
            result_container["done"] = True

        except Exception as e:
            result_container["error"] = str(e)
            result_container["done"] = True

    # Run the pytest subprocess in a worker thread.
    # FIX: asyncio.get_event_loop() is deprecated when called from a
    # coroutine; use the running loop explicitly.
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, run_pytest_with_progress)

    full_output = "".join(result_container["output_lines"])
    passed = result_container["passed"]
    failed = result_container["failed"]
    files_seen = result_container["files_seen"]

    if result_container["error"]:
        running_tests[service_id] = {
            "current_file": result_container["error"],
            "files_done": 0,
            "files_total": total_files,
            "passed": 0,
            "failed": 0,
            "status": "error"
        }
        return {"passed": 0, "failed": 0, "total": 0, "output": result_container["error"], "failed_test_ids": []}

    # Parse failed tests out of the full output.
    failed_test_ids = []
    for match in re.finditer(r'FAILED\s+(\S+)::(\S+)', full_output):
        file_path = match.group(1)
        test_name = match.group(2)
        error_msg = extract_pytest_error(full_output, test_name)
        failed_test_ids.append({
            "id": f"{file_path}::{test_name}",
            "name": test_name,
            "file_path": file_path,
            "error_message": error_msg or "Test fehlgeschlagen - keine Details",
            "error_type": classify_pytest_error(error_msg),
            "suggestion": suggest_pytest_fix(error_msg),
        })

    # Final status.
    running_tests[service_id] = {
        "current_file": "Abgeschlossen",
        "files_done": len(files_seen),
        "files_total": len(files_seen),
        "passed": passed,
        "failed": failed,
        "status": "completed"
    }

    return {
        "passed": passed,
        "failed": failed,
        "total": passed + failed,
        "output": full_output[:5000],
        "failed_test_ids": failed_test_ids,
    }
|
||||
192
backend/api/tests/registry/executors/test_runner.py
Normal file
192
backend/api/tests/registry/executors/test_runner.py
Normal file
@@ -0,0 +1,192 @@
|
||||
"""
|
||||
Test Runner
|
||||
|
||||
Orchestrates test execution and persists results.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Dict
|
||||
|
||||
from ...models import TestRun, RunStatus, TestFramework
|
||||
from ..config import (
|
||||
PROJECT_ROOT,
|
||||
get_test_runs,
|
||||
get_current_runs,
|
||||
get_persisted_results,
|
||||
save_persisted_results,
|
||||
is_postgres_available,
|
||||
)
|
||||
from .go_executor import run_go_tests
|
||||
from .python_executor import run_python_tests
|
||||
from .bqas_executor import run_bqas_tests
|
||||
from .jest_executor import run_jest_tests
|
||||
from .playwright_executor import run_playwright_tests
|
||||
from .container_executor import run_tests_in_container
|
||||
|
||||
|
||||
async def execute_test_run(run_id: str, service_def: Dict):
    """Execute a test run in the background.

    Dispatches to the framework-specific executor, records the outcome in
    the in-memory run history, the legacy persisted-results dict, and —
    when available — PostgreSQL.

    Args:
        run_id: Unique identifier of this run.
        service_def: Service definition dict (framework, service name,
            base_path, optional disabled/container flags).
    """
    test_runs = get_test_runs()
    current_runs = get_current_runs()
    persisted_results = get_persisted_results()

    framework = service_def["framework"]
    service_id = service_def["service"]
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")

    # Check whether the service is disabled; if so, record a no-op run.
    if service_def.get("disabled", False):
        reason = service_def.get("disabled_reason", "Service deaktiviert")
        run = TestRun(
            id=run_id,
            suite_id=service_id,
            service=service_id,
            started_at=datetime.now(),
            completed_at=datetime.now(),
            status=RunStatus.COMPLETED,
            output=f"Service deaktiviert: {reason}",
        )
        current_runs[run_id] = run
        test_runs.append({
            "id": run.id,
            "suite_id": run.suite_id,
            "service": run.service,
            "started_at": run.started_at.isoformat(),
            "completed_at": run.completed_at.isoformat(),
            "status": run.status.value,
            "total_tests": 0,
            "passed_tests": 0,
            "failed_tests": 0,
            "failed_test_ids": [],
            "duration_seconds": 0,
        })
        return

    # Check whether tests should run inside another container.
    run_in_container = service_def.get("run_in_container", False)
    container_name = service_def.get("container_name", "")

    run = TestRun(
        id=run_id,
        suite_id=service_id,
        service=service_id,
        started_at=datetime.now(),
        status=RunStatus.RUNNING,
    )
    current_runs[run_id] = run

    try:
        # Real test execution, dispatched by framework.
        if run_in_container and container_name:
            # Execute the tests inside the external container.
            framework_str = "pytest" if framework == TestFramework.PYTEST else "go"
            container_base_path = service_def.get("base_path", "/app/tests")
            pytest_args = service_def.get("pytest_args", "")
            result = await run_tests_in_container(container_name, framework_str, container_base_path, service_id, pytest_args)
        elif framework == TestFramework.GO_TEST:
            result = await run_go_tests(base_path, service_id=service_id)
        elif framework == TestFramework.PYTEST:
            result = await run_python_tests(base_path, service_id=service_id)
        elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
            result = await run_bqas_tests(service_def)
        elif framework == TestFramework.JEST:
            result = await run_jest_tests(base_path, service_id=service_id)
        elif framework == TestFramework.PLAYWRIGHT:
            result = await run_playwright_tests(base_path, service_id=service_id)
        else:
            result = {"passed": 0, "failed": 0, "total": 0, "output": "Framework nicht unterstuetzt"}

        # Copy the executor result onto the run record.
        run.completed_at = datetime.now()
        run.status = RunStatus.COMPLETED if result.get("failed", 0) == 0 else RunStatus.FAILED
        run.total_tests = result.get("total", 0)
        run.passed_tests = result.get("passed", 0)
        run.failed_tests = result.get("failed", 0)
        run.failed_test_ids = result.get("failed_test_ids", [])
        run.duration_seconds = (run.completed_at - run.started_at).total_seconds()
        run.output = result.get("output", "")

    except Exception as e:
        # Executor crashed: mark the run failed with the exception text.
        run.completed_at = datetime.now()
        run.status = RunStatus.FAILED
        run.output = str(e)

    # Store in the history (in-memory).
    test_runs.append({
        "id": run.id,
        "suite_id": run.suite_id,
        "service": run.service,
        "started_at": run.started_at.isoformat(),
        "completed_at": run.completed_at.isoformat() if run.completed_at else None,
        "status": run.status.value,
        "total_tests": run.total_tests,
        "passed_tests": run.passed_tests,
        "failed_tests": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "duration_seconds": run.duration_seconds,
    })

    # Persist results (legacy in-memory dict).
    persisted_results[service_id] = {
        "total": run.total_tests,
        "passed": run.passed_tests,
        "failed": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "last_run": run.completed_at.isoformat() if run.completed_at else datetime.now().isoformat(),
        "status": run.status.value,
    }
    save_persisted_results()

    # PostgreSQL persistence (best-effort; failures only logged).
    if is_postgres_available():
        try:
            # Imported lazily so the module loads without a DB configured.
            from ...database import get_db_session
            from ...repository import TestRepository

            with get_db_session() as db:
                repo = TestRepository(db)

                # Create the run if it does not exist yet.
                db_run = repo.get_run(run.id)
                if not db_run:
                    db_run = repo.create_run(
                        run_id=run.id,
                        service=service_id,
                        framework=framework.value,
                        triggered_by="manual"
                    )

                # Finalize the run.
                repo.complete_run(
                    run_id=run.id,
                    status=run.status.value,
                    total_tests=run.total_tests,
                    passed_tests=run.passed_tests,
                    failed_tests=run.failed_tests,
                    skipped_tests=0,
                    duration_seconds=run.duration_seconds,
                    output=run.output
                )

                # Store individual test results (failed tests only).
                # Entries may be rich dicts (from the executors) or
                # plain strings (e.g. container executor's pytest names).
                if run.failed_test_ids:
                    results_to_add = []
                    for failed in run.failed_test_ids:
                        if isinstance(failed, dict):
                            results_to_add.append({
                                "name": failed.get("name") or failed.get("id", "unknown"),
                                "file_path": failed.get("file_path"),
                                "status": "failed",
                                "error_message": failed.get("error_message"),
                                "error_type": failed.get("error_type"),
                                "suggestion": failed.get("suggestion")
                            })
                        elif isinstance(failed, str):
                            results_to_add.append({
                                "name": failed,
                                "status": "failed"
                            })
                    if results_to_add:
                        repo.add_results(run.id, results_to_add)

        except Exception as e:
            print(f"Fehler beim PostgreSQL-Speichern: {e}")
|
||||
Reference in New Issue
Block a user