fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
115
backend/api/tests/registry/discovery/service_builder.py
Normal file
115
backend/api/tests/registry/discovery/service_builder.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""
|
||||
Service Info Builder
|
||||
|
||||
Builds ServiceTestInfo from service definitions.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ...models import ServiceTestInfo, TestStatus, TestFramework
|
||||
from ..config import PROJECT_ROOT, get_persisted_results
|
||||
from .go_discovery import discover_go_tests
|
||||
from .python_discovery import discover_python_tests, discover_bqas_tests
|
||||
|
||||
|
||||
def _make_info(
    service_def: Dict,
    *,
    status,
    display_name: str = None,
    total: int = 0,
    passed: int = 0,
    failed: int = 0,
    skipped: int = 0,
    pass_rate: float = 0.0,
    last_run=None,
) -> ServiceTestInfo:
    """Assemble a ServiceTestInfo from a service definition.

    All counters default to zero so the "placeholder" variants
    (disabled / container-only / not-yet-executed) only need to pass
    the fields that differ.
    """
    return ServiceTestInfo(
        service=service_def["service"],
        display_name=display_name if display_name is not None else service_def["display_name"],
        port=service_def.get("port"),
        language=service_def["language"],
        total_tests=total,
        passed_tests=passed,
        failed_tests=failed,
        skipped_tests=skipped,
        pass_rate=pass_rate,
        coverage_percent=None,
        last_run=last_run,
        status=status,
    )


def _discover_tests(framework, base_path):
    """Run framework-specific test discovery; unknown frameworks yield []."""
    if framework == TestFramework.GO_TEST:
        return discover_go_tests(base_path)
    if framework == TestFramework.PYTEST:
        return discover_python_tests(base_path)
    if framework in (TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG):
        test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
        return discover_bqas_tests(base_path, test_type)
    return []


def build_service_info(service_def: Dict) -> ServiceTestInfo:
    """Build a ServiceTestInfo from a service definition.

    Resolution order:
      1. Disabled services        -> SKIPPED placeholder (no tests).
      2. Persisted results        -> counters computed from stored data.
      3. Container-based services -> PENDING placeholder (no local
         discovery possible; wait for an actual run).
      4. On-disk discovery        -> PENDING info carrying only the
         number of discovered (not yet executed) tests.

    Args:
        service_def: Service definition dict. Required keys: "service",
            "display_name", "language". Optional keys: "port",
            "disabled", "run_in_container", "base_path", "framework".

    Returns:
        ServiceTestInfo describing the current test state of the service.
    """
    service_id = service_def["service"]
    persisted_results = get_persisted_results()

    # Disabled services never run tests; report them as skipped.
    if service_def.get("disabled", False):
        return _make_info(
            service_def,
            display_name=f"{service_def['display_name']} (deaktiviert)",
            status=TestStatus.SKIPPED,
        )

    # Persisted results take precedence over fresh discovery.
    if service_id in persisted_results:
        persisted = persisted_results[service_id]
        total = persisted.get("total", 0)
        passed = persisted.get("passed", 0)
        failed = persisted.get("failed", 0)
        # Anything not accounted for as passed/failed counts as skipped;
        # clamp at zero in case the stored counters are inconsistent.
        skipped = max(0, total - passed - failed)
        pass_rate = (passed / total * 100) if total > 0 else 0.0
        last_run_str = persisted.get("last_run")
        last_run = datetime.fromisoformat(last_run_str) if last_run_str else None

        if failed > 0:
            status = TestStatus.FAILED
        elif total > 0:
            status = TestStatus.PASSED
        else:
            # Stored entry exists but holds no tests yet.
            status = TestStatus.PENDING

        return _make_info(
            service_def,
            status=status,
            total=total,
            passed=passed,
            failed=failed,
            skipped=skipped,
            pass_rate=pass_rate,
            last_run=last_run,
        )

    # Container-based services: no local discovery possible — wait for
    # an actual execution before reporting any numbers.
    if service_def.get("run_in_container", False):
        return _make_info(service_def, status=TestStatus.PENDING)

    # No persisted results: discover tests on disk for this framework.
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
    total = len(_discover_tests(service_def["framework"], base_path))

    # Tests were found but not executed yet: only the discovered count
    # is meaningful, every other counter stays at zero.
    return _make_info(service_def, status=TestStatus.PENDING, total=total)
|
||||
Reference in New Issue
Block a user