fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
16
backend/api/tests/registry/discovery/__init__.py
Normal file
16
backend/api/tests/registry/discovery/__init__.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""
|
||||
Test Discovery Module
|
||||
|
||||
Functions for discovering tests in various frameworks.
|
||||
"""
|
||||
|
||||
from .go_discovery import discover_go_tests
|
||||
from .python_discovery import discover_python_tests, discover_bqas_tests
|
||||
from .service_builder import build_service_info
|
||||
|
||||
__all__ = [
|
||||
"discover_go_tests",
|
||||
"discover_python_tests",
|
||||
"discover_bqas_tests",
|
||||
"build_service_info",
|
||||
]
|
||||
45
backend/api/tests/registry/discovery/go_discovery.py
Normal file
45
backend/api/tests/registry/discovery/go_discovery.py
Normal file
@@ -0,0 +1,45 @@
|
||||
"""
|
||||
Go Test Discovery
|
||||
|
||||
Functions for discovering Go tests in a codebase.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from ...models import TestCase, TestFramework, TestCategory
|
||||
from ..config import PROJECT_ROOT
|
||||
|
||||
|
||||
def discover_go_tests(base_path: Path) -> List[TestCase]:
    """Discover Go tests under *base_path*.

    Recursively scans for ``*_test.go`` files and records every line that
    declares a ``func TestXxx(`` as a :class:`TestCase` (framework GO_TEST,
    category UNIT).

    Args:
        base_path: Directory to scan; a non-existent path yields an empty list.

    Returns:
        List of discovered TestCase objects.
    """
    tests: List[TestCase] = []
    if not base_path.exists():
        return tests

    for test_file in base_path.rglob("*_test.go"):
        try:
            content = test_file.read_text()
            for i, line in enumerate(content.split("\n"), 1):
                if not line.strip().startswith("func Test"):
                    continue
                # Extract the function name: from "Test" up to the opening paren.
                name_start = line.find("Test")
                name_end = line.find("(", name_start)
                if name_end > name_start:
                    func_name = line[name_start:name_end]
                    tests.append(TestCase(
                        id=f"{test_file.stem}_{func_name}",
                        name=func_name,
                        file_path=str(test_file.relative_to(PROJECT_ROOT)),
                        line_number=i,
                        framework=TestFramework.GO_TEST,
                        category=TestCategory.UNIT,
                    ))
        except (OSError, UnicodeDecodeError, ValueError):
            # Best-effort scan: skip unreadable/undecodable files and paths
            # outside PROJECT_ROOT (relative_to raises ValueError). Narrowed
            # from a bare `except Exception` so real bugs are not hidden.
            continue

    return tests
||||
86
backend/api/tests/registry/discovery/python_discovery.py
Normal file
86
backend/api/tests/registry/discovery/python_discovery.py
Normal file
@@ -0,0 +1,86 @@
|
||||
"""
|
||||
Python Test Discovery
|
||||
|
||||
Functions for discovering Python and BQAS tests in a codebase.
|
||||
"""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from ...models import TestCase, TestFramework, TestCategory
|
||||
from ..config import PROJECT_ROOT
|
||||
|
||||
|
||||
def discover_python_tests(base_path: Path) -> List[TestCase]:
    """Discover Python (pytest-style) tests under *base_path*.

    Recursively scans for ``test_*.py`` files and records every
    ``def test_...`` or ``async def test_...`` definition as a
    :class:`TestCase` (framework PYTEST, category UNIT).

    Args:
        base_path: Directory to scan; a non-existent path yields an empty list.

    Returns:
        List of discovered TestCase objects.
    """
    tests: List[TestCase] = []
    if not base_path.exists():
        return tests

    for test_file in base_path.rglob("test_*.py"):
        try:
            content = test_file.read_text()
            for i, line in enumerate(content.split("\n"), 1):
                stripped = line.strip()
                # Sync and async test functions share the extraction logic;
                # only the length of the "def " / "async def " prefix differs.
                if stripped.startswith("def test_"):
                    prefix_len = len("def ")
                elif stripped.startswith("async def test_"):
                    prefix_len = len("async def ")
                else:
                    continue
                name_end = stripped.find("(")
                if name_end > prefix_len:
                    func_name = stripped[prefix_len:name_end]
                    tests.append(TestCase(
                        id=f"{test_file.stem}_{func_name}",
                        name=func_name,
                        file_path=str(test_file.relative_to(PROJECT_ROOT)),
                        line_number=i,
                        framework=TestFramework.PYTEST,
                        category=TestCategory.UNIT,
                    ))
        except (OSError, UnicodeDecodeError, ValueError):
            # Best-effort scan: skip unreadable/undecodable files and paths
            # outside PROJECT_ROOT. Narrowed from a bare `except Exception`
            # so real bugs are not hidden.
            continue

    return tests
||||
|
||||
|
||||
def discover_bqas_tests(base_path: Path, test_type: str) -> List[TestCase]:
    """Discover BQAS test cases (golden or RAG) under *base_path*.

    Recursively scans for ``*.json`` files whose top-level value is a list;
    each list entry that is a dict becomes a :class:`TestCase` (category BQAS).

    Args:
        base_path: Directory to scan; a non-existent path yields an empty list.
        test_type: ``"golden"`` selects BQAS_GOLDEN, anything else BQAS_RAG.

    Returns:
        List of discovered TestCase objects.
    """
    tests: List[TestCase] = []
    if not base_path.exists():
        return tests

    # Framework is invariant for the whole scan — compute it once.
    framework = (
        TestFramework.BQAS_GOLDEN if test_type == "golden" else TestFramework.BQAS_RAG
    )

    for test_file in base_path.rglob("*.json"):
        try:
            content = json.loads(test_file.read_text())
            if not isinstance(content, list):
                continue
            for i, test_case in enumerate(content):
                if not isinstance(test_case, dict):
                    # Skip malformed entries instead of aborting the whole
                    # file (previously an AttributeError dropped the rest).
                    continue
                test_id = test_case.get("id", f"{test_file.stem}_{i}")
                tests.append(TestCase(
                    id=test_id,
                    name=test_case.get("name", test_id),
                    file_path=str(test_file.relative_to(PROJECT_ROOT)),
                    framework=framework,
                    category=TestCategory.BQAS,
                ))
        except (OSError, ValueError):
            # Best-effort scan: ValueError covers json.JSONDecodeError and
            # relative_to failures; OSError covers unreadable files. Narrowed
            # from a bare `except Exception` so real bugs are not hidden.
            continue

    return tests
||||
115
backend/api/tests/registry/discovery/service_builder.py
Normal file
115
backend/api/tests/registry/discovery/service_builder.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""
|
||||
Service Info Builder
|
||||
|
||||
Builds ServiceTestInfo from service definitions.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from ...models import ServiceTestInfo, TestStatus, TestFramework
|
||||
from ..config import PROJECT_ROOT, get_persisted_results
|
||||
from .go_discovery import discover_go_tests
|
||||
from .python_discovery import discover_python_tests, discover_bqas_tests
|
||||
|
||||
|
||||
def _make_info(service_def, *, display_name=None, total=0, passed=0, failed=0,
               skipped=0, pass_rate=0.0, last_run=None,
               status=TestStatus.PENDING):
    """Build a ServiceTestInfo, filling the identity fields from *service_def*.

    All count/status fields default to the "no results yet" shape so the
    zero-filled branches of build_service_info stay one-liners.
    """
    return ServiceTestInfo(
        service=service_def["service"],
        display_name=display_name if display_name is not None else service_def["display_name"],
        port=service_def.get("port"),
        language=service_def["language"],
        total_tests=total,
        passed_tests=passed,
        failed_tests=failed,
        skipped_tests=skipped,
        pass_rate=pass_rate,
        coverage_percent=None,
        last_run=last_run,
        status=status,
    )


def build_service_info(service_def: Dict) -> ServiceTestInfo:
    """Build a ServiceTestInfo from a service definition.

    Resolution order:
      1. Disabled services -> zeroed info, status SKIPPED.
      2. Persisted results -> counts, pass rate and last run from the
         recorded execution.
      3. Container-based services -> zeroed info, status PENDING (no local
         discovery possible).
      4. Otherwise framework-specific test discovery; discovered-but-unrun
         tests report only ``total_tests`` with status PENDING.

    Args:
        service_def: Service definition dict (keys: "service", "display_name",
            "language", "framework", "base_path", optional "port", "disabled",
            "run_in_container").

    Returns:
        A populated ServiceTestInfo.
    """
    service_id = service_def["service"]

    # Disabled services need no persisted-result lookup at all.
    if service_def.get("disabled", False):
        return _make_info(
            service_def,
            display_name=f"{service_def['display_name']} (deaktiviert)",
            status=TestStatus.SKIPPED,
        )

    # Persisted results take precedence over fresh discovery.
    persisted_results = get_persisted_results()
    if service_id in persisted_results:
        persisted = persisted_results[service_id]
        total = persisted.get("total", 0)
        passed = persisted.get("passed", 0)
        failed = persisted.get("failed", 0)
        last_run_str = persisted.get("last_run")

        if failed > 0:
            status = TestStatus.FAILED
        elif total > 0:
            status = TestStatus.PASSED
        else:
            status = TestStatus.PENDING

        return _make_info(
            service_def,
            total=total,
            passed=passed,
            failed=failed,
            skipped=max(0, total - passed - failed),
            pass_rate=(passed / total * 100) if total > 0 else 0.0,
            last_run=datetime.fromisoformat(last_run_str) if last_run_str else None,
            status=status,
        )

    # No persisted results: fall back to local test discovery.
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
    framework = service_def["framework"]

    # Container-based services cannot be discovered locally; wait for an
    # actual run to produce results.
    if service_def.get("run_in_container", False):
        return _make_info(service_def, status=TestStatus.PENDING)

    if framework == TestFramework.GO_TEST:
        tests = discover_go_tests(base_path)
    elif framework == TestFramework.PYTEST:
        tests = discover_python_tests(base_path)
    elif framework in (TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG):
        test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
        tests = discover_bqas_tests(base_path, test_type)
    else:
        tests = []

    # Tests discovered but not yet executed: only the count is known.
    return _make_info(service_def, total=len(tests), status=TestStatus.PENDING)
|
||||
Reference in New Issue
Block a user