fix: Restore all files lost during destructive rebase

A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-02-09 09:51:32 +01:00
parent f7487ee240
commit bfdaf63ba9
2009 changed files with 749983 additions and 1731 deletions

View File

@@ -0,0 +1,35 @@
"""
Test Registry API
Zentrales Dashboard fuer alle Tests im Breakpilot-System.
Aggregiert Tests aus allen Services.
Phase 1 Update (2026-02-02):
- PostgreSQL-Integration fuer persistente Speicherung
- Backlog-Management mit Status-Workflow
- Historie und Trends ueber Zeit
"""
from .registry import router
from .database import get_db, get_db_session, init_db
from .repository import TestRepository
from .db_models import (
TestRunDB,
TestResultDB,
FailedTestBacklogDB,
TestFixHistoryDB,
TestServiceStatsDB
)
__all__ = [
"router",
"get_db",
"get_db_session",
"init_db",
"TestRepository",
"TestRunDB",
"TestResultDB",
"FailedTestBacklogDB",
"TestFixHistoryDB",
"TestServiceStatsDB"
]

View File

@@ -0,0 +1,91 @@
"""
Database Configuration fuer Test Registry.
PostgreSQL-Anbindung fuer persistente Test-Speicherung.
Ersetzt die bisherige JSON-basierte Speicherung.
"""
import os
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session, declarative_base
# Eigene Base fuer Test Registry - unabhaengig von anderen Modulen
# Dies vermeidet Import-Probleme in CI/CD Umgebungen
Base = declarative_base()
# Database URL from environment (nutzt gleiche DB wie Backend)
_raw_url = os.getenv(
"DATABASE_URL",
"postgresql://breakpilot:breakpilot123@postgres:5432/breakpilot_db"
)
# SQLAlchemy 2.0 erfordert "postgresql://" statt "postgres://"
DATABASE_URL = _raw_url.replace("postgres://", "postgresql://", 1) if _raw_url.startswith("postgres://") else _raw_url
# Engine configuration mit Connection Pool
engine = create_engine(
DATABASE_URL,
pool_pre_ping=True, # Prueft Connections vor Nutzung
pool_size=5, # Standard Pool-Groesse
max_overflow=10, # Zusaetzliche Connections bei Bedarf
pool_recycle=3600, # Recycle nach 1 Stunde
echo=os.getenv("SQL_ECHO", "false").lower() == "true"
)
# Session factory
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def get_db():
    """
    FastAPI dependency that provides a database session.

    Yields exactly one session per request and guarantees it is
    closed afterwards, whether or not the request handler raised.
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
@contextmanager
def get_db_session():
    """
    Context manager yielding a database session for non-FastAPI code
    (background tasks, scripts).

    Commits when the with-block exits cleanly, rolls back and
    re-raises on any exception, and always closes the session.

    Example:
        with get_db_session() as db:
            db.query(TestRun).all()
    """
    session = SessionLocal()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
def init_db():
    """
    Create all Test Registry tables via this module's Base metadata.

    In production Alembic migrations should be used instead.

    NOTE(review): create_all() only creates tables for models registered
    on THIS module's Base - verify that db_models imports its Base from
    this module, otherwise this call is a no-op for the registry tables.
    """
    from . import db_models  # import models so they register on the metadata
    Base.metadata.create_all(bind=engine)
def check_db_connection() -> bool:
    """
    Return True when a trivial query against the database succeeds.

    Useful for health checks; any failure (import error, connection
    error, query error) yields False instead of raising.
    """
    try:
        from sqlalchemy import text
        with get_db_session() as session:
            session.execute(text("SELECT 1"))
    except Exception:
        return False
    return True

View File

@@ -0,0 +1,227 @@
"""
SQLAlchemy Models fuer Test Registry.
Definiert die Datenbank-Tabellen fuer persistente Test-Speicherung:
- TestRunDB: Jeder Test-Durchlauf
- TestResultDB: Einzelne Test-Ergebnisse
- FailedTestBacklogDB: Persistenter Backlog fuer zu fixende Tests
- TestFixHistoryDB: Historie aller Fix-Versuche
- TestServiceStatsDB: Aggregierte Statistiken pro Service
"""
from datetime import datetime
from sqlalchemy import (
Column, Integer, String, Float, Text, DateTime, Boolean,
ForeignKey, UniqueConstraint, Index
)
from sqlalchemy.orm import relationship
# Nutze die gleiche Base wie Classroom Engine fuer konsistente Migrations
from classroom_engine.database import Base
class TestRunDB(Base):
    """
    Stores every test run.
    Contains run metadata and aggregate statistics.
    """
    __tablename__ = 'test_runs'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # External run identifier used by the API (also exposed as "id" in to_dict()).
    run_id = Column(String(50), unique=True, nullable=False, index=True)
    service = Column(String(100), nullable=False, index=True)
    framework = Column(String(50), nullable=False)
    started_at = Column(DateTime, nullable=False, index=True)
    completed_at = Column(DateTime, nullable=True)
    status = Column(String(20), nullable=False)  # queued, running, completed, failed
    total_tests = Column(Integer, default=0)
    passed_tests = Column(Integer, default=0)
    failed_tests = Column(Integer, default=0)
    skipped_tests = Column(Integer, default=0)
    duration_seconds = Column(Float, default=0)
    git_commit = Column(String(40), nullable=True)
    git_branch = Column(String(100), nullable=True)
    triggered_by = Column(String(50), nullable=True)  # manual, ci, schedule
    output = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship to the individual results of this run.
    results = relationship("TestResultDB", back_populates="run", cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize to a JSON-friendly dict (datetimes as ISO strings)."""
        return {
            "id": self.run_id,
            "run_id": self.run_id,
            "service": self.service,
            "framework": self.framework,
            "started_at": self.started_at.isoformat() if self.started_at else None,
            "completed_at": self.completed_at.isoformat() if self.completed_at else None,
            "status": self.status,
            "total_tests": self.total_tests,
            "passed_tests": self.passed_tests,
            "failed_tests": self.failed_tests,
            "skipped_tests": self.skipped_tests,
            "duration_seconds": self.duration_seconds,
            "git_commit": self.git_commit,
            "git_branch": self.git_branch,
            "triggered_by": self.triggered_by,
        }
class TestResultDB(Base):
    """
    Stores individual test results per run.
    Enables detailed analysis of failed tests.
    """
    __tablename__ = 'test_results'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # References TestRunDB.run_id; rows are removed with their run (CASCADE).
    run_id = Column(String(50), ForeignKey('test_runs.run_id', ondelete='CASCADE'), nullable=False, index=True)
    test_name = Column(String(500), nullable=False, index=True)
    test_file = Column(String(500), nullable=True)
    line_number = Column(Integer, nullable=True)
    status = Column(String(20), nullable=False, index=True)  # passed, failed, skipped, error
    duration_ms = Column(Float, nullable=True)
    error_message = Column(Text, nullable=True)
    error_type = Column(String(100), nullable=True)
    output = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Back-reference to the owning run.
    run = relationship("TestRunDB", back_populates="results")

    def to_dict(self):
        """Serialize to a JSON-friendly dict."""
        return {
            "id": self.id,
            "run_id": self.run_id,
            "test_name": self.test_name,
            "test_file": self.test_file,
            "line_number": self.line_number,
            "status": self.status,
            "duration_ms": self.duration_ms,
            "error_message": self.error_message,
            "error_type": self.error_type,
        }
class FailedTestBacklogDB(Base):
    """
    Persistent backlog of failing tests.
    Aggregates failures across multiple runs.
    """
    __tablename__ = 'failed_tests_backlog'
    # One backlog entry per (test, service) pair.
    __table_args__ = (
        UniqueConstraint('test_name', 'service', name='uq_backlog_test_service'),
    )

    id = Column(Integer, primary_key=True, autoincrement=True)
    test_name = Column(String(500), nullable=False)
    test_file = Column(String(500), nullable=True)
    service = Column(String(100), nullable=False, index=True)
    framework = Column(String(50), nullable=True)
    error_message = Column(Text, nullable=True)
    error_type = Column(String(100), nullable=True)
    first_failed_at = Column(DateTime, nullable=False)
    last_failed_at = Column(DateTime, nullable=False)
    failure_count = Column(Integer, default=1)
    status = Column(String(30), default='open', index=True)  # open, in_progress, fixed, wont_fix, flaky
    priority = Column(String(20), default='medium', index=True)  # critical, high, medium, low
    assigned_to = Column(String(100), nullable=True)
    fix_suggestion = Column(Text, nullable=True)
    notes = Column(Text, nullable=True)
    # Resolution fields (entries auto-close once the test passes again).
    resolved_at = Column(DateTime, nullable=True)
    resolution_commit = Column(String(50), nullable=True)
    resolution_notes = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationship to all fix attempts recorded for this entry.
    fixes = relationship("TestFixHistoryDB", back_populates="backlog_item", cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize to a JSON-friendly dict (datetimes as ISO strings)."""
        return {
            "id": self.id,
            "test_name": self.test_name,
            "test_file": self.test_file,
            "service": self.service,
            "framework": self.framework,
            "error_message": self.error_message,
            "error_type": self.error_type,
            "first_failed_at": self.first_failed_at.isoformat() if self.first_failed_at else None,
            "last_failed_at": self.last_failed_at.isoformat() if self.last_failed_at else None,
            "failure_count": self.failure_count,
            "status": self.status,
            "priority": self.priority,
            "assigned_to": self.assigned_to,
            "fix_suggestion": self.fix_suggestion,
            "notes": self.notes,
            "resolved_at": self.resolved_at.isoformat() if self.resolved_at else None,
            "resolution_commit": self.resolution_commit,
            "resolution_notes": self.resolution_notes,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }
class TestFixHistoryDB(Base):
    """
    History of all fix attempts for a backlog entry.
    Enables tracking of both automated and manual fixes.
    """
    __tablename__ = 'test_fixes_history'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # Entries are removed together with their backlog item (CASCADE).
    backlog_id = Column(Integer, ForeignKey('failed_tests_backlog.id', ondelete='CASCADE'), nullable=False, index=True)
    fix_type = Column(String(50), nullable=True)  # manual, auto_claude, auto_script
    fix_description = Column(Text, nullable=True)
    commit_hash = Column(String(40), nullable=True)
    success = Column(Boolean, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Back-reference to the backlog entry this fix belongs to.
    backlog_item = relationship("FailedTestBacklogDB", back_populates="fixes")

    def to_dict(self):
        """Serialize to a JSON-friendly dict."""
        return {
            "id": self.id,
            "backlog_id": self.backlog_id,
            "fix_type": self.fix_type,
            "fix_description": self.fix_description,
            "commit_hash": self.commit_hash,
            "success": self.success,
            "created_at": self.created_at.isoformat() if self.created_at else None,
        }
class TestServiceStatsDB(Base):
    """
    Aggregated statistics per service.
    Updated after each test run to allow fast queries.
    """
    __tablename__ = 'test_service_stats'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # One row per service.
    service = Column(String(100), unique=True, nullable=False)
    total_tests = Column(Integer, default=0)
    passed_tests = Column(Integer, default=0)
    failed_tests = Column(Integer, default=0)
    skipped_tests = Column(Integer, default=0)
    pass_rate = Column(Float, default=0.0)
    last_run_id = Column(String(50), nullable=True)
    last_run_at = Column(DateTime, nullable=True)
    last_status = Column(String(20), nullable=True)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def to_dict(self):
        """Serialize to a JSON-friendly dict (pass_rate rounded to 1 decimal)."""
        return {
            "service": self.service,
            "total_tests": self.total_tests,
            "passed_tests": self.passed_tests,
            "failed_tests": self.failed_tests,
            "skipped_tests": self.skipped_tests,
            "pass_rate": round(self.pass_rate, 1) if self.pass_rate else 0.0,
            "last_run_id": self.last_run_id,
            "last_run_at": self.last_run_at.isoformat() if self.last_run_at else None,
            "last_status": self.last_status,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }

277
backend/api/tests/models.py Normal file
View File

@@ -0,0 +1,277 @@
"""
Test Registry Data Models
Definiert die Datenstrukturen fuer das zentrale Test-Dashboard.
"""
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Optional, List, Dict, Any
class TestFramework(str, Enum):
    """Supported test frameworks/runners."""
    GO_TEST = "go_test"
    PYTEST = "pytest"
    JEST = "jest"
    PLAYWRIGHT = "playwright"
    BQAS_GOLDEN = "bqas_golden"
    BQAS_RAG = "bqas_rag"
    BQAS_SYNTHETIC = "bqas_synthetic"
class TestCategory(str, Enum):
    """Kind of testing a suite belongs to."""
    UNIT = "unit"
    INTEGRATION = "integration"
    E2E = "e2e"
    BQAS = "bqas"
    SECURITY = "security"
    PERFORMANCE = "performance"
class TestStatus(str, Enum):
    """Status of a single test case or suite."""
    PENDING = "pending"
    RUNNING = "running"
    PASSED = "passed"
    FAILED = "failed"
    SKIPPED = "skipped"
    ERROR = "error"
class RunStatus(str, Enum):
    """Lifecycle status of a whole test run."""
    QUEUED = "queued"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
@dataclass
class TestCase:
    """A single test case discovered in a source file."""
    id: str
    name: str
    file_path: str
    line_number: Optional[int] = None
    framework: TestFramework = TestFramework.GO_TEST
    category: TestCategory = TestCategory.UNIT
    duration_ms: Optional[float] = None
    status: TestStatus = TestStatus.PENDING
    error_message: Optional[str] = None
    output: Optional[str] = None
@dataclass
class TestSuite:
    """Test suite belonging to one service."""
    id: str
    service: str
    name: str
    framework: TestFramework
    category: TestCategory
    base_path: str
    pattern: str  # e.g. "*_test.go" or "test_*.py"
    tests: List[TestCase] = field(default_factory=list)
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    skipped_tests: int = 0
    duration_ms: float = 0.0
    coverage_percent: Optional[float] = None
    last_run: Optional[datetime] = None
    status: TestStatus = TestStatus.PENDING
@dataclass
class TestRun:
    """One execution of a test suite."""
    id: str
    suite_id: str
    service: str
    started_at: datetime
    completed_at: Optional[datetime] = None
    status: RunStatus = RunStatus.QUEUED
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    skipped_tests: int = 0
    duration_seconds: float = 0.0
    git_commit: Optional[str] = None
    git_branch: Optional[str] = None
    coverage_percent: Optional[float] = None
    triggered_by: str = "manual"
    output: Optional[str] = None
    failed_test_ids: List[str] = field(default_factory=list)
@dataclass
class CoverageReport:
    """Coverage report for one service."""
    service: str
    framework: TestFramework
    line_coverage: float
    branch_coverage: Optional[float] = None
    function_coverage: Optional[float] = None
    statement_coverage: Optional[float] = None
    uncovered_files: List[str] = field(default_factory=list)
    # Defaults to report creation time.
    timestamp: datetime = field(default_factory=datetime.now)
@dataclass
class ServiceTestInfo:
    """Aggregated test information for one service."""
    service: str
    display_name: str
    port: Optional[int] = None
    language: str = "unknown"
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    skipped_tests: int = 0
    pass_rate: float = 0.0
    coverage_percent: Optional[float] = None
    last_run: Optional[datetime] = None
    status: TestStatus = TestStatus.PENDING
    suites: List[TestSuite] = field(default_factory=list)
@dataclass
class TestRegistryStats:
    """Overall statistics of the whole test registry."""
    total_tests: int = 0
    total_passed: int = 0
    total_failed: int = 0
    total_skipped: int = 0
    overall_pass_rate: float = 0.0
    average_coverage: Optional[float] = None
    services_count: int = 0
    last_full_run: Optional[datetime] = None
    by_category: Dict[str, int] = field(default_factory=dict)
    by_framework: Dict[str, int] = field(default_factory=dict)
# Service definitions with their test metadata.
# Common keys: service, display_name, port, language, base_path, test_pattern,
# framework. Optional keys used by individual entries: container_name /
# run_in_container (execute inside a Docker container), pytest_args,
# disabled / disabled_reason, requires_setup / setup_note.
SERVICE_DEFINITIONS = [
    {
        "service": "consent-service",
        "display_name": "Consent Service",
        "port": 8081,
        "language": "go",
        "base_path": "/consent-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "backend",
        "display_name": "Python Backend",
        "port": 8000,
        "language": "python",
        "base_path": "/backend/tests",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
    },
    {
        "service": "voice-service",
        "display_name": "Voice Service",
        "port": 8091,
        "language": "python",
        "base_path": "/app/tests",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-voice-service",
        "run_in_container": True,
        "pytest_args": "--ignore=/app/tests/bqas",  # Exclude BQAS tests - run separately
    },
    {
        "service": "klausur-service",
        "display_name": "Klausur Service",
        "port": 8086,
        "language": "python",
        "base_path": "/app/tests",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-klausur-service",
        "run_in_container": True,
    },
    {
        "service": "billing-service",
        "display_name": "Billing Service",
        "port": 8082,
        "language": "go",
        "base_path": "/billing-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "school-service",
        "display_name": "School Service",
        "port": 8084,
        "language": "go",
        "base_path": "/school-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "edu-search-service",
        "display_name": "Edu Search Service",
        "port": 8088,
        "language": "go",
        "base_path": "/edu-search-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "ai-compliance-sdk",
        "display_name": "AI Compliance SDK",
        "port": None,
        "language": "go",
        "base_path": "/ai-compliance-sdk",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
    },
    {
        "service": "geo-service",
        "display_name": "Geo Service",
        "port": 8089,
        "language": "mixed",
        "base_path": "/geo-service",
        "test_pattern": "*_test.go",
        "framework": TestFramework.GO_TEST,
        "disabled": True,  # no tests present - directory is empty
        "disabled_reason": "Keine Test-Dateien vorhanden",
    },
    {
        "service": "website",
        "display_name": "Website (Jest)",
        "port": 3000,
        "language": "typescript",
        "base_path": "/website",
        "test_pattern": "*.test.{ts,tsx}",
        "framework": TestFramework.JEST,
        "requires_setup": True,  # requires npm install in the website directory
        "setup_note": "Fuehren Sie 'npm install' im website-Verzeichnis aus, um Tests lokal auszufuehren",
    },
    # Website E2E removed - no Playwright tests present
    {
        "service": "bqas-golden",
        "display_name": "BQAS Golden Suite",
        "port": 8091,
        "language": "python",
        "base_path": "/app/tests/bqas/test_golden.py",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-voice-service",
        "run_in_container": True,
    },
    {
        "service": "bqas-rag",
        "display_name": "BQAS RAG Tests",
        "port": 8091,
        "language": "python",
        "base_path": "/app/tests/bqas/test_rag.py",
        "test_pattern": "test_*.py",
        "framework": TestFramework.PYTEST,
        "container_name": "breakpilot-pwa-voice-service",
        "run_in_container": True,
    },
]

View File

@@ -0,0 +1,84 @@
"""
Test Registry API - Legacy Compatibility Wrapper
This file provides backward compatibility for code importing from registry.py.
All functionality has been moved to the registry/ module.
For new code, import directly from:
from api.tests.registry import router
from api.tests.registry.config import PROJECT_ROOT
etc.
"""
# Re-export router for backward compatibility
from .registry import router
# Re-export all public APIs from the modular structure
from .registry import (
# Config
PROJECT_ROOT,
RUN_MODE,
DATA_DIR,
RESULTS_FILE,
check_go_available,
check_pytest_available,
get_go_version,
get_pytest_version,
load_persisted_results,
save_persisted_results,
migrate_json_to_postgres,
is_postgres_available,
get_persisted_results,
get_test_runs,
get_current_runs,
get_running_tests,
# API Models
TestRunRequest,
TestRunResponse,
RegistryResponse,
BacklogStatusUpdate,
BacklogPriorityUpdate,
FixAttempt,
ManualBacklogEntry,
CIResultRequest,
# Discovery
discover_go_tests,
discover_python_tests,
discover_bqas_tests,
build_service_info,
# Executors
run_go_tests,
run_python_tests,
run_bqas_tests,
run_jest_tests,
run_playwright_tests,
run_tests_in_container,
execute_test_run,
# Services
extract_go_error,
classify_go_error,
suggest_go_fix,
extract_pytest_error,
classify_pytest_error,
suggest_pytest_fix,
)
# Legacy aliases for in-memory storage access.
# NOTE: these are evaluated once at import time. The container objects remain
# live references (the getters return the module-level containers directly),
# but _use_postgres is a bool snapshot and will NOT track later flag changes.
_test_runs = get_test_runs()
_current_runs = get_current_runs()
_running_tests = get_running_tests()
_persisted_results = get_persisted_results()
_use_postgres = is_postgres_available()

# Legacy function aliases (for compatibility with old function names).
_extract_go_error = extract_go_error
_classify_go_error = classify_go_error
_suggest_go_fix = suggest_go_fix
_extract_pytest_error = extract_pytest_error
_classify_pytest_error = classify_pytest_error
_suggest_pytest_fix = suggest_pytest_fix
_check_go_available = check_go_available
_check_pytest_available = check_pytest_available
_get_go_version = get_go_version
_get_pytest_version = get_pytest_version
_check_postgres_available = is_postgres_available

View File

@@ -0,0 +1,129 @@
"""
Test Registry Module
Zentrale API fuer das Test-Dashboard.
Entdeckt, registriert und fuehrt Tests aus allen Services aus.
Phase 1 Update (2026-02-02):
- PostgreSQL-Integration fuer persistente Speicherung
- Backlog-Management mit Status-Workflow
- Historie und Trends ueber Zeit
Modular Refactoring (2026-02-03):
- Split into sub-modules for maintainability
"""
# Re-export the router for FastAPI
from .routes import router
# Re-export config for external access
from .config import (
PROJECT_ROOT,
RUN_MODE,
DATA_DIR,
RESULTS_FILE,
check_go_available,
check_pytest_available,
get_go_version,
get_pytest_version,
load_persisted_results,
save_persisted_results,
migrate_json_to_postgres,
is_postgres_available,
get_persisted_results,
get_test_runs,
get_current_runs,
get_running_tests,
)
# Re-export API models
from .api_models import (
TestRunRequest,
TestRunResponse,
RegistryResponse,
BacklogStatusUpdate,
BacklogPriorityUpdate,
FixAttempt,
ManualBacklogEntry,
CIResultRequest,
)
# Re-export discovery functions
from .discovery import (
discover_go_tests,
discover_python_tests,
discover_bqas_tests,
build_service_info,
)
# Re-export executors
from .executors import (
run_go_tests,
run_python_tests,
run_bqas_tests,
run_jest_tests,
run_playwright_tests,
run_tests_in_container,
execute_test_run,
)
# Re-export services
from .services import (
extract_go_error,
classify_go_error,
suggest_go_fix,
extract_pytest_error,
classify_pytest_error,
suggest_pytest_fix,
)
# Explicit public API of the registry package; mirrors the re-exports above.
__all__ = [
    # Router
    "router",
    # Config
    "PROJECT_ROOT",
    "RUN_MODE",
    "DATA_DIR",
    "RESULTS_FILE",
    "check_go_available",
    "check_pytest_available",
    "get_go_version",
    "get_pytest_version",
    "load_persisted_results",
    "save_persisted_results",
    "migrate_json_to_postgres",
    "is_postgres_available",
    "get_persisted_results",
    "get_test_runs",
    "get_current_runs",
    "get_running_tests",
    # API Models
    "TestRunRequest",
    "TestRunResponse",
    "RegistryResponse",
    "BacklogStatusUpdate",
    "BacklogPriorityUpdate",
    "FixAttempt",
    "ManualBacklogEntry",
    "CIResultRequest",
    # Discovery
    "discover_go_tests",
    "discover_python_tests",
    "discover_bqas_tests",
    "build_service_info",
    # Executors
    "run_go_tests",
    "run_python_tests",
    "run_bqas_tests",
    "run_jest_tests",
    "run_playwright_tests",
    "run_tests_in_container",
    "execute_test_run",
    # Services
    "extract_go_error",
    "classify_go_error",
    "suggest_go_fix",
    "extract_pytest_error",
    "classify_pytest_error",
    "suggest_pytest_fix",
]

View File

@@ -0,0 +1,73 @@
"""
Test Registry API Models
Pydantic models for API requests and responses.
"""
from typing import Optional, Dict, Any, List
from pydantic import BaseModel
# ==============================================================================
# Test Run Models
# ==============================================================================
class TestRunRequest(BaseModel):
    """Request body to start a test run for a suite."""
    suite_id: str
    service: Optional[str] = None
    triggered_by: str = "manual"
class TestRunResponse(BaseModel):
    """Response returned after a test run was queued/started."""
    run_id: str
    status: str
    message: str
class RegistryResponse(BaseModel):
    """Full registry snapshot: per-service info, aggregate stats, update time."""
    services: List[Dict[str, Any]]
    stats: Dict[str, Any]
    last_updated: str
# ==============================================================================
# Backlog Models
# ==============================================================================
class BacklogStatusUpdate(BaseModel):
    """Update the workflow status of a backlog entry."""
    status: str
    notes: Optional[str] = None
    assigned_to: Optional[str] = None
class BacklogPriorityUpdate(BaseModel):
    """Update the priority of a backlog entry."""
    priority: str
class FixAttempt(BaseModel):
    """Record one fix attempt for a backlog entry."""
    fix_type: str  # manual, auto_claude, auto_script
    fix_description: str
    commit_hash: Optional[str] = None
    success: bool = False
class ManualBacklogEntry(BaseModel):
    """Manually created backlog entry for features not integrated into a runner."""
    test_name: str
    service: str
    error_message: str
    priority: str = "medium"  # critical, high, medium, low
    fix_suggestion: Optional[str] = None
# ==============================================================================
# CI/CD Models
# ==============================================================================
class CIResultRequest(BaseModel):
    """Data posted by the CI/CD pipeline (Woodpecker)."""
    pipeline_id: str
    commit: str
    branch: str
    status: str  # "completed", "failed", "success"
    test_results: Optional[Dict[str, Any]] = None  # detailed results, if provided

View File

@@ -0,0 +1,230 @@
"""
Test Registry Configuration
Project paths, environment setup, and global state management.
"""
import os
import json
import subprocess
from pathlib import Path
from typing import List, Dict, Any, Optional
from datetime import datetime
# Projekt-Basisverzeichnis - prüfe verschiedene Pfade
# 1. Docker mit Volume-Mount: /app/project
# 2. Lokale Entwicklung: /Users/benjaminadmin/Projekte/breakpilot-pwa
# 3. Fallback: Demo-Modus
DOCKER_PROJECT_PATH = Path("/app/project")
LOCAL_PROJECT_PATH = Path("/Users/benjaminadmin/Projekte/breakpilot-pwa")
if DOCKER_PROJECT_PATH.exists():
PROJECT_ROOT = DOCKER_PROJECT_PATH
RUN_MODE = "docker"
elif LOCAL_PROJECT_PATH.exists():
PROJECT_ROOT = LOCAL_PROJECT_PATH
RUN_MODE = "local"
else:
PROJECT_ROOT = LOCAL_PROJECT_PATH # Fallback für Demo
RUN_MODE = "demo"
# Pfad fuer persistierte Ergebnisse (Legacy JSON - wird noch als Fallback verwendet)
DATA_DIR = Path("/app/data")
RESULTS_FILE = DATA_DIR / "test_results.json"
# Deaktiviert - wir wollen IMMER echte Tests wenn Tools verfügbar sind
IS_DOCKER = False # Nie Demo-Modus verwenden
# Flag fuer PostgreSQL-Verfuegbarkeit
_use_postgres = True
# ==============================================================================
# In-Memory Storage
# ==============================================================================
# In-Memory Storage (wird parallel zu PostgreSQL gepflegt fuer Abwaertskompatibilitaet)
_test_runs: List[Dict] = []
_current_runs: Dict[str, Any] = {}
_running_tests: Dict[str, Dict] = {} # Progress-Tracking fuer laufende Tests
_persisted_results: Dict[str, Dict] = {} # Persistierte Testergebnisse (Legacy)
def get_test_runs() -> List[Dict]:
    """Return the live in-memory list of all test runs (not a copy)."""
    return _test_runs
def get_current_runs() -> Dict[str, Any]:
    """Return the live mapping of currently running tests (not a copy)."""
    return _current_runs
def get_running_tests() -> Dict[str, Dict]:
    """Return the live progress-tracking mapping for running tests (not a copy)."""
    return _running_tests
def get_persisted_results() -> Dict[str, Dict]:
    """Return the live mapping of persisted test results (not a copy)."""
    return _persisted_results
def set_persisted_results(results: Dict[str, Dict]):
    """Replace the in-memory persisted-results mapping wholesale."""
    global _persisted_results
    _persisted_results = results
def is_postgres_available() -> bool:
    """Return the cached PostgreSQL availability flag (set by check_postgres_available)."""
    return _use_postgres
def set_postgres_available(available: bool):
    """Override the cached PostgreSQL availability flag."""
    global _use_postgres
    _use_postgres = available
# ==============================================================================
# Tool Availability Checks
# ==============================================================================
def check_go_available() -> bool:
    """Return True if the Go toolchain is installed and runnable.

    Returns:
        True when "go version" exits with status 0, False when the binary
        is missing, the call fails, or it times out after 5 seconds.
    """
    try:
        result = subprocess.run(["go", "version"], capture_output=True, timeout=5)
        return result.returncode == 0
    except (OSError, subprocess.SubprocessError):
        # Narrowed from a bare "except:": only missing-binary/exec errors and
        # timeouts mean "not available"; anything else should surface as a bug.
        return False
def check_pytest_available() -> bool:
    """Return True if pytest is installed and runnable.

    Probes the venv binary first, then pytest on PATH, mirroring the
    candidate list used by get_pytest_version().

    Returns:
        True as soon as one candidate answers "--version" with exit code 0.
    """
    pytest_paths = ["/opt/venv/bin/pytest", "pytest"]
    for path in pytest_paths:
        try:
            result = subprocess.run(path.split() + ["--version"], capture_output=True, timeout=5)
            if result.returncode == 0:
                return True
        except (OSError, subprocess.SubprocessError):
            # Narrowed from a bare "except:": try the next candidate on
            # missing-binary/exec errors or timeouts only.
            continue
    return False
def get_go_version() -> Optional[str]:
    """Return the installed Go version string (e.g. "1.23.5"), or None.

    Returns:
        The version parsed from "go version" output, or None when Go is
        missing, the call fails/times out, or the output is unparseable.
    """
    try:
        result = subprocess.run(["go", "version"], capture_output=True, text=True, timeout=5)
    except (OSError, subprocess.SubprocessError):
        # Narrowed from a bare "except:": missing binary or timeout only.
        return None
    if result.returncode == 0:
        # "go version go1.23.5 linux/arm64" -> "1.23.5"
        parts = result.stdout.strip().split()
        if len(parts) >= 3:
            return parts[2].replace("go", "")
    return None
def get_pytest_version() -> Optional[str]:
    """Return the installed pytest version string (e.g. "8.3.2"), or None.

    Probes the same candidate paths as check_pytest_available() so the two
    functions agree on whether pytest exists (the previous version only
    checked /opt/venv/bin/pytest and returned None even when pytest was on
    PATH). Parsing is guarded so malformed output yields None, not an error.
    """
    pytest_paths = ["/opt/venv/bin/pytest", "pytest"]
    for path in pytest_paths:
        try:
            result = subprocess.run(path.split() + ["--version"], capture_output=True, text=True, timeout=5)
        except (OSError, subprocess.SubprocessError):
            continue
        if result.returncode == 0 and result.stdout:
            # "pytest 8.x.x" -> "8.x.x"
            parts = result.stdout.strip().split()
            if len(parts) >= 2:
                return parts[1]
    return None
# ==============================================================================
# Persistence Functions
# ==============================================================================
def check_postgres_available() -> bool:
    """Probe the database connection and cache the result in _use_postgres.

    Returns:
        True when the registry database answers a trivial query.
    """
    global _use_postgres
    try:
        from ..database import check_db_connection
        _use_postgres = check_db_connection()
    except Exception:
        # Import or connection failure: treat PostgreSQL as unavailable.
        _use_postgres = False
    return _use_postgres
def load_persisted_results():
    """Load persisted test results at startup - PostgreSQL first, JSON fallback.

    Fills the module-level _persisted_results mapping. On a DB error the
    JSON file is tried; on a JSON error the mapping is reset to empty.
    """
    global _persisted_results
    # Try to load from PostgreSQL first.
    if check_postgres_available():
        try:
            from ..database import get_db_session
            from ..repository import TestRepository
            with get_db_session() as db:
                repo = TestRepository(db)
                stats = repo.get_all_service_stats()
                for stat in stats:
                    _persisted_results[stat.service] = {
                        "total": stat.total_tests,
                        "passed": stat.passed_tests,
                        "failed": stat.failed_tests,
                        "last_run": stat.last_run_at.isoformat() if stat.last_run_at else None,
                        "status": stat.last_status or "unknown",
                        "failed_test_ids": []  # loaded lazily later
                    }
                print(f"Test-Ergebnisse aus PostgreSQL geladen: {len(stats)} Services")
                return
        except Exception as e:
            print(f"Fehler beim Laden aus PostgreSQL: {e}")
    # Fallback: JSON file.
    if RESULTS_FILE.exists():
        try:
            with open(RESULTS_FILE, "r") as f:
                _persisted_results = json.load(f)
            print(f"Test-Ergebnisse aus JSON geladen: {len(_persisted_results)} Services")
        except Exception as e:
            print(f"Fehler beim Laden der Testergebnisse: {e}")
            _persisted_results = {}
def save_persisted_results():
    """Write the in-memory results to the JSON backup file.

    NOTE(review): despite the original wording ("PostgreSQL und JSON"),
    this function only writes the JSON file; PostgreSQL persistence is
    handled elsewhere via the repository.
    """
    # Save JSON as a backup; failures are logged but never raised.
    try:
        DATA_DIR.mkdir(parents=True, exist_ok=True)
        with open(RESULTS_FILE, "w") as f:
            json.dump(_persisted_results, f, indent=2, default=str)
    except Exception as e:
        print(f"Fehler beim Speichern der JSON-Testergebnisse: {e}")
def migrate_json_to_postgres() -> int:
    """Migrate existing JSON data to PostgreSQL (one-off).

    Returns:
        Number of migrated services; 0 when PostgreSQL is unavailable,
        there is nothing to migrate, or the migration failed.
    """
    if not _use_postgres:
        return 0
    if not _persisted_results:
        return 0
    try:
        from ..database import get_db_session
        from ..repository import TestRepository
        with get_db_session() as db:
            repo = TestRepository(db)
            count = repo.migrate_from_json(_persisted_results)
            print(f"Migration abgeschlossen: {count} Services migriert")
            return count
    except Exception as e:
        print(f"Fehler bei Migration: {e}")
        return 0
# Load persisted results at import time.
# NOTE(review): module-level side effect - importing this module may touch
# the database and the filesystem; consider invoking explicitly at startup.
load_persisted_results()

View File

@@ -0,0 +1,16 @@
"""
Test Discovery Module
Functions for discovering tests in various frameworks.
"""
from .go_discovery import discover_go_tests
from .python_discovery import discover_python_tests, discover_bqas_tests
from .service_builder import build_service_info
__all__ = [
"discover_go_tests",
"discover_python_tests",
"discover_bqas_tests",
"build_service_info",
]

View File

@@ -0,0 +1,45 @@
"""
Go Test Discovery
Functions for discovering Go tests in a codebase.
"""
from pathlib import Path
from typing import List
from ...models import TestCase, TestFramework, TestCategory
from ..config import PROJECT_ROOT
def discover_go_tests(base_path: Path) -> List[TestCase]:
    """Discover Go test functions below *base_path*.

    Scans every ``*_test.go`` file for ``func TestXxx(`` declarations,
    following the ``go test`` naming convention: the character after
    ``Test`` must not be lowercase, so helpers such as ``func Testable...``
    are no longer reported as tests (the previous prefix check matched
    them incorrectly).

    Args:
        base_path: directory to search recursively.

    Returns:
        One TestCase per discovered test function; empty list when the
        path does not exist or nothing is found.
    """
    import re  # local import: keeps this module's top-level imports unchanged

    # "func Test(" or "func TestX..." where X is not a lowercase letter.
    test_func_re = re.compile(r"^func\s+(Test(?:[^a-z\s(]\w*)?)\s*\(")
    tests: List[TestCase] = []
    if not base_path.exists():
        return tests
    for test_file in base_path.rglob("*_test.go"):
        try:
            # errors="replace" so a stray non-UTF-8 byte cannot abort the file.
            content = test_file.read_text(errors="replace")
            for line_no, raw_line in enumerate(content.split("\n"), 1):
                match = test_func_re.match(raw_line.strip())
                if not match:
                    continue
                func_name = match.group(1)
                tests.append(TestCase(
                    id=f"{test_file.stem}_{func_name}",
                    name=func_name,
                    file_path=str(test_file.relative_to(PROJECT_ROOT)),
                    line_number=line_no,
                    framework=TestFramework.GO_TEST,
                    category=TestCategory.UNIT,
                ))
        except Exception:
            # Unreadable / out-of-tree file: skip it, keep discovering.
            continue
    return tests

View File

@@ -0,0 +1,86 @@
"""
Python Test Discovery
Functions for discovering Python and BQAS tests in a codebase.
"""
import json
from pathlib import Path
from typing import List
from ...models import TestCase, TestFramework, TestCategory
from ..config import PROJECT_ROOT
def discover_python_tests(base_path: Path) -> List[TestCase]:
    """Discover pytest-style tests below *base_path*.

    Scans every ``test_*.py`` file for ``def test_...`` and
    ``async def test_...`` functions. Both forms are matched by a single
    regular expression (the previous implementation duplicated the
    extraction logic for the sync and async cases).

    Args:
        base_path: directory to search recursively.

    Returns:
        One TestCase per discovered test function; empty list when the
        path does not exist or nothing is found.
    """
    import re  # local import: keeps this module's top-level imports unchanged

    # One pattern covers both "def test_x(" and "async def test_x(".
    test_def_re = re.compile(r"(?:async\s+)?def\s+(test_\w*)\s*\(")
    tests: List[TestCase] = []
    if not base_path.exists():
        return tests
    for test_file in base_path.rglob("test_*.py"):
        try:
            content = test_file.read_text()
            for line_no, raw_line in enumerate(content.split("\n"), 1):
                match = test_def_re.match(raw_line.strip())
                if not match:
                    continue
                func_name = match.group(1)
                tests.append(TestCase(
                    id=f"{test_file.stem}_{func_name}",
                    name=func_name,
                    file_path=str(test_file.relative_to(PROJECT_ROOT)),
                    line_number=line_no,
                    framework=TestFramework.PYTEST,
                    category=TestCategory.UNIT,
                ))
        except Exception:
            # Unreadable / out-of-tree file: skip it, keep discovering.
            continue
    return tests
def discover_bqas_tests(base_path: Path, test_type: str) -> List[TestCase]:
    """Discover BQAS test cases (golden or RAG) below *base_path*.

    Every ``*.json`` file whose top-level value is a list is treated as a
    collection of test-case dicts. Malformed list entries are skipped
    individually; previously a single non-dict entry raised and silently
    aborted the rest of the file while keeping partial results.

    Args:
        base_path: directory to search recursively.
        test_type: "golden" selects BQAS_GOLDEN, anything else BQAS_RAG.

    Returns:
        One TestCase per JSON entry; empty list when the path does not
        exist or nothing is found.
    """
    tests: List[TestCase] = []
    if not base_path.exists():
        return tests
    framework = TestFramework.BQAS_GOLDEN if test_type == "golden" else TestFramework.BQAS_RAG
    for test_file in base_path.rglob("*.json"):
        try:
            content = json.loads(test_file.read_text())
            rel_path = str(test_file.relative_to(PROJECT_ROOT))
        except Exception:
            # Unreadable, invalid JSON, or outside the project root: skip file.
            continue
        if not isinstance(content, list):
            continue
        for index, test_case in enumerate(content):
            if not isinstance(test_case, dict):
                # Malformed entry: skip it but keep the rest of the file.
                continue
            test_id = test_case.get("id", f"{test_file.stem}_{index}")
            tests.append(TestCase(
                id=test_id,
                name=test_case.get("name", test_id),
                file_path=rel_path,
                framework=framework,
                category=TestCategory.BQAS,
            ))
    return tests

View File

@@ -0,0 +1,115 @@
"""
Service Info Builder
Builds ServiceTestInfo from service definitions.
"""
from datetime import datetime
from pathlib import Path
from typing import Dict
from ...models import ServiceTestInfo, TestStatus, TestFramework
from ..config import PROJECT_ROOT, get_persisted_results
from .go_discovery import discover_go_tests
from .python_discovery import discover_python_tests, discover_bqas_tests
def _zeroed_info(service_def: Dict, display_name: str, total_tests: int, status) -> ServiceTestInfo:
    """Build a ServiceTestInfo with all result counters at zero.

    Shared by the disabled / container / not-yet-executed branches of
    build_service_info, which previously repeated the same 13-field
    constructor call.
    """
    return ServiceTestInfo(
        service=service_def["service"],
        display_name=display_name,
        port=service_def.get("port"),
        language=service_def["language"],
        total_tests=total_tests,
        passed_tests=0,
        failed_tests=0,
        skipped_tests=0,
        pass_rate=0.0,
        coverage_percent=None,
        last_run=None,
        status=status,
    )


def build_service_info(service_def: Dict) -> ServiceTestInfo:
    """Create a ServiceTestInfo from a service definition.

    Resolution order:
      1. disabled services      -> zeroed info, SKIPPED
      2. persisted results      -> real counters from the last run
      3. container-run services -> zeroed info, PENDING (no local discovery)
      4. local test discovery   -> count of discovered tests, PENDING
    """
    service_id = service_def["service"]
    persisted_results = get_persisted_results()
    # 1. Disabled service: nothing to run, mark as skipped.
    if service_def.get("disabled", False):
        return _zeroed_info(
            service_def,
            display_name=f"{service_def['display_name']} (deaktiviert)",
            total_tests=0,
            status=TestStatus.SKIPPED,
        )
    # 2. Persisted results from a previous run take precedence.
    if service_id in persisted_results:
        persisted = persisted_results[service_id]
        total = persisted.get("total", 0)
        passed = persisted.get("passed", 0)
        failed = persisted.get("failed", 0)
        skipped = max(0, total - passed - failed)
        pass_rate = (passed / total * 100) if total > 0 else 0.0
        last_run_str = persisted.get("last_run")
        last_run = datetime.fromisoformat(last_run_str) if last_run_str else None
        if failed > 0:
            status = TestStatus.FAILED
        elif total > 0:
            status = TestStatus.PASSED
        else:
            status = TestStatus.PENDING
        return ServiceTestInfo(
            service=service_def["service"],
            display_name=service_def["display_name"],
            port=service_def.get("port"),
            language=service_def["language"],
            total_tests=total,
            passed_tests=passed,
            failed_tests=failed,
            skipped_tests=skipped,
            pass_rate=pass_rate,
            coverage_percent=None,
            last_run=last_run,
            status=status,
        )
    # 3./4. No persisted results: fall back to discovery.
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
    framework = service_def["framework"]
    if service_def.get("run_in_container", False):
        # Container-based services cannot be discovered locally; counters
        # stay at zero until the tests are actually executed.
        return _zeroed_info(
            service_def,
            display_name=service_def["display_name"],
            total_tests=0,
            status=TestStatus.PENDING,
        )
    if framework == TestFramework.GO_TEST:
        tests = discover_go_tests(base_path)
    elif framework == TestFramework.PYTEST:
        tests = discover_python_tests(base_path)
    elif framework in (TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG):
        test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
        tests = discover_bqas_tests(base_path, test_type)
    else:
        tests = []
    # Tests discovered but not yet executed: expose only the count.
    return _zeroed_info(
        service_def,
        display_name=service_def["display_name"],
        total_tests=len(tests),
        status=TestStatus.PENDING,
    )

View File

@@ -0,0 +1,23 @@
"""
Test Executors Module
Functions for running tests in various frameworks.
"""
from .go_executor import run_go_tests
from .python_executor import run_python_tests
from .bqas_executor import run_bqas_tests
from .jest_executor import run_jest_tests
from .playwright_executor import run_playwright_tests
from .container_executor import run_tests_in_container
from .test_runner import execute_test_run
__all__ = [
"run_go_tests",
"run_python_tests",
"run_bqas_tests",
"run_jest_tests",
"run_playwright_tests",
"run_tests_in_container",
"execute_test_run",
]

View File

@@ -0,0 +1,44 @@
"""
BQAS Test Executor
Executes BQAS tests via API proxy.
"""
import json
from typing import Dict
import httpx
from ...models import TestFramework
async def run_bqas_tests(service_def: Dict) -> Dict:
    """Proxy a BQAS test run to the voice-service API.

    Derives the test type (golden/rag) from the service definition's
    framework and POSTs to the BQAS endpoint. On any error a zeroed
    result is returned (never demo data); unlike the previous version,
    the failure reason is included in the output instead of being
    silently swallowed.

    Args:
        service_def: service definition dict; ``framework`` selects the
            test type.

    Returns:
        Dict with passed/failed/total counts, output and (always empty)
        ``failed_test_ids``.
    """
    test_type = "golden" if service_def["framework"] == TestFramework.BQAS_GOLDEN else "rag"
    error_detail = ""
    try:
        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(
                f"http://localhost:8091/api/v1/bqas/run/{test_type}",
            )
            if response.status_code == 200:
                data = response.json()
                metrics = data.get("metrics", {})
                return {
                    "passed": metrics.get("passed_tests", 0),
                    "failed": metrics.get("failed_tests", 0),
                    "total": metrics.get("total_tests", 0),
                    "output": json.dumps(data, indent=2)[:5000],
                    "failed_test_ids": [],
                }
            error_detail = f" (HTTP {response.status_code})"
    except Exception as e:
        # API unreachable or malformed response - fall through to the
        # zeroed result below, but keep the reason for the operator.
        error_detail = f" ({e})"
    # Error path when the API is not reachable - NO demo data.
    return {
        "passed": 0,
        "failed": 0,
        "total": 0,
        "output": f"BQAS API nicht erreichbar{error_detail}. Nutze docker exec fuer {test_type} Tests.",
        "failed_test_ids": [],
    }

View File

@@ -0,0 +1,106 @@
"""
Container Test Executor
Executes tests inside Docker containers via docker exec.
"""
import re
import asyncio
import subprocess
from typing import Dict
from ..config import get_running_tests
async def run_tests_in_container(
    container_name: str,
    framework: str,
    base_path: str,
    service_id: str,
    pytest_args: str = ""
) -> Dict:
    """Execute tests inside another Docker container via ``docker exec``.

    Args:
        container_name: target container for ``docker exec``.
        framework: "pytest" runs pytest; any other value runs ``go test``.
        base_path: test path inside the container (pytest only).
        service_id: key under which live progress is published.
        pytest_args: extra whitespace-separated pytest arguments.

    Returns:
        Dict with passed/failed/total counts, the raw output and the list
        of failed test identifiers; zeroed counts with the error text on
        failure.
    """
    running_tests = get_running_tests()
    # Publish an initial progress record so the UI can show "running".
    running_tests[service_id] = {
        "current_file": "",
        "files_done": 0,
        "files_total": 1,
        "passed": 0,
        "failed": 0,
        "status": "running"
    }
    try:
        if framework == "pytest":
            cmd = ["docker", "exec", container_name, "python", "-m", "pytest", base_path, "-v", "--tb=short", "-q"]
            # Append extra pytest arguments from the service definition.
            if pytest_args:
                cmd.extend(pytest_args.split())
        else:
            cmd = ["docker", "exec", container_name, "go", "test", "-v", "./..."]
        def run_docker_exec():
            # Blocking call; executed in a worker thread below so the
            # event loop stays responsive. 10-minute hard timeout.
            return subprocess.run(cmd, capture_output=True, text=True, timeout=600)
        result = await asyncio.to_thread(run_docker_exec)
        output = result.stdout + result.stderr
        passed = 0
        failed = 0
        failed_test_ids = []
        if framework == "pytest":
            # Parse pytest output: summary counters plus "FAILED ..." lines.
            for line in output.split("\n"):
                if "passed" in line.lower():
                    match = re.search(r"(\d+)\s+passed", line)
                    if match:
                        passed = int(match.group(1))
                if "failed" in line.lower():
                    match = re.search(r"(\d+)\s+failed", line)
                    if match:
                        failed = int(match.group(1))
                if line.startswith("FAILED"):
                    test_name = line.replace("FAILED", "").strip()
                    failed_test_ids.append(test_name)
        else:
            # Parse go test output: count "--- PASS:" / "--- FAIL:" markers.
            for line in output.split("\n"):
                if line.startswith("--- PASS:"):
                    passed += 1
                elif line.startswith("--- FAIL:"):
                    failed += 1
                    match = re.search(r"--- FAIL: (\S+)", line)
                    if match:
                        failed_test_ids.append(match.group(1))
        total = passed + failed
        running_tests[service_id] = {
            "current_file": "Abgeschlossen",
            "files_done": 1,
            "files_total": 1,
            "passed": passed,
            "failed": failed,
            "status": "completed"
        }
        return {
            "passed": passed,
            "failed": failed,
            "total": total,
            "output": output,
            "failed_test_ids": failed_test_ids
        }
    except Exception as e:
        # Any failure (docker missing, timeout, parse error) is reported
        # through the progress dict and a zeroed result.
        running_tests[service_id] = {
            "current_file": str(e),
            "files_done": 0,
            "files_total": 1,
            "passed": 0,
            "failed": 0,
            "status": "error"
        }
        return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}

View File

@@ -0,0 +1,137 @@
"""
Go Test Executor
Executes Go tests and parses results.
"""
import os
import json
import subprocess
import asyncio
from pathlib import Path
from typing import Dict
from ..config import check_go_available, get_running_tests
from ..services.error_handling import extract_go_error, classify_go_error, suggest_go_fix
async def run_go_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Go-Tests aus (Thread-basiert, blockiert nicht den Event Loop)"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
# Prüfe ob Go installiert ist
go_available = check_go_available()
if not go_available:
return {"passed": 0, "failed": 0, "total": 0, "output": "Go nicht installiert - Tests koennen nicht ausgefuehrt werden", "failed_test_ids": []}
# Initialer Progress-Status
if service_id:
running_tests[service_id] = {
"current_file": "Starte Go-Tests...",
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "running"
}
def run_go_tests_sync():
"""Laeuft in separatem Thread"""
try:
env = os.environ.copy()
env["GOPATH"] = "/tmp/go"
env["GOCACHE"] = "/tmp/go-cache"
env["CGO_ENABLED"] = "0"
result = subprocess.run(
["go", "test", "-v", "-json", "./..."],
cwd=str(base_path),
capture_output=True,
text=True,
timeout=300,
env=env,
)
passed = failed = 0
failed_test_ids = []
test_outputs = {}
for line in result.stdout.split("\n"):
if line.strip():
try:
event = json.loads(line)
action = event.get("Action")
test_name = event.get("Test", "")
pkg = event.get("Package", "")
if action == "pass" and test_name:
passed += 1
if service_id:
running_tests[service_id] = {
"current_file": f"{test_name}",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "running"
}
elif action == "fail" and test_name:
failed += 1
test_key = f"{pkg}::{test_name}"
error_output = test_outputs.get(test_key, "")
error_message = extract_go_error(error_output)
failed_test_ids.append({
"id": test_key,
"name": test_name,
"package": pkg,
"file_path": pkg.replace("github.com/", ""),
"error_message": error_message or "Test fehlgeschlagen - keine Details",
"error_type": classify_go_error(error_message),
"suggestion": suggest_go_fix(error_message),
})
if service_id:
running_tests[service_id] = {
"current_file": f"{test_name}",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "running"
}
elif action == "output" and test_name:
test_key = f"{pkg}::{test_name}"
test_outputs[test_key] = test_outputs.get(test_key, "") + event.get("Output", "")
except json.JSONDecodeError:
pass
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": result.stdout[:5000] if result.stdout else result.stderr[:5000],
"failed_test_ids": failed_test_ids,
}
except subprocess.TimeoutExpired:
return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
except Exception as e:
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}
loop = asyncio.get_event_loop()
result = await loop.run_in_executor(None, run_go_tests_sync)
# Finaler Status
if service_id:
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": 1,
"files_total": 1,
"passed": result.get("passed", 0),
"failed": result.get("failed", 0),
"status": "completed"
}
return result

View File

@@ -0,0 +1,130 @@
"""
Jest Test Executor
Executes Jest tests for JavaScript/TypeScript projects.
"""
import os
import re
import json
import subprocess
from pathlib import Path
from typing import Dict
from ..config import get_running_tests
async def run_jest_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Jest-Tests aus"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
running_tests[service_id] = {
"current_file": "Starte Jest...",
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "running"
}
# Prüfe ob Node.js verfügbar ist
try:
node_check = subprocess.run(["node", "--version"], capture_output=True, timeout=5)
if node_check.returncode != 0:
return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht installiert", "failed_test_ids": []}
except:
return {"passed": 0, "failed": 0, "total": 0, "output": "Node.js nicht verfuegbar", "failed_test_ids": []}
try:
# Wechsle ins Projektverzeichnis und fuehre Jest aus
env = os.environ.copy()
env["CI"] = "true" # Nicht-interaktiver Modus
result = subprocess.run(
["npm", "test", "--", "--json", "--passWithNoTests"],
cwd=str(base_path),
capture_output=True,
text=True,
timeout=300,
env=env,
)
output = result.stdout + result.stderr
passed = 0
failed = 0
failed_test_ids = []
# Versuche JSON-Output zu parsen
try:
# Jest JSON beginnt mit {"num... - finde den Start
json_start = output.find('{"num')
if json_start == -1:
json_start = output.rfind('{"')
if json_start != -1:
json_str = output[json_start:]
# Versuche JSON zu parsen mit json.JSONDecoder
decoder = json.JSONDecoder()
try:
jest_result, _ = decoder.raw_decode(json_str)
passed = jest_result.get("numPassedTests", 0)
failed = jest_result.get("numFailedTests", 0)
# Extrahiere fehlgeschlagene Tests
for test_result in jest_result.get("testResults", []):
for assertion in test_result.get("assertionResults", []):
if assertion.get("status") == "failed":
failed_test_ids.append({
"id": f"{test_result.get('name', '')}::{assertion.get('fullName', '')}",
"name": assertion.get("fullName", ""),
"file_path": test_result.get("name", ""),
"error_message": " ".join(assertion.get("failureMessages", []))[:500],
"error_type": "assertion",
"suggestion": "Pruefe die Test-Assertions und erwarteten Werte",
})
except json.JSONDecodeError:
# Fallback: Parse Text-Output mit Regex
for line in output.split("\n"):
if "passed" in line.lower():
match = re.search(r"(\d+)\s+passed", line)
if match:
passed = int(match.group(1))
if "failed" in line.lower():
match = re.search(r"(\d+)\s+failed", line)
if match:
failed = int(match.group(1))
except Exception:
# Allgemeiner Fallback: Parse Text-Output
for line in output.split("\n"):
if "passed" in line.lower():
match = re.search(r"(\d+)\s+passed", line)
if match:
passed = int(match.group(1))
if "failed" in line.lower():
match = re.search(r"(\d+)\s+failed", line)
if match:
failed = int(match.group(1))
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "completed"
}
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": output[:5000],
"failed_test_ids": failed_test_ids,
}
except subprocess.TimeoutExpired:
return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 5 Minuten", "failed_test_ids": []}
except Exception as e:
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}

View File

@@ -0,0 +1,101 @@
"""
Playwright Test Executor
Executes Playwright E2E tests.
"""
import os
import re
import json
import subprocess
from pathlib import Path
from typing import Dict
from ..config import get_running_tests
async def run_playwright_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Playwright E2E-Tests aus"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
running_tests[service_id] = {
"current_file": "Starte Playwright...",
"files_done": 0,
"files_total": 1,
"passed": 0,
"failed": 0,
"status": "running"
}
try:
env = os.environ.copy()
env["CI"] = "true"
result = subprocess.run(
["npx", "playwright", "test", "--reporter=json"],
cwd=str(base_path),
capture_output=True,
text=True,
timeout=600, # E2E Tests brauchen laenger
env=env,
)
output = result.stdout + result.stderr
passed = 0
failed = 0
failed_test_ids = []
# Parse Playwright JSON Output
try:
pw_result = json.loads(output)
for suite in pw_result.get("suites", []):
for spec in suite.get("specs", []):
for test in spec.get("tests", []):
for result_item in test.get("results", []):
if result_item.get("status") == "passed":
passed += 1
elif result_item.get("status") == "failed":
failed += 1
failed_test_ids.append({
"id": spec.get("title", ""),
"name": spec.get("title", ""),
"file_path": spec.get("file", ""),
"error_message": result_item.get("error", {}).get("message", "")[:500],
"error_type": "e2e",
"suggestion": "Pruefe den E2E-Test und die Anwendung",
})
except json.JSONDecodeError:
# Fallback: Parse Text-Output
for line in output.split("\n"):
if "passed" in line.lower():
match = re.search(r"(\d+)\s+passed", line)
if match:
passed = int(match.group(1))
if "failed" in line.lower():
match = re.search(r"(\d+)\s+failed", line)
if match:
failed = int(match.group(1))
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": 1,
"files_total": 1,
"passed": passed,
"failed": failed,
"status": "completed"
}
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": output[:5000],
"failed_test_ids": failed_test_ids,
}
except subprocess.TimeoutExpired:
return {"passed": 0, "failed": 0, "total": 0, "output": "Timeout nach 10 Minuten", "failed_test_ids": []}
except Exception as e:
return {"passed": 0, "failed": 0, "total": 0, "output": str(e), "failed_test_ids": []}

View File

@@ -0,0 +1,187 @@
"""
Python Test Executor
Executes pytest tests with live progress updates.
"""
import os
import re
import subprocess
import asyncio
from pathlib import Path
from typing import Dict
from ..config import get_running_tests
from ..services.error_handling import extract_pytest_error, classify_pytest_error, suggest_pytest_fix
async def run_python_tests(base_path: Path, service_id: str = "") -> Dict:
"""Fuehrt Python-Tests aus mit Live-Progress-Updates (Thread-basiert)"""
running_tests = get_running_tests()
if not base_path.exists():
return {"passed": 0, "failed": 0, "total": 0, "output": f"Pfad existiert nicht: {base_path}", "failed_test_ids": []}
# Versuche verschiedene pytest-Pfade
pytest_paths = [
"/opt/venv/bin/pytest", # Docker venv
"pytest", # System pytest
"python -m pytest", # Als Modul
]
pytest_cmd = None
for path in pytest_paths:
try:
check = subprocess.run(
path.split() + ["--version"],
capture_output=True,
timeout=5,
)
if check.returncode == 0:
pytest_cmd = path.split()
break
except:
continue
if not pytest_cmd:
return {"passed": 0, "failed": 0, "total": 0, "output": "pytest nicht gefunden", "failed_test_ids": []}
# Erst alle Test-Dateien zaehlen
test_files = []
test_dir = base_path if base_path.is_dir() else base_path.parent
for f in test_dir.rglob("test_*.py"):
test_files.append(f.name)
total_files = len(test_files) if test_files else 1
# Initialer Progress-Status
running_tests[service_id] = {
"current_file": "Starte Tests...",
"files_done": 0,
"files_total": total_files,
"passed": 0,
"failed": 0,
"status": "running"
}
# Ergebnis-Container
result_container = {
"output_lines": [],
"passed": 0,
"failed": 0,
"files_seen": set(),
"current_file": "",
"done": False,
"error": None
}
def run_pytest_with_progress():
"""Laeuft in separatem Thread - blockiert nicht den Event Loop"""
try:
cwd = str(base_path.parent) if base_path.is_file() else str(base_path)
# Unbuffered output fuer Echtzeit-Fortschritt
env = os.environ.copy()
env["PYTHONUNBUFFERED"] = "1"
process = subprocess.Popen(
pytest_cmd + ["-v", "-s", "--tb=short", str(base_path)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
cwd=cwd,
bufsize=1,
env=env,
)
for line in iter(process.stdout.readline, ''):
if not line:
break
result_container["output_lines"].append(line)
line_stripped = line.strip()
# Parse Test-Ergebnisse
match = re.match(r'(\S+\.py)::(\S+)\s+(PASSED|FAILED|SKIPPED|ERROR)', line_stripped)
if match:
file_path = match.group(1)
status = match.group(3)
file_name = Path(file_path).name
if file_name not in result_container["files_seen"]:
result_container["files_seen"].add(file_name)
result_container["current_file"] = file_name
if status == "PASSED":
result_container["passed"] += 1
elif status in ("FAILED", "ERROR"):
result_container["failed"] += 1
# Progress aktualisieren
running_tests[service_id] = {
"current_file": result_container["current_file"],
"files_done": len(result_container["files_seen"]),
"files_total": max(total_files, len(result_container["files_seen"])),
"passed": result_container["passed"],
"failed": result_container["failed"],
"status": "running"
}
process.wait()
result_container["done"] = True
except Exception as e:
result_container["error"] = str(e)
result_container["done"] = True
# Starte Test-Ausführung in separatem Thread
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, run_pytest_with_progress)
full_output = "".join(result_container["output_lines"])
passed = result_container["passed"]
failed = result_container["failed"]
files_seen = result_container["files_seen"]
if result_container["error"]:
running_tests[service_id] = {
"current_file": result_container["error"],
"files_done": 0,
"files_total": total_files,
"passed": 0,
"failed": 0,
"status": "error"
}
return {"passed": 0, "failed": 0, "total": 0, "output": result_container["error"], "failed_test_ids": []}
# Parse fehlgeschlagene Tests aus Output
failed_test_ids = []
for match in re.finditer(r'FAILED\s+(\S+)::(\S+)', full_output):
file_path = match.group(1)
test_name = match.group(2)
error_msg = extract_pytest_error(full_output, test_name)
failed_test_ids.append({
"id": f"{file_path}::{test_name}",
"name": test_name,
"file_path": file_path,
"error_message": error_msg or "Test fehlgeschlagen - keine Details",
"error_type": classify_pytest_error(error_msg),
"suggestion": suggest_pytest_fix(error_msg),
})
# Finaler Status
running_tests[service_id] = {
"current_file": "Abgeschlossen",
"files_done": len(files_seen),
"files_total": len(files_seen),
"passed": passed,
"failed": failed,
"status": "completed"
}
return {
"passed": passed,
"failed": failed,
"total": passed + failed,
"output": full_output[:5000],
"failed_test_ids": failed_test_ids,
}

View File

@@ -0,0 +1,192 @@
"""
Test Runner
Orchestrates test execution and persists results.
"""
from datetime import datetime
from typing import Dict
from ...models import TestRun, RunStatus, TestFramework
from ..config import (
PROJECT_ROOT,
get_test_runs,
get_current_runs,
get_persisted_results,
save_persisted_results,
is_postgres_available,
)
from .go_executor import run_go_tests
from .python_executor import run_python_tests
from .bqas_executor import run_bqas_tests
from .jest_executor import run_jest_tests
from .playwright_executor import run_playwright_tests
from .container_executor import run_tests_in_container
async def execute_test_run(run_id: str, service_def: Dict):
    """Execute a test run in the background.

    Dispatches to the framework-specific executor, records the run in the
    in-memory history, persists aggregate results (legacy JSON store) and,
    when available, mirrors everything into PostgreSQL.

    Args:
        run_id: unique identifier for this run.
        service_def: service definition dict (framework, base_path,
            container settings, ...).
    """
    test_runs = get_test_runs()
    current_runs = get_current_runs()
    persisted_results = get_persisted_results()
    framework = service_def["framework"]
    service_id = service_def["service"]
    base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
    # Short-circuit: disabled services get a completed no-op run.
    if service_def.get("disabled", False):
        reason = service_def.get("disabled_reason", "Service deaktiviert")
        run = TestRun(
            id=run_id,
            suite_id=service_id,
            service=service_id,
            started_at=datetime.now(),
            completed_at=datetime.now(),
            status=RunStatus.COMPLETED,
            output=f"Service deaktiviert: {reason}",
        )
        current_runs[run_id] = run
        test_runs.append({
            "id": run.id,
            "suite_id": run.suite_id,
            "service": run.service,
            "started_at": run.started_at.isoformat(),
            "completed_at": run.completed_at.isoformat(),
            "status": run.status.value,
            "total_tests": 0,
            "passed_tests": 0,
            "failed_tests": 0,
            "failed_test_ids": [],
            "duration_seconds": 0,
        })
        return
    # Should the tests run inside another container (docker exec)?
    run_in_container = service_def.get("run_in_container", False)
    container_name = service_def.get("container_name", "")
    run = TestRun(
        id=run_id,
        suite_id=service_id,
        service=service_id,
        started_at=datetime.now(),
        status=RunStatus.RUNNING,
    )
    current_runs[run_id] = run
    try:
        # Real test execution based on the configured framework.
        if run_in_container and container_name:
            # Execute the tests inside the external container.
            framework_str = "pytest" if framework == TestFramework.PYTEST else "go"
            container_base_path = service_def.get("base_path", "/app/tests")
            pytest_args = service_def.get("pytest_args", "")
            result = await run_tests_in_container(container_name, framework_str, container_base_path, service_id, pytest_args)
        elif framework == TestFramework.GO_TEST:
            result = await run_go_tests(base_path, service_id=service_id)
        elif framework == TestFramework.PYTEST:
            result = await run_python_tests(base_path, service_id=service_id)
        elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
            result = await run_bqas_tests(service_def)
        elif framework == TestFramework.JEST:
            result = await run_jest_tests(base_path, service_id=service_id)
        elif framework == TestFramework.PLAYWRIGHT:
            result = await run_playwright_tests(base_path, service_id=service_id)
        else:
            result = {"passed": 0, "failed": 0, "total": 0, "output": "Framework nicht unterstuetzt"}
        run.completed_at = datetime.now()
        run.status = RunStatus.COMPLETED if result.get("failed", 0) == 0 else RunStatus.FAILED
        run.total_tests = result.get("total", 0)
        run.passed_tests = result.get("passed", 0)
        run.failed_tests = result.get("failed", 0)
        run.failed_test_ids = result.get("failed_test_ids", [])
        run.duration_seconds = (run.completed_at - run.started_at).total_seconds()
        run.output = result.get("output", "")
    except Exception as e:
        run.completed_at = datetime.now()
        run.status = RunStatus.FAILED
        run.output = str(e)
    # Record in the in-memory history.
    test_runs.append({
        "id": run.id,
        "suite_id": run.suite_id,
        "service": run.service,
        "started_at": run.started_at.isoformat(),
        "completed_at": run.completed_at.isoformat() if run.completed_at else None,
        "status": run.status.value,
        "total_tests": run.total_tests,
        "passed_tests": run.passed_tests,
        "failed_tests": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "duration_seconds": run.duration_seconds,
    })
    # Persist aggregate results (legacy in-memory dict + JSON file).
    persisted_results[service_id] = {
        "total": run.total_tests,
        "passed": run.passed_tests,
        "failed": run.failed_tests,
        "failed_test_ids": run.failed_test_ids,
        "last_run": run.completed_at.isoformat() if run.completed_at else datetime.now().isoformat(),
        "status": run.status.value,
    }
    save_persisted_results()
    # PostgreSQL persistence (best effort; failures are logged, not raised).
    if is_postgres_available():
        try:
            from ...database import get_db_session
            from ...repository import TestRepository
            with get_db_session() as db:
                repo = TestRepository(db)
                # Create the run row if it does not exist yet.
                db_run = repo.get_run(run.id)
                if not db_run:
                    db_run = repo.create_run(
                        run_id=run.id,
                        service=service_id,
                        framework=framework.value,
                        triggered_by="manual"
                    )
                # Close out the run.
                repo.complete_run(
                    run_id=run.id,
                    status=run.status.value,
                    total_tests=run.total_tests,
                    passed_tests=run.passed_tests,
                    failed_tests=run.failed_tests,
                    skipped_tests=0,
                    duration_seconds=run.duration_seconds,
                    output=run.output
                )
                # Store individual results for the failed tests.
                if run.failed_test_ids:
                    results_to_add = []
                    for failed in run.failed_test_ids:
                        if isinstance(failed, dict):
                            results_to_add.append({
                                "name": failed.get("name") or failed.get("id", "unknown"),
                                "file_path": failed.get("file_path"),
                                "status": "failed",
                                "error_message": failed.get("error_message"),
                                "error_type": failed.get("error_type"),
                                "suggestion": failed.get("suggestion")
                            })
                        elif isinstance(failed, str):
                            # Legacy format: only the test id as a string.
                            results_to_add.append({
                                "name": failed,
                                "status": "failed"
                            })
                    if results_to_add:
                        repo.add_results(run.id, results_to_add)
        except Exception as e:
            print(f"Fehler beim PostgreSQL-Speichern: {e}")

View File

@@ -0,0 +1,21 @@
"""
Test Registry Routes Module
All API endpoints for the test registry.
"""
from fastapi import APIRouter
from .tests import router as tests_router
from .backlog import router as backlog_router
from .ci import router as ci_router
# Create main router
router = APIRouter(prefix="/api/tests", tags=["Test Registry"])
# Include sub-routers
router.include_router(tests_router)
router.include_router(backlog_router)
router.include_router(ci_router)
__all__ = ["router"]

View File

@@ -0,0 +1,580 @@
"""
Test Registry - Backlog Endpoints
Endpoints for failed test backlog management.
"""
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, HTTPException, Query
from ...database import get_db_session
from ...repository import TestRepository
from ..api_models import (
BacklogStatusUpdate,
BacklogPriorityUpdate,
FixAttempt,
ManualBacklogEntry,
)
from ..config import (
get_test_runs,
get_persisted_results,
is_postgres_available,
migrate_json_to_postgres,
)
router = APIRouter()


def _normalize_failed_entry(failed, service: str, run_id: str, run_time: str) -> dict:
    """Convert a persisted failed-test entry into the uniform backlog shape.

    Supports both the structured dict format and the legacy format where
    the entry is just the test id as a string. ``status`` is one of
    open / in_progress / fixed; entries from persistence start as "open".
    """
    if isinstance(failed, dict):
        return {
            "id": failed.get("id", ""),
            "name": failed.get("name", ""),
            "service": service,
            "file_path": failed.get("file_path", ""),
            "line_number": failed.get("line_number"),
            "error_message": failed.get("error_message", "Keine Fehlermeldung verfuegbar"),
            "error_type": failed.get("error_type", "unknown"),
            "suggestion": failed.get("suggestion", ""),
            "run_id": run_id,
            "last_failed": run_time,
            "status": "open",
        }
    # Legacy format: only the test id as a string.
    return {
        "id": failed,
        "name": failed,
        "service": service,
        "file_path": "",
        "line_number": None,
        "error_message": "Keine Details verfuegbar",
        "error_type": "unknown",
        "suggestion": "",
        "run_id": run_id,
        "last_failed": run_time,
        "status": "open",
    }


@router.get("/failed")
async def get_failed_tests():
    """Return all failed tests from the persisted results for backlog
    management, with human-readable error descriptions.

    Entries are deduplicated by test id (most recent ``last_failed``
    wins) and additionally grouped by service. The previous version
    duplicated the response-dict construction for the dict and legacy
    string formats; that now lives in ``_normalize_failed_entry``.
    """
    persisted_results = get_persisted_results()
    failed_tests = []
    # Collect failed tests from the persisted results of every service.
    for service, data in persisted_results.items():
        run_time = data.get("last_run", "")
        run_id = f"persisted_{service}"
        for failed in data.get("failed_test_ids", []):
            if isinstance(failed, (dict, str)):
                failed_tests.append(_normalize_failed_entry(failed, service, run_id, run_time))
    # Deduplicate by test id, keeping only the most recent failure
    # (ISO timestamps compare correctly as strings).
    seen = {}
    for test in failed_tests:
        test_id = test["id"]
        if test_id not in seen or test["last_failed"] > seen[test_id]["last_failed"]:
            seen[test_id] = test
    unique_failed = list(seen.values())
    # Group by service for the dashboard view.
    by_service = {}
    for test in unique_failed:
        by_service.setdefault(test["service"], []).append(test)
    return {
        "total_failed": len(unique_failed),
        "by_service": by_service,
        "tests": unique_failed,
        "last_updated": datetime.now().isoformat(),
    }
@router.post("/failed/{test_id}/status")
async def update_failed_test_status(test_id: str, status: str):
"""
Aktualisiert den Status eines fehlgeschlagenen Tests.
Status: 'open', 'in_progress', 'fixed', 'wont_fix'
Legacy-Endpoint - nutzt nun PostgreSQL wenn verfuegbar.
"""
valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
if status not in valid_statuses:
raise HTTPException(
status_code=400,
detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
)
# Versuche in PostgreSQL zu speichern
if is_postgres_available():
try:
with get_db_session() as db:
repo = TestRepository(db)
# Suche nach Backlog-Item mit test_id
backlog_items = repo.get_backlog()
for item in backlog_items:
if item.test_name == test_id or str(item.id) == test_id:
repo.update_backlog_status(item.id, status)
return {
"test_id": test_id,
"backlog_id": item.id,
"status": status,
"updated_at": datetime.now().isoformat(),
"message": f"Test-Status auf '{status}' gesetzt (PostgreSQL)",
}
except Exception as e:
print(f"PostgreSQL-Fehler: {e}")
# Fallback: nur Bestaetigung zurueckgeben
return {
"test_id": test_id,
"status": status,
"updated_at": datetime.now().isoformat(),
"message": f"Test-Status auf '{status}' gesetzt",
}
@router.get("/backlog")
async def get_backlog(
status: Optional[str] = Query(None, description="Filter nach Status: open, in_progress, fixed, wont_fix, flaky"),
service: Optional[str] = Query(None, description="Filter nach Service"),
priority: Optional[str] = Query(None, description="Filter nach Prioritaet: critical, high, medium, low"),
limit: int = Query(100, ge=1, le=500),
offset: int = Query(0, ge=0)
):
"""
Gibt den persistenten Backlog fehlgeschlagener Tests zurueck.
Der Backlog aggregiert fehlgeschlagene Tests ueber mehrere Runs hinweg
und ermoeglicht Status-Management (open -> in_progress -> fixed).
"""
if not is_postgres_available():
# Fallback auf legacy /failed Endpoint
return await get_failed_tests()
try:
with get_db_session() as db:
repo = TestRepository(db)
items = repo.get_backlog(
status=status,
service=service,
priority=priority,
limit=limit,
offset=offset
)
total = repo.get_backlog_count(status=status, service=service)
# Gruppiere nach Service
by_service = {}
for item in items:
svc = item.service
if svc not in by_service:
by_service[svc] = []
by_service[svc].append(item.to_dict())
return {
"total": total,
"items": [item.to_dict() for item in items],
"by_service": by_service,
"filters": {
"status": status,
"service": service,
"priority": priority
},
"pagination": {
"limit": limit,
"offset": offset
}
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/backlog/{backlog_id}")
async def get_backlog_item(backlog_id: int):
"""
Gibt Details zu einem einzelnen Backlog-Eintrag zurueck.
Inklusive Fix-Historie.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
try:
with get_db_session() as db:
repo = TestRepository(db)
item = repo.get_backlog_item(backlog_id)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
# Hole Fix-Historie
fixes = repo.get_fix_history(backlog_id)
result = item.to_dict()
result["fixes"] = [fix.to_dict() for fix in fixes]
return result
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/status")
async def update_backlog_item_status(backlog_id: int, update: BacklogStatusUpdate):
"""
Aktualisiert den Status eines Backlog-Eintrags.
Moegliche Status:
- open: Noch nicht bearbeitet
- in_progress: Wird gerade bearbeitet
- fixed: Test wurde gefixt
- wont_fix: Wird nicht gefixt (mit Begruendung)
- flaky: Flaky Test, wird separat behandelt
"""
valid_statuses = ["open", "in_progress", "fixed", "wont_fix", "flaky"]
if update.status not in valid_statuses:
raise HTTPException(
status_code=400,
detail=f"Ungueltiger Status. Erlaubt: {', '.join(valid_statuses)}"
)
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
try:
with get_db_session() as db:
repo = TestRepository(db)
item = repo.update_backlog_status(
backlog_id=backlog_id,
status=update.status,
notes=update.notes,
assigned_to=update.assigned_to
)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
return item.to_dict()
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/priority")
async def update_backlog_item_priority(backlog_id: int, update: BacklogPriorityUpdate):
"""
Aktualisiert die Prioritaet eines Backlog-Eintrags.
Moegliche Prioritaeten:
- critical: Kritisch - sofort beheben
- high: Hoch - bald beheben
- medium: Mittel - bei Gelegenheit
- low: Niedrig - irgendwann
"""
valid_priorities = ["critical", "high", "medium", "low"]
if update.priority not in valid_priorities:
raise HTTPException(
status_code=400,
detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
)
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
try:
with get_db_session() as db:
repo = TestRepository(db)
item = repo.update_backlog_priority(backlog_id, update.priority)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
return item.to_dict()
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog/{backlog_id}/fix")
async def add_fix_attempt(backlog_id: int, fix: FixAttempt):
"""
Fuegt einen Fix-Versuch zur Historie hinzu.
Bei success=True wird der Backlog-Status automatisch auf 'fixed' gesetzt.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
valid_fix_types = ["manual", "auto_claude", "auto_script"]
if fix.fix_type not in valid_fix_types:
raise HTTPException(
status_code=400,
detail=f"Ungueltiger Fix-Typ. Erlaubt: {', '.join(valid_fix_types)}"
)
try:
with get_db_session() as db:
repo = TestRepository(db)
# Pruefe ob Backlog-Item existiert
item = repo.get_backlog_item(backlog_id)
if not item:
raise HTTPException(status_code=404, detail=f"Backlog-Item {backlog_id} nicht gefunden")
# Fix-Versuch hinzufuegen
fix_record = repo.add_fix_attempt(
backlog_id=backlog_id,
fix_type=fix.fix_type,
fix_description=fix.fix_description,
commit_hash=fix.commit_hash,
success=fix.success
)
return {
"fix": fix_record.to_dict(),
"backlog_status": "fixed" if fix.success else item.status
}
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.post("/backlog")
async def create_backlog_entry(entry: ManualBacklogEntry):
"""
Erstellt einen manuellen Backlog-Eintrag.
Nuetzlich fuer:
- Nicht-integrierte Features (xfail Tests)
- Bekannte Probleme die noch behoben werden muessen
- Feature Requests aus dem Test-Kontext
"""
from ...db_models import FailedTestBacklogDB
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
valid_priorities = ["critical", "high", "medium", "low"]
if entry.priority not in valid_priorities:
raise HTTPException(
status_code=400,
detail=f"Ungueltige Prioritaet. Erlaubt: {', '.join(valid_priorities)}"
)
try:
with get_db_session() as db:
now = datetime.utcnow()
# Pruefe ob schon ein offener Eintrag existiert
existing = db.query(FailedTestBacklogDB).filter(
FailedTestBacklogDB.test_name == entry.test_name,
FailedTestBacklogDB.service == entry.service,
FailedTestBacklogDB.status == "open"
).first()
if existing:
# Aktualisiere existierenden Eintrag
existing.error_message = entry.error_message
existing.priority = entry.priority
existing.fix_suggestion = entry.fix_suggestion
existing.last_failed_at = now
db.commit()
return {
"id": existing.id,
"status": "updated",
"message": f"Existierender Backlog-Eintrag aktualisiert"
}
# Neuen Eintrag erstellen
backlog = FailedTestBacklogDB(
test_name=entry.test_name,
test_file=f"{entry.service}/",
service=entry.service,
framework="manual",
error_message=entry.error_message,
error_type="feature_not_integrated",
status="open",
priority=entry.priority,
fix_suggestion=entry.fix_suggestion,
first_failed_at=now,
last_failed_at=now,
failure_count=1
)
db.add(backlog)
db.commit()
db.refresh(backlog)
return {
"id": backlog.id,
"status": "created",
"message": f"Backlog-Eintrag erstellt: {entry.test_name}"
}
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/history")
async def get_test_history(
service: Optional[str] = Query(None, description="Filter nach Service"),
days: int = Query(30, ge=1, le=365, description="Anzahl Tage zurueck"),
limit: int = Query(100, ge=1, le=1000)
):
"""
Gibt die Test-Run Historie fuer Trend-Analysen zurueck.
Aggregiert Daten nach Tag und Service.
"""
test_runs = get_test_runs()
if not is_postgres_available():
# Fallback auf In-Memory Historie
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
return {"runs": sorted_runs[:limit], "source": "memory"}
try:
with get_db_session() as db:
repo = TestRepository(db)
history = repo.get_run_history(
service=service,
days=days,
limit=limit
)
return {
"history": history,
"days": days,
"service": service,
"source": "postgresql"
}
except Exception as e:
# Fallback auf In-Memory
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
return {"runs": sorted_runs[:limit], "source": "memory", "error": str(e)}
@router.get("/trends")
async def get_test_trends(
service: Optional[str] = Query(None, description="Filter nach Service"),
days: int = Query(14, ge=1, le=90, description="Anzahl Tage")
):
"""
Gibt Trend-Daten fuer Visualisierungen zurueck.
Zeigt Pass-Rate und Test-Anzahl ueber Zeit.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar fuer Trends")
try:
with get_db_session() as db:
repo = TestRepository(db)
history = repo.get_run_history(service=service, days=days, limit=days * 20)
# Aggregiere nach Tag
by_date = {}
for entry in history:
date = entry["date"]
if date not in by_date:
by_date[date] = {
"date": date,
"total_tests": 0,
"passed": 0,
"failed": 0,
"runs": 0
}
by_date[date]["total_tests"] += entry["total_tests"]
by_date[date]["passed"] += entry["passed"]
by_date[date]["failed"] += entry["failed"]
by_date[date]["runs"] += entry["runs"]
# Berechne Pass-Rate pro Tag
trends = []
for date, data in sorted(by_date.items()):
total = data["total_tests"]
data["pass_rate"] = round((data["passed"] / total * 100) if total > 0 else 0, 1)
trends.append(data)
return {
"trends": trends,
"days": days,
"service": service
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Datenbankfehler: {str(e)}")
@router.get("/stats")
async def get_aggregated_stats():
"""
Gibt aggregierte Statistiken ueber alle Services zurueck.
Kombiniert Daten aus PostgreSQL und Service-Definitionen.
"""
from ...models import TestRegistryStats
persisted_results = get_persisted_results()
if is_postgres_available():
try:
with get_db_session() as db:
repo = TestRepository(db)
summary = repo.get_summary_stats()
service_stats = repo.get_all_service_stats()
return {
"summary": summary,
"services": [s.to_dict() for s in service_stats],
"source": "postgresql"
}
except Exception as e:
print(f"PostgreSQL-Fehler: {e}")
# Fallback auf Legacy-Daten
stats = TestRegistryStats()
for service, data in persisted_results.items():
stats.total_tests += data.get("total", 0)
stats.total_passed += data.get("passed", 0)
stats.total_failed += data.get("failed", 0)
stats.services_count = len(persisted_results)
stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0
return {
"summary": {
"total_tests": stats.total_tests,
"total_passed": stats.total_passed,
"total_failed": stats.total_failed,
"total_skipped": stats.total_skipped,
"services_count": stats.services_count,
"overall_pass_rate": round(stats.overall_pass_rate, 1)
},
"services": list(persisted_results.keys()),
"source": "memory"
}
@router.post("/migrate")
async def trigger_migration():
"""
Migriert bestehende JSON-Daten nach PostgreSQL.
Einmalig ausfuehren um historische Daten zu uebernehmen.
"""
if not is_postgres_available():
raise HTTPException(status_code=503, detail="PostgreSQL nicht verfuegbar")
count = migrate_json_to_postgres()
return {
"migrated_services": count,
"message": f"{count} Services von JSON nach PostgreSQL migriert"
}

View File

@@ -0,0 +1,295 @@
"""
Test Registry - CI/CD Integration Endpoints
Endpoints for receiving results from CI/CD pipelines.
"""
from datetime import datetime
from typing import Dict
from fastapi import APIRouter, BackgroundTasks
from ...database import get_db_session
from ...repository import TestRepository
from ..api_models import CIResultRequest
from ..config import (
get_test_runs,
get_persisted_results,
is_postgres_available,
)
# Module-level router; the endpoints below register themselves on it.
router = APIRouter()
@router.post("/ci-result")
async def receive_ci_result(result: CIResultRequest, background_tasks: BackgroundTasks):
"""
Empfaengt Test-Ergebnisse von der CI/CD-Pipeline.
Wird vom report-test-results Step in .woodpecker/main.yml aufgerufen.
Flow:
1. Pipeline fuehrt Tests aus und sammelt JSON-Ergebnisse
2. Pipeline sendet detaillierte Ergebnisse pro Service hierher
3. Dieser Endpoint speichert in PostgreSQL
4. Dashboard zeigt die Daten an
test_results Format:
{
"service": "consent-service",
"framework": "go",
"total": 57,
"passed": 57,
"failed": 0,
"skipped": 0,
"coverage": 75.5
}
"""
test_runs = get_test_runs()
persisted_results = get_persisted_results()
# Extrahiere Service-spezifische Daten aus test_results
tr = result.test_results or {}
service_name = tr.get("service", "ci-pipeline")
framework = tr.get("framework", "unknown")
total = tr.get("total", 0)
passed = tr.get("passed", 0)
failed = tr.get("failed", 0)
skipped = tr.get("skipped", 0)
coverage = tr.get("coverage", 0)
# Log zur Debugging
print(f"[CI-RESULT] Pipeline {result.pipeline_id} - Service: {service_name}")
print(f"[CI-RESULT] Tests: {passed}/{total} passed, {failed} failed, {skipped} skipped")
print(f"[CI-RESULT] Coverage: {coverage}%, Commit: {result.commit[:8]}")
# Speichere in PostgreSQL wenn verfuegbar
if is_postgres_available():
try:
with get_db_session() as db:
repo = TestRepository(db)
# Erstelle eindeutige Run-ID pro Service
run_id = f"ci-{result.pipeline_id}-{service_name}"
# Erstelle Test-Run Eintrag
run = repo.create_run(
run_id=run_id,
service=service_name,
framework=framework,
triggered_by="ci",
git_commit=result.commit[:8] if result.commit else None,
git_branch=result.branch
)
# Markiere als abgeschlossen mit detaillierten Zahlen
status = "passed" if failed == 0 else "failed"
repo.complete_run(
run_id=run_id,
status=status,
total_tests=total,
passed_tests=passed,
failed_tests=failed,
skipped_tests=skipped,
duration_seconds=0
)
print(f"[CI-RESULT] Stored as run_id: {run_id}, status: {status}")
# WICHTIG: Aktualisiere den In-Memory Cache fuer sofortige Frontend-Updates
persisted_results[service_name] = {
"total": total,
"passed": passed,
"failed": failed,
"last_run": datetime.utcnow().isoformat(),
"status": status,
"failed_test_ids": []
}
print(f"[CI-RESULT] Updated cache for {service_name}: {passed}/{total} passed")
# Bei fehlgeschlagenen Tests: Backlog-Eintrag erstellen
if failed > 0:
background_tasks.add_task(
_create_backlog_entry,
service_name,
framework,
failed,
result.pipeline_id,
result.commit,
result.branch
)
else:
# Alle Tests bestanden: Schließe offene Backlog-Einträge
background_tasks.add_task(
_close_backlog_entry,
service_name,
result.pipeline_id,
result.commit
)
return {
"received": True,
"run_id": run_id,
"service": service_name,
"pipeline_id": result.pipeline_id,
"status": status,
"tests": {"total": total, "passed": passed, "failed": failed},
"stored_in": "postgres"
}
except Exception as e:
print(f"[CI-RESULT] PostgreSQL Error: {e}")
# Fallback auf Memory-Storage
pass
# Memory-Fallback
ci_run = {
"id": f"ci-{result.pipeline_id}",
"pipeline_id": result.pipeline_id,
"commit": result.commit,
"branch": result.branch,
"status": result.status,
"timestamp": datetime.now().isoformat(),
"test_results": result.test_results
}
test_runs.append(ci_run)
return {
"received": True,
"pipeline_id": result.pipeline_id,
"status": result.status,
"stored_in": "memory"
}
async def _create_backlog_entry(
    service_name: str,
    framework: str,
    failed_count: int,
    pipeline_id: str,
    commit: str,
    branch: str
):
    """
    Background task: create backlog entries for failed tests.

    Invoked asynchronously whenever a CI run reports failures.
    """
    from ...db_models import FailedTestBacklogDB
    print(f"[CI-RESULT] Creating backlog entry for {service_name}: {failed_count} failed tests")
    if not is_postgres_available():
        return
    try:
        with get_db_session() as db:
            now = datetime.utcnow()
            # Look for an already-open backlog entry for this service.
            existing = db.query(FailedTestBacklogDB).filter(
                FailedTestBacklogDB.service == service_name,
                FailedTestBacklogDB.status == "open"
            ).first()
            if existing is not None:
                # Refresh the existing entry instead of duplicating it.
                existing.last_failed_at = now
                existing.failure_count += 1
                existing.error_message = f"{failed_count} Tests fehlgeschlagen in Pipeline {pipeline_id} (Branch: {branch})"
                db.commit()
                print(f"[CI-RESULT] Updated existing backlog entry (ID: {existing.id})")
                return
            # First failure for this service: create a new entry.
            backlog = FailedTestBacklogDB(
                test_name=f"{service_name} Tests",
                test_file=f"{service_name}/",
                service=service_name,
                framework=framework,
                error_message=f"{failed_count} Tests fehlgeschlagen in Pipeline {pipeline_id} (Branch: {branch})",
                error_type="TEST_FAILURE",
                first_failed_at=now,
                last_failed_at=now,
                failure_count=1,
                status="open",
                priority="high" if failed_count > 5 else "medium"
            )
            db.add(backlog)
            db.commit()
            print(f"[CI-RESULT] Created new backlog entry (ID: {backlog.id})")
    except Exception as e:
        print(f"[CI-RESULT] Error creating backlog entry: {e}")
async def _close_backlog_entry(
    service_name: str,
    pipeline_id: str,
    commit: str
):
    """
    Background task: close backlog entries once all tests pass.

    Invoked asynchronously after a fully green CI run.
    """
    from ...db_models import FailedTestBacklogDB
    print(f"[CI-RESULT] Checking for open backlog entries to close for {service_name}")
    if not is_postgres_available():
        return
    try:
        with get_db_session() as db:
            now = datetime.utcnow()
            # Collect every open entry belonging to this service.
            open_entries = db.query(FailedTestBacklogDB).filter(
                FailedTestBacklogDB.service == service_name,
                FailedTestBacklogDB.status == "open"
            ).all()
            for entry in open_entries:
                entry.status = "resolved"
                entry.resolved_at = now
                entry.resolution_commit = commit[:8] if commit else None
                entry.resolution_notes = f"Automatisch geschlossen - alle Tests in Pipeline {pipeline_id} bestanden"
                print(f"[CI-RESULT] Auto-closed backlog entry (ID: {entry.id}) for {service_name}")
            if open_entries:
                db.commit()
                print(f"[CI-RESULT] Closed {len(open_entries)} backlog entries for {service_name}")
            else:
                print(f"[CI-RESULT] No open backlog entries for {service_name}")
    except Exception as e:
        print(f"[CI-RESULT] Error closing backlog entries: {e}")
async def _fetch_and_store_failed_tests(pipeline_id: str, commit: str, branch: str):
    """
    Legacy background task for generic pipeline failures.
    """
    from ...db_models import FailedTestBacklogDB
    print(f"[CI-RESULT] Fetching failed test details for pipeline {pipeline_id}")
    if not is_postgres_available():
        return
    try:
        with get_db_session() as db:
            timestamp = datetime.utcnow()
            record = FailedTestBacklogDB(
                test_name=f"CI Pipeline {pipeline_id}",
                test_file=".woodpecker/main.yml",
                service="ci-pipeline",
                framework="woodpecker",
                error_message=f"Pipeline {pipeline_id} fehlgeschlagen auf Branch {branch}",
                error_type="CI_FAILURE",
                first_failed_at=timestamp,
                last_failed_at=timestamp,
                failure_count=1,
                status="open",
                priority="high"
            )
            db.add(record)
            db.commit()
            print(f"[CI-RESULT] Added pipeline failure to backlog (ID: {record.id})")
    except Exception as e:
        print(f"[CI-RESULT] Error adding to backlog: {e}")

View File

@@ -0,0 +1,335 @@
"""
Test Registry - Test Endpoints
Endpoints for test discovery, running, and monitoring.
"""
from datetime import datetime
from typing import Dict, Any
from fastapi import APIRouter, HTTPException, BackgroundTasks
from ...models import (
TestFramework,
TestRegistryStats,
SERVICE_DEFINITIONS,
)
from ..api_models import TestRunResponse, RegistryResponse
from ..config import (
PROJECT_ROOT,
RUN_MODE,
check_go_available,
check_pytest_available,
get_go_version,
get_pytest_version,
get_test_runs,
get_current_runs,
get_running_tests,
)
from ..discovery import (
build_service_info,
discover_go_tests,
discover_python_tests,
discover_bqas_tests,
)
from ..executors import execute_test_run
# Module-level router; the endpoints below register themselves on it.
router = APIRouter()
@router.get("/registry", response_model=RegistryResponse)
async def get_test_registry():
"""
Gibt alle registrierten Tests zurueck.
Scannt alle Services und aggregiert Test-Informationen.
"""
services = []
stats = TestRegistryStats()
by_category: Dict[str, int] = {}
by_framework: Dict[str, int] = {}
for service_def in SERVICE_DEFINITIONS:
info = build_service_info(service_def)
services.append({
"service": info.service,
"display_name": info.display_name,
"port": info.port,
"language": info.language,
"total_tests": info.total_tests,
"passed_tests": info.passed_tests,
"failed_tests": info.failed_tests,
"skipped_tests": info.skipped_tests,
"pass_rate": round(info.pass_rate, 1),
"coverage_percent": round(info.coverage_percent, 1) if info.coverage_percent else None,
"last_run": info.last_run.isoformat() if info.last_run else None,
"status": info.status.value,
})
stats.total_tests += info.total_tests
stats.total_passed += info.passed_tests
stats.total_failed += info.failed_tests
stats.total_skipped += info.skipped_tests
# Framework-Stats
framework_name = service_def["framework"].value
by_framework[framework_name] = by_framework.get(framework_name, 0) + info.total_tests
# Category basierend auf Framework
if service_def["framework"] in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
by_category["bqas"] = by_category.get("bqas", 0) + info.total_tests
elif service_def["framework"] == TestFramework.PLAYWRIGHT:
by_category["e2e"] = by_category.get("e2e", 0) + info.total_tests
else:
by_category["unit"] = by_category.get("unit", 0) + info.total_tests
stats.services_count = len(services)
stats.overall_pass_rate = (stats.total_passed / stats.total_tests * 100) if stats.total_tests > 0 else 0.0
stats.by_category = by_category
stats.by_framework = by_framework
return RegistryResponse(
services=services,
stats={
"total_tests": stats.total_tests,
"total_passed": stats.total_passed,
"total_failed": stats.total_failed,
"total_skipped": stats.total_skipped,
"overall_pass_rate": round(stats.overall_pass_rate, 1),
"services_count": stats.services_count,
"by_category": stats.by_category,
"by_framework": stats.by_framework,
},
last_updated=datetime.now().isoformat(),
)
@router.get("/registry/{service}")
async def get_service_tests(service: str):
"""
Gibt Tests fuer einen spezifischen Service zurueck.
"""
service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == service), None)
if not service_def:
raise HTTPException(status_code=404, detail=f"Service '{service}' nicht gefunden")
info = build_service_info(service_def)
base_path = PROJECT_ROOT / service_def["base_path"].lstrip("/")
framework = service_def["framework"]
# Test-Discovery
if framework == TestFramework.GO_TEST:
tests = discover_go_tests(base_path)
elif framework == TestFramework.PYTEST:
tests = discover_python_tests(base_path)
elif framework in [TestFramework.BQAS_GOLDEN, TestFramework.BQAS_RAG]:
test_type = "golden" if framework == TestFramework.BQAS_GOLDEN else "rag"
tests = discover_bqas_tests(base_path, test_type)
else:
tests = []
return {
"service": info.service,
"display_name": info.display_name,
"port": info.port,
"language": info.language,
"total_tests": len(tests),
"passed_tests": info.passed_tests,
"failed_tests": info.failed_tests,
"coverage_percent": info.coverage_percent,
"tests": [
{
"id": t.id,
"name": t.name,
"file_path": t.file_path,
"line_number": t.line_number,
"framework": t.framework.value,
"status": t.status.value,
}
for t in tests
],
}
@router.post("/run/{suite}", response_model=TestRunResponse)
async def run_test_suite(suite: str, background_tasks: BackgroundTasks):
"""
Startet einen Test-Run fuer eine Suite.
Fuehrt Tests im Hintergrund aus.
"""
service_def = next((s for s in SERVICE_DEFINITIONS if s["service"] == suite), None)
if not service_def:
raise HTTPException(status_code=404, detail=f"Suite '{suite}' nicht gefunden")
run_id = f"run_{suite}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
# Background Task starten
background_tasks.add_task(execute_test_run, run_id, service_def)
return TestRunResponse(
run_id=run_id,
status="queued",
message=f"Test-Run fuer {service_def['display_name']} gestartet",
)
@router.get("/runs")
async def get_test_runs_list(limit: int = 20):
"""
Gibt die Test-Run Historie zurueck.
"""
test_runs = get_test_runs()
# Sortiert nach Startzeit, neueste zuerst
sorted_runs = sorted(test_runs, key=lambda r: r["started_at"], reverse=True)
return {"runs": sorted_runs[:limit]}
@router.get("/progress/{service_id}")
async def get_test_progress(service_id: str):
"""
Gibt den Fortschritt eines laufenden Tests zurueck.
Wird vom Frontend gepollt um Live-Updates anzuzeigen.
"""
running_tests = get_running_tests()
if service_id in running_tests:
return running_tests[service_id]
# Kein laufender Test - Standard-Antwort
return {
"current_file": "",
"files_done": 0,
"files_total": 0,
"passed": 0,
"failed": 0,
"status": "idle"
}
@router.get("/progress")
async def get_all_progress():
"""
Gibt den Fortschritt aller laufenden Tests zurueck.
"""
return get_running_tests()
@router.get("/runs/{run_id}")
async def get_test_run(run_id: str):
"""
Gibt Details zu einem spezifischen Test-Run zurueck.
"""
current_runs = get_current_runs()
test_runs = get_test_runs()
if run_id in current_runs:
run = current_runs[run_id]
return {
"id": run.id,
"suite_id": run.suite_id,
"service": run.service,
"started_at": run.started_at.isoformat(),
"completed_at": run.completed_at.isoformat() if run.completed_at else None,
"status": run.status.value,
"total_tests": run.total_tests,
"passed_tests": run.passed_tests,
"failed_tests": run.failed_tests,
"duration_seconds": run.duration_seconds,
"output": run.output,
}
# In Historie suchen
for run in test_runs:
if run["id"] == run_id:
return run
raise HTTPException(status_code=404, detail=f"Run '{run_id}' nicht gefunden")
@router.get("/coverage")
async def get_coverage():
"""
Gibt aggregierte Coverage-Informationen zurueck.
"""
coverage_data = []
total_coverage = 0.0
count = 0
for service_def in SERVICE_DEFINITIONS:
info = build_service_info(service_def)
if info.coverage_percent:
coverage_data.append({
"service": info.service,
"display_name": info.display_name,
"coverage_percent": round(info.coverage_percent, 1),
"language": info.language,
})
total_coverage += info.coverage_percent
count += 1
return {
"services": coverage_data,
"average_coverage": round(total_coverage / count, 1) if count > 0 else 0,
"total_services": count,
}
@router.get("/health")
async def get_test_health():
"""
Gibt den Status der Test-Infrastruktur zurueck.
"""
go_available = check_go_available()
pytest_available = check_pytest_available()
return {
"status": "healthy",
"mode": RUN_MODE, # "docker", "local", oder "demo"
"services_monitored": len(SERVICE_DEFINITIONS),
"project_root": str(PROJECT_ROOT),
"project_root_exists": PROJECT_ROOT.exists(),
"timestamp": datetime.now().isoformat(),
"runners": {
"go_test": "available" if go_available else "not_installed",
"pytest": "available" if pytest_available else "not_installed",
"jest": "available", # TODO: check Node.js
"playwright": "available", # TODO: check Playwright
"bqas": "available", # BQAS hat seinen eigenen Service
},
"versions": {
"go": get_go_version() if go_available else None,
"pytest": get_pytest_version() if pytest_available else None,
},
}
@router.get("/db-status")
async def get_db_status():
"""
Gibt den Status der PostgreSQL-Datenbankverbindung zurueck.
Wird vom Dashboard ServiceStatus verwendet.
"""
import time
from ...database import check_db_connection, DATABASE_URL
start_time = time.time()
is_connected = check_db_connection()
response_time = int((time.time() - start_time) * 1000)
# Parse host from DATABASE_URL (hide password)
try:
# postgresql://user:pass@host:port/db -> host:port
url_parts = DATABASE_URL.split("@")
if len(url_parts) > 1:
host_part = url_parts[1].split("/")[0]
else:
host_part = "unknown"
except:
host_part = "unknown"
return {
"status": "online" if is_connected else "offline",
"host": host_part,
"response_time_ms": response_time,
"timestamp": datetime.now().isoformat(),
}

View File

@@ -0,0 +1,23 @@
"""
Test Registry Services
Business logic and helper services.
"""
from .error_handling import (
extract_go_error,
classify_go_error,
suggest_go_fix,
extract_pytest_error,
classify_pytest_error,
suggest_pytest_fix,
)
__all__ = [
"extract_go_error",
"classify_go_error",
"suggest_go_fix",
"extract_pytest_error",
"classify_pytest_error",
"suggest_pytest_fix",
]

View File

@@ -0,0 +1,137 @@
"""
Error Analysis and Classification Helpers
Provides error extraction, classification, and fix suggestions for Go and Python tests.
"""
import re
from typing import Optional
# ==============================================================================
# Go Error Helpers
# ==============================================================================
def extract_go_error(output: str) -> str:
    """Extract a condensed error message from Go test output.

    Keeps up to three relevant lines joined with ' | '; falls back to the
    first 200 characters of the raw output when nothing matched.
    """
    if not output:
        return ""
    interesting = []
    for raw in output.strip().split("\n"):
        stripped = raw.strip()
        # Typical Go failure markers first.
        if "Error:" in raw or "FAIL" in raw or "panic:" in raw:
            interesting.append(stripped)
        elif stripped.startswith("---"):
            # NOTE(review): '---' section headers are skipped here, but
            # '--- FAIL: ...' lines already matched the branch above.
            continue
        elif "expected" in raw.lower() or "got" in raw.lower():
            interesting.append(stripped)
        elif ".go:" in raw:
            interesting.append(stripped)
    return " | ".join(interesting[:3]) if interesting else output[:200]
def classify_go_error(error_msg: str) -> str:
    """Map a Go test error message onto a coarse category label.

    Rules are evaluated in order; the first match wins. Unmatched messages
    are classified as "logic_error", empty input as "unknown".
    """
    if not error_msg:
        return "unknown"
    msg = error_msg.lower()
    rules = [
        ("nil_pointer", lambda m: "nil pointer" in m or "panic" in m),
        ("assertion", lambda m: "expected" in m and "got" in m),
        ("timeout", lambda m: "timeout" in m),
        ("network", lambda m: "connection" in m or "dial" in m),
        ("not_found", lambda m: "not found" in m or "does not exist" in m),
        ("permission", lambda m: "permission" in m or "unauthorized" in m),
    ]
    for label, matches in rules:
        if matches(msg):
            return label
    return "logic_error"
def suggest_go_fix(error_msg: str) -> str:
    """Return a human-readable fix hint for a Go test failure.

    The hint is chosen via classify_go_error(); unknown categories fall
    back to the generic stack-trace advice.
    """
    hints = {
        "nil_pointer": "Pruefe ob alle Pointer initialisiert sind. Fuege nil-Checks hinzu.",
        "assertion": "Vergleiche die erwarteten mit den tatsaechlichen Werten. Pruefe die Test-Eingabedaten.",
        "timeout": "Erhoehe das Timeout oder optimiere die Funktion. Pruefe Netzwerkverbindungen.",
        "network": "Pruefe ob der Service erreichbar ist. Stelle sicher dass Mocks korrekt konfiguriert sind.",
        "not_found": "Pruefe ob die erwarteten Ressourcen existieren. Aktualisiere Test-Fixtures.",
        "permission": "Pruefe Berechtigungen und Auth-Token im Test-Setup.",
        "logic_error": "Pruefe die Geschaeftslogik und die Test-Annahmen.",
        "unknown": "Analysiere den Stack-Trace fuer mehr Details.",
    }
    category = classify_go_error(error_msg)
    return hints.get(category, hints["unknown"])
# ==============================================================================
# Python Error Helpers
# ==============================================================================
def extract_pytest_error(output: str, test_name: str) -> str:
    """Extract the failure message for one test from pytest output.

    Locates the block following a "FAILED ...<test_name>" line and returns
    the lines surrounding the first AssertionError/Error/Exception mention,
    joined with " | ". Returns "" when the test or an error line cannot be
    found.
    """
    if not output:
        return ""
    # Failure block: everything after the FAILED line up to the next marker.
    block_re = rf'FAILED.*{re.escape(test_name)}.*?\n(.*?)(?=FAILED|PASSED|====|$)'
    found = re.search(block_re, output, re.DOTALL)
    if found:
        snippet = [ln.strip() for ln in found.group(1).split("\n") if ln.strip()]
        for idx, ln in enumerate(snippet):
            if "AssertionError" in ln or "Error" in ln or "Exception" in ln:
                # One line of context before, up to two after the error line.
                return " | ".join(snippet[max(0, idx - 1):min(len(snippet), idx + 3)])
    return ""
def classify_pytest_error(error_msg: str) -> str:
    """Map a Python/pytest error message onto a coarse category label.

    Checks exception-type substrings in a fixed order; the first match
    wins. Unmatched messages are "logic_error", empty input is "unknown".
    """
    if not error_msg:
        return "unknown"
    checks = [
        ("assertion", lambda m: "AssertionError" in m),
        ("type_error", lambda m: "TypeError" in m),
        ("attribute_error", lambda m: "AttributeError" in m),
        ("key_error", lambda m: "KeyError" in m),
        ("value_error", lambda m: "ValueError" in m),
        ("import_error", lambda m: "ImportError" in m or "ModuleNotFoundError" in m),
        ("network", lambda m: "ConnectionError" in m or "timeout" in m.lower()),
    ]
    for label, hit in checks:
        if hit(error_msg):
            return label
    return "logic_error"
def suggest_pytest_fix(error_msg: str) -> str:
    """Return a human-readable fix hint for a Python test failure.

    The hint is chosen via classify_pytest_error(); unknown categories
    fall back to the generic stack-trace advice.
    """
    hints = {
        "assertion": "Pruefe die erwarteten vs. tatsaechlichen Werte. Sind die Test-Daten aktuell?",
        "type_error": "Pruefe die Typen der uebergebenen Argumente. Evtl. fehlt eine Typkonvertierung.",
        "attribute_error": "Das Objekt hat dieses Attribut nicht. Pruefe die Initialisierung.",
        "key_error": "Der Schluessel existiert nicht im Dict. Pruefe die Test-Daten.",
        "value_error": "Ungueltiger Wert uebergeben. Pruefe die Eingabeparameter.",
        "import_error": "Modul nicht gefunden. Pruefe die Abhaengigkeiten und den Pfad.",
        "network": "Netzwerkfehler. Sind alle Services gestartet? Sind Mocks konfiguriert?",
        "logic_error": "Logikfehler. Pruefe die Geschaeftslogik und Test-Annahmen.",
        "unknown": "Analysiere den Stack-Trace fuer mehr Details.",
    }
    category = classify_pytest_error(error_msg)
    return hints.get(category, hints["unknown"])

View File

@@ -0,0 +1,500 @@
"""
Repository fuer Test Registry Datenbank-Operationen.
Abstrahiert alle DB-Zugriffe fuer:
- Test-Runs speichern und abrufen
- Test-Ergebnisse verwalten
- Backlog-Items verwalten
- Service-Statistiken aktualisieren
"""
from datetime import datetime
from typing import List, Optional, Dict, Any
from sqlalchemy.orm import Session
from sqlalchemy import func, desc
from .db_models import (
TestRunDB,
TestResultDB,
FailedTestBacklogDB,
TestFixHistoryDB,
TestServiceStatsDB
)
class TestRepository:
    """Repository for all test-registry database operations.

    Wraps a SQLAlchemy ``Session`` and bundles the CRUD helpers for test
    runs, individual test results, the failed-test backlog, fix history and
    per-service statistics. The session is owned by the caller: methods
    commit their own changes but never close the session.
    """

    def __init__(self, db: Session):
        self.db = db

    # ========================================
    # Test Runs
    # ========================================

    def create_run(
        self,
        run_id: str,
        service: str,
        framework: str,
        triggered_by: str = "manual",
        git_commit: Optional[str] = None,
        git_branch: Optional[str] = None
    ) -> TestRunDB:
        """Create and persist a new test run in state "running"."""
        run = TestRunDB(
            run_id=run_id,
            service=service,
            framework=framework,
            started_at=datetime.utcnow(),
            status="running",
            triggered_by=triggered_by,
            git_commit=git_commit,
            git_branch=git_branch
        )
        self.db.add(run)
        self.db.commit()
        self.db.refresh(run)
        return run

    def complete_run(
        self,
        run_id: str,
        status: str,
        total_tests: int,
        passed_tests: int,
        failed_tests: int,
        skipped_tests: int = 0,
        duration_seconds: float = 0,
        output: Optional[str] = None
    ) -> Optional[TestRunDB]:
        """Mark a run as completed, store its totals and refresh service stats.

        Returns the updated run, or ``None`` when ``run_id`` is unknown.
        """
        run = self.db.query(TestRunDB).filter(TestRunDB.run_id == run_id).first()
        if run:
            run.completed_at = datetime.utcnow()
            run.status = status
            run.total_tests = total_tests
            run.passed_tests = passed_tests
            run.failed_tests = failed_tests
            run.skipped_tests = skipped_tests
            run.duration_seconds = duration_seconds
            # Truncate so a verbose test log cannot bloat the row.
            run.output = output[:10000] if output else None
            self.db.commit()
            self.db.refresh(run)
            # Keep the aggregated per-service statistics in sync.
            self._update_service_stats(run)
        return run

    def get_run(self, run_id: str) -> Optional[TestRunDB]:
        """Fetch a single run by its run id, or ``None``."""
        return self.db.query(TestRunDB).filter(TestRunDB.run_id == run_id).first()

    def get_runs(
        self,
        service: Optional[str] = None,
        limit: int = 50,
        offset: int = 0
    ) -> List[TestRunDB]:
        """Fetch runs, newest first, optionally filtered by service."""
        query = self.db.query(TestRunDB)
        if service:
            query = query.filter(TestRunDB.service == service)
        return query.order_by(desc(TestRunDB.started_at)).offset(offset).limit(limit).all()

    def get_runs_count(self, service: Optional[str] = None) -> int:
        """Count runs, optionally filtered by service."""
        query = self.db.query(func.count(TestRunDB.id))
        if service:
            query = query.filter(TestRunDB.service == service)
        return query.scalar() or 0

    # ========================================
    # Test Results
    # ========================================

    def add_results(self, run_id: str, results: List[Dict[str, Any]]) -> int:
        """Add a batch of per-test results for a run; returns the count added.

        Failed/errored results are mirrored into the failed-test backlog.
        The whole batch is committed in one transaction.
        """
        count = 0
        for result in results:
            db_result = TestResultDB(
                run_id=run_id,
                # Callers use either "name" or "test_name" / "file_path" or
                # "test_file" depending on the runner - accept both spellings.
                test_name=result.get("name") or result.get("test_name", "unknown"),
                test_file=result.get("file_path") or result.get("test_file"),
                line_number=result.get("line_number"),
                status=result.get("status", "unknown"),
                duration_ms=result.get("duration_ms"),
                error_message=result.get("error_message"),
                error_type=result.get("error_type"),
                output=result.get("output")
            )
            self.db.add(db_result)
            count += 1
            # Mirror failures into the backlog for triage.
            if result.get("status") in ["failed", "error"]:
                self._update_backlog(
                    run_id=run_id,
                    test_name=result.get("name") or result.get("test_name", "unknown"),
                    test_file=result.get("file_path") or result.get("test_file"),
                    error_message=result.get("error_message"),
                    error_type=result.get("error_type"),
                    suggestion=result.get("suggestion")
                )
        self.db.commit()
        return count

    def get_results(self, run_id: str) -> List[TestResultDB]:
        """Fetch all results of a run."""
        return self.db.query(TestResultDB).filter(TestResultDB.run_id == run_id).all()

    def get_failed_results(self, run_id: str) -> List[TestResultDB]:
        """Fetch only the failed/errored results of a run."""
        return self.db.query(TestResultDB).filter(
            TestResultDB.run_id == run_id,
            TestResultDB.status.in_(["failed", "error"])
        ).all()

    # ========================================
    # Backlog
    # ========================================

    def _update_backlog(
        self,
        run_id: str,
        test_name: str,
        test_file: Optional[str],
        error_message: Optional[str],
        error_type: Optional[str],
        suggestion: Optional[str] = None
    ):
        """Create or update the backlog entry for a failed test.

        Does not commit - the caller (``add_results``) commits the batch.
        """
        # Resolve the run to obtain service and framework.
        run = self.db.query(TestRunDB).filter(TestRunDB.run_id == run_id).first()
        if not run:
            return
        # Backlog entries are unique per (test_name, service).
        backlog = self.db.query(FailedTestBacklogDB).filter(
            FailedTestBacklogDB.test_name == test_name,
            FailedTestBacklogDB.service == run.service
        ).first()
        now = datetime.utcnow()
        if backlog:
            # Existing entry - bump counters and refresh the error details.
            backlog.last_failed_at = now
            backlog.failure_count += 1
            backlog.error_message = error_message or backlog.error_message
            backlog.error_type = error_type or backlog.error_type
            if suggestion:
                backlog.fix_suggestion = suggestion
            # A previously "fixed" test failing again is reopened.
            if backlog.status == "fixed":
                backlog.status = "open"
                backlog.notes = f"Erneut fehlgeschlagen nach Fix am {now.isoformat()}"
        else:
            backlog = FailedTestBacklogDB(
                test_name=test_name,
                test_file=test_file,
                service=run.service,
                framework=run.framework,
                error_message=error_message,
                error_type=error_type,
                first_failed_at=now,
                last_failed_at=now,
                failure_count=1,
                status="open",
                priority=self._calculate_priority(error_type),
                fix_suggestion=suggestion
            )
            self.db.add(backlog)

    def _calculate_priority(self, error_type: Optional[str]) -> str:
        """Derive a backlog priority ("high"/"medium") from the error type.

        Defaults to "medium" for unknown or missing error types.
        """
        high_priority = ["nil_pointer", "panic", "security", "critical"]
        medium_priority = ["assertion", "type_error", "value_error"]
        if error_type:
            if any(p in error_type.lower() for p in high_priority):
                return "high"
            if any(p in error_type.lower() for p in medium_priority):
                return "medium"
        return "medium"

    def get_backlog(
        self,
        status: Optional[str] = None,
        service: Optional[str] = None,
        priority: Optional[str] = None,
        limit: int = 100,
        offset: int = 0
    ) -> List[FailedTestBacklogDB]:
        """Fetch backlog entries, most-failing and most-recent first."""
        query = self.db.query(FailedTestBacklogDB)
        if status:
            query = query.filter(FailedTestBacklogDB.status == status)
        if service:
            query = query.filter(FailedTestBacklogDB.service == service)
        if priority:
            query = query.filter(FailedTestBacklogDB.priority == priority)
        return query.order_by(
            desc(FailedTestBacklogDB.failure_count),
            desc(FailedTestBacklogDB.last_failed_at)
        ).offset(offset).limit(limit).all()

    def get_backlog_count(
        self,
        status: Optional[str] = None,
        service: Optional[str] = None
    ) -> int:
        """Count backlog entries with optional status/service filters."""
        query = self.db.query(func.count(FailedTestBacklogDB.id))
        if status:
            query = query.filter(FailedTestBacklogDB.status == status)
        if service:
            query = query.filter(FailedTestBacklogDB.service == service)
        return query.scalar() or 0

    def get_backlog_item(self, backlog_id: int) -> Optional[FailedTestBacklogDB]:
        """Fetch a single backlog entry by primary key."""
        return self.db.query(FailedTestBacklogDB).filter(FailedTestBacklogDB.id == backlog_id).first()

    def update_backlog_status(
        self,
        backlog_id: int,
        status: str,
        notes: Optional[str] = None,
        assigned_to: Optional[str] = None
    ) -> Optional[FailedTestBacklogDB]:
        """Update status (and optionally notes/assignee) of a backlog entry."""
        backlog = self.get_backlog_item(backlog_id)
        if backlog:
            backlog.status = status
            if notes:
                backlog.notes = notes
            if assigned_to:
                backlog.assigned_to = assigned_to
            self.db.commit()
            self.db.refresh(backlog)
        return backlog

    def update_backlog_priority(self, backlog_id: int, priority: str) -> Optional[FailedTestBacklogDB]:
        """Update the priority of a backlog entry."""
        backlog = self.get_backlog_item(backlog_id)
        if backlog:
            backlog.priority = priority
            self.db.commit()
            self.db.refresh(backlog)
        return backlog

    # ========================================
    # Fix History
    # ========================================

    def add_fix_attempt(
        self,
        backlog_id: int,
        fix_type: str,
        fix_description: str,
        commit_hash: Optional[str] = None,
        success: bool = False
    ) -> TestFixHistoryDB:
        """Record a fix attempt; a successful one marks the backlog entry fixed."""
        fix = TestFixHistoryDB(
            backlog_id=backlog_id,
            fix_type=fix_type,
            fix_description=fix_description,
            commit_hash=commit_hash,
            success=success
        )
        self.db.add(fix)
        if success:
            backlog = self.get_backlog_item(backlog_id)
            if backlog:
                backlog.status = "fixed"
        self.db.commit()
        self.db.refresh(fix)
        return fix

    def get_fix_history(self, backlog_id: int) -> List[TestFixHistoryDB]:
        """Fetch the fix attempts of a backlog entry, newest first."""
        return self.db.query(TestFixHistoryDB).filter(
            TestFixHistoryDB.backlog_id == backlog_id
        ).order_by(desc(TestFixHistoryDB.created_at)).all()

    # ========================================
    # Service Statistics
    # ========================================

    def _update_service_stats(self, run: TestRunDB):
        """Overwrite the per-service stats row with the latest run's numbers."""
        stats = self.db.query(TestServiceStatsDB).filter(
            TestServiceStatsDB.service == run.service
        ).first()
        if not stats:
            stats = TestServiceStatsDB(service=run.service)
            self.db.add(stats)
        stats.total_tests = run.total_tests
        stats.passed_tests = run.passed_tests
        stats.failed_tests = run.failed_tests
        stats.skipped_tests = run.skipped_tests
        stats.pass_rate = (run.passed_tests / run.total_tests * 100) if run.total_tests > 0 else 0.0
        stats.last_run_id = run.run_id
        stats.last_run_at = run.completed_at or datetime.utcnow()
        stats.last_status = run.status
        self.db.commit()

    def get_service_stats(self, service: str) -> Optional[TestServiceStatsDB]:
        """Fetch the stats row of one service, or ``None``."""
        return self.db.query(TestServiceStatsDB).filter(
            TestServiceStatsDB.service == service
        ).first()

    def get_all_service_stats(self) -> List[TestServiceStatsDB]:
        """Fetch the stats rows of all services."""
        return self.db.query(TestServiceStatsDB).all()

    # ========================================
    # History & Trends
    # ========================================

    def get_run_history(
        self,
        service: Optional[str] = None,
        days: int = 30,
        limit: int = 100
    ) -> List[Dict[str, Any]]:
        """Aggregate run history per day (and service) for trend charts.

        Returns dicts with date, service, run count, test totals and a
        pass rate rounded to one decimal place.
        """
        from datetime import timedelta
        cutoff = datetime.utcnow() - timedelta(days=days)
        query = self.db.query(
            func.date(TestRunDB.started_at).label('date'),
            TestRunDB.service,
            func.count(TestRunDB.id).label('runs'),
            func.sum(TestRunDB.total_tests).label('total_tests'),
            func.sum(TestRunDB.passed_tests).label('passed'),
            func.sum(TestRunDB.failed_tests).label('failed')
        ).filter(TestRunDB.started_at >= cutoff)
        if service:
            query = query.filter(TestRunDB.service == service)
        results = query.group_by(
            func.date(TestRunDB.started_at),
            TestRunDB.service
        ).order_by(desc(func.date(TestRunDB.started_at))).limit(limit).all()
        return [
            {
                "date": str(r.date),
                "service": r.service,
                "runs": r.runs,
                "total_tests": r.total_tests or 0,
                "passed": r.passed or 0,
                "failed": r.failed or 0,
                "pass_rate": round((r.passed / r.total_tests * 100) if r.total_tests else 0, 1)
            }
            for r in results
        ]

    def get_summary_stats(self) -> Dict[str, Any]:
        """Aggregate totals and overall pass rate across all services."""
        stats = self.db.query(
            func.sum(TestServiceStatsDB.total_tests).label('total_tests'),
            func.sum(TestServiceStatsDB.passed_tests).label('passed'),
            func.sum(TestServiceStatsDB.failed_tests).label('failed'),
            func.sum(TestServiceStatsDB.skipped_tests).label('skipped'),
            func.count(TestServiceStatsDB.id).label('services_count')
        ).first()
        total = stats.total_tests or 0
        passed = stats.passed or 0
        return {
            "total_tests": total,
            "total_passed": passed,
            "total_failed": stats.failed or 0,
            "total_skipped": stats.skipped or 0,
            "services_count": stats.services_count or 0,
            "overall_pass_rate": round((passed / total * 100) if total > 0 else 0, 1)
        }

    # ========================================
    # Migration Helper
    # ========================================

    def migrate_from_json(self, persisted_results: Dict[str, Dict]) -> int:
        """One-time migration of the legacy JSON store into the database.

        Updates per-service stats and creates backlog entries for the
        persisted failed tests. Returns the number of backlog entries
        created.
        """
        count = 0
        for service, data in persisted_results.items():
            # Upsert the service stats row.
            stats = self.db.query(TestServiceStatsDB).filter(
                TestServiceStatsDB.service == service
            ).first()
            if not stats:
                stats = TestServiceStatsDB(service=service)
                self.db.add(stats)
            stats.total_tests = data.get("total", 0)
            stats.passed_tests = data.get("passed", 0)
            stats.failed_tests = data.get("failed", 0)
            stats.pass_rate = (stats.passed_tests / stats.total_tests * 100) if stats.total_tests > 0 else 0.0
            last_run = data.get("last_run")
            if last_run:
                try:
                    stats.last_run_at = datetime.fromisoformat(last_run)
                except (TypeError, ValueError):
                    # Malformed timestamp in the legacy JSON - fall back to now.
                    # (Was a bare "except:", which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    stats.last_run_at = datetime.utcnow()
            stats.last_status = data.get("status", "unknown")
            # Move persisted failures into the backlog (skip duplicates).
            for failed in data.get("failed_test_ids", []):
                if isinstance(failed, dict):
                    test_name = failed.get("id") or failed.get("name", "unknown")
                    existing = self.db.query(FailedTestBacklogDB).filter(
                        FailedTestBacklogDB.test_name == test_name,
                        FailedTestBacklogDB.service == service
                    ).first()
                    if not existing:
                        backlog = FailedTestBacklogDB(
                            test_name=test_name,
                            test_file=failed.get("file_path"),
                            service=service,
                            error_message=failed.get("error_message"),
                            error_type=failed.get("error_type"),
                            first_failed_at=stats.last_run_at or datetime.utcnow(),
                            last_failed_at=stats.last_run_at or datetime.utcnow(),
                            failure_count=1,
                            status="open",
                            priority=self._calculate_priority(failed.get("error_type")),
                            fix_suggestion=failed.get("suggestion")
                        )
                        self.db.add(backlog)
                        count += 1
        self.db.commit()
        return count

View File

@@ -0,0 +1,11 @@
"""
Test Runners
Spezialisierte Runner fuer verschiedene Test-Frameworks.
"""
from .go_runner import GoTestRunner
from .python_runner import PytestRunner
from .bqas_runner import BQASRunner
__all__ = ["GoTestRunner", "PytestRunner", "BQASRunner"]

View File

@@ -0,0 +1,285 @@
"""
BQAS Test Runner
Proxy zu den BQAS-Endpoints im Voice-Service.
"""
import httpx
from datetime import datetime
from typing import Dict, Optional
from dataclasses import dataclass, field
@dataclass
class BQASResult:
    """Result of a single BQAS test-suite run."""
    suite_type: str  # "golden", "rag", "synthetic"
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    avg_score: float = 0.0  # average composite score reported by the suite
    duration_seconds: float = 0.0  # wall-clock duration measured by the runner
    metrics: Dict = field(default_factory=dict)  # raw metrics payload from the service
    failed_test_ids: list = field(default_factory=list)  # IDs of failing tests, e.g. "GT-023"
    raw_output: str = ""  # raw response / error description for diagnostics
class BQASRunner:
    """
    Runner for BQAS tests.

    Proxies all requests to the voice service (port 8091). When the service
    is unreachable, static demo data is returned so the dashboard stays
    usable without a live backend.
    """
    # Default base URL of the voice service hosting the BQAS endpoints.
    VOICE_SERVICE_URL = "http://localhost:8091"
    def __init__(self, api_base: Optional[str] = None):
        # api_base may override the default URL (e.g. for tests or staging).
        self.api_base = api_base or self.VOICE_SERVICE_URL
    async def run_golden(self, timeout: int = 120) -> BQASResult:
        """
        Run the golden test suite.
        Returns:
            BQASResult with all metrics
        """
        return await self._run_suite("golden", timeout)
    async def run_rag(self, timeout: int = 120) -> BQASResult:
        """
        Run the RAG test suite.
        Returns:
            BQASResult with all metrics
        """
        return await self._run_suite("rag", timeout)
    async def run_synthetic(self, timeout: int = 300) -> BQASResult:
        """
        Run the synthetic test suite.
        Takes longer because of LLM generation, hence the larger default timeout.
        Returns:
            BQASResult with all metrics
        """
        return await self._run_suite("synthetic", timeout)
    async def _run_suite(self, suite_type: str, timeout: int) -> BQASResult:
        """Internal helper: POST to the suite endpoint and map the response.

        Timeouts and unexpected errors produce an (empty) BQASResult whose
        raw_output describes the problem; connection errors fall back to
        demo data.
        """
        start_time = datetime.now()
        try:
            async with httpx.AsyncClient(timeout=float(timeout)) as client:
                response = await client.post(
                    f"{self.api_base}/api/v1/bqas/run/{suite_type}",
                )
                if response.status_code == 200:
                    data = response.json()
                    metrics = data.get("metrics", {})
                    return BQASResult(
                        suite_type=suite_type,
                        total_tests=metrics.get("total_tests", 0),
                        passed_tests=metrics.get("passed_tests", 0),
                        failed_tests=metrics.get("failed_tests", 0),
                        avg_score=metrics.get("avg_composite_score", 0.0),
                        duration_seconds=(datetime.now() - start_time).total_seconds(),
                        metrics=metrics,
                        failed_test_ids=metrics.get("failed_test_ids", []),
                        raw_output=str(data),
                    )
                else:
                    # Non-200: keep HTTP status and body for diagnostics.
                    return BQASResult(
                        suite_type=suite_type,
                        raw_output=f"HTTP {response.status_code}: {response.text}",
                    )
        except httpx.TimeoutException:
            return BQASResult(
                suite_type=suite_type,
                duration_seconds=(datetime.now() - start_time).total_seconds(),
                raw_output=f"Timeout nach {timeout} Sekunden",
            )
        except httpx.ConnectError:
            # Demo data when the service is not reachable
            return self._get_demo_result(suite_type)
        except Exception as e:
            return BQASResult(
                suite_type=suite_type,
                duration_seconds=(datetime.now() - start_time).total_seconds(),
                raw_output=str(e),
            )
    def _get_demo_result(self, suite_type: str) -> BQASResult:
        """Return static demo data when the voice service is unreachable."""
        if suite_type == "golden":
            return BQASResult(
                suite_type=suite_type,
                total_tests=97,
                passed_tests=89,
                failed_tests=8,
                avg_score=4.15,
                duration_seconds=45.2,
                metrics={
                    "avg_intent_accuracy": 91.7,
                    "avg_faithfulness": 4.2,
                    "avg_relevance": 4.1,
                    "avg_coherence": 4.3,
                    "safety_pass_rate": 0.98,
                },
                failed_test_ids=["GT-023", "GT-045", "GT-067", "GT-072", "GT-081", "GT-089", "GT-092", "GT-095"],
                raw_output="Demo-Modus: Voice-Service nicht erreichbar",
            )
        elif suite_type == "rag":
            return BQASResult(
                suite_type=suite_type,
                total_tests=20,
                passed_tests=18,
                failed_tests=2,
                avg_score=4.25,
                duration_seconds=62.1,
                metrics={
                    "avg_faithfulness": 4.3,
                    "avg_relevance": 4.2,
                    "citation_accuracy": 0.92,
                },
                failed_test_ids=["RAG-EH-003", "RAG-HAL-002"],
                raw_output="Demo-Modus: Voice-Service nicht erreichbar",
            )
        else:  # synthetic
            return BQASResult(
                suite_type=suite_type,
                total_tests=50,
                passed_tests=45,
                failed_tests=5,
                avg_score=3.95,
                duration_seconds=180.5,
                metrics={
                    "avg_robustness": 3.8,
                    "avg_coherence": 4.1,
                },
                failed_test_ids=["SYN-001", "SYN-015", "SYN-023", "SYN-041", "SYN-048"],
                raw_output="Demo-Modus: Voice-Service nicht erreichbar",
            )
    async def get_latest_metrics(self) -> Optional[Dict]:
        """
        Fetch the latest metrics from the voice service.

        Falls back to demo data on any error.
        Returns:
            Dict with all metrics or None
        """
        try:
            async with httpx.AsyncClient(timeout=10.0) as client:
                response = await client.get(
                    f"{self.api_base}/api/v1/bqas/latest-metrics",
                )
                if response.status_code == 200:
                    return response.json()
        except Exception:
            pass
        # Demo data
        return {
            "golden": {
                "total_tests": 97,
                "passed_tests": 89,
                "failed_tests": 8,
                "avg_composite_score": 4.15,
                "last_run": datetime.now().isoformat(),
            },
            "rag": {
                "total_tests": 20,
                "passed_tests": 18,
                "failed_tests": 2,
                "avg_composite_score": 4.25,
                "last_run": datetime.now().isoformat(),
            },
            "synthetic": None,
        }
    async def get_trend(self, days: int = 30) -> Optional[Dict]:
        """
        Fetch trend data; falls back to demo data on any error.
        Args:
            days: number of days to cover
        Returns:
            Dict with trend data or None
        """
        try:
            async with httpx.AsyncClient(timeout=10.0) as client:
                response = await client.get(
                    f"{self.api_base}/api/v1/bqas/trend",
                    params={"days": days},
                )
                if response.status_code == 200:
                    return response.json()
        except Exception:
            pass
        # Demo data
        return {
            "dates": ["2026-01-02", "2026-01-09", "2026-01-16", "2026-01-23", "2026-01-30"],
            "scores": [3.9, 4.0, 4.1, 4.15, 4.15],
            "trend": "improving",
        }
    async def get_runs(self, limit: int = 20) -> list:
        """
        Fetch the most recent BQAS test runs; demo data on any error.
        Args:
            limit: maximum number of runs
        Returns:
            List of test runs
        """
        try:
            async with httpx.AsyncClient(timeout=10.0) as client:
                response = await client.get(
                    f"{self.api_base}/api/v1/bqas/runs",
                    params={"limit": limit},
                )
                if response.status_code == 200:
                    data = response.json()
                    return data.get("runs", [])
        except Exception:
            pass
        # Demo data
        return [
            {
                "id": 1,
                "timestamp": "2026-01-30T07:00:00Z",
                "git_commit": "abc1234",
                "golden_score": 4.15,
                "total_tests": 97,
                "passed_tests": 89,
                "failed_tests": 8,
                "duration_seconds": 45.2,
            },
            {
                "id": 2,
                "timestamp": "2026-01-29T07:00:00Z",
                "git_commit": "def5678",
                "golden_score": 4.12,
                "total_tests": 97,
                "passed_tests": 88,
                "failed_tests": 9,
                "duration_seconds": 44.8,
            },
        ]

View File

@@ -0,0 +1,229 @@
"""
Go Test Runner
Fuehrt Go-Tests aus und parsed die Ergebnisse.
"""
import subprocess
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass, field
@dataclass
class GoTestResult:
    """Result of a single Go test."""
    package: str  # import path of the package the test belongs to
    test_name: str  # e.g. "TestMyFunction"
    passed: bool
    duration_seconds: float
    output: str = ""  # accumulated `go test` output for this test
@dataclass
class GoTestSummary:
    """Summary of one Go test run."""
    total: int = 0
    passed: int = 0
    failed: int = 0
    skipped: int = 0
    duration_seconds: float = 0.0  # longest package elapsed time seen
    coverage_percent: Optional[float] = None  # None when coverage was not collected
    results: List[GoTestResult] = field(default_factory=list)
    raw_output: str = ""  # truncated raw stdout (or stderr / error description)
class GoTestRunner:
    """
    Runner for Go tests.

    Uses `go test -json` for structured, event-based output.
    """
    def __init__(self, base_path: Path):
        # Directory of the Go module whose tests should be executed.
        self.base_path = base_path
    async def run(self, with_coverage: bool = True, timeout: int = 300) -> GoTestSummary:
        """
        Run the Go tests for the whole module (`./...`).

        Args:
            with_coverage: collect coverage (writes coverage.out)
            timeout: timeout in seconds

        Returns:
            GoTestSummary with all results; on error the summary is empty
            and raw_output describes the problem.
        """
        if not self.base_path.exists():
            return GoTestSummary(raw_output="Pfad existiert nicht")
        cmd = ["go", "test", "-v", "-json"]
        if with_coverage:
            cmd.extend(["-cover", "-coverprofile=coverage.out"])
        cmd.append("./...")
        try:
            result = subprocess.run(
                cmd,
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            return self._parse_output(result.stdout, result.stderr)
        except subprocess.TimeoutExpired:
            return GoTestSummary(raw_output=f"Timeout nach {timeout} Sekunden")
        except FileNotFoundError:
            return GoTestSummary(raw_output="Go nicht installiert")
        except Exception as e:
            return GoTestSummary(raw_output=str(e))
    def _parse_output(self, stdout: str, stderr: str) -> GoTestSummary:
        """Parse the JSON event stream emitted by `go test -json`.

        Output events are accumulated per (package, test) key so the final
        pass/fail event can attach the complete output of that test.
        """
        summary = GoTestSummary(raw_output=stdout[:10000] if stdout else stderr[:10000])
        # NOTE(review): current_test appears to be unused - candidate for removal.
        current_test: Dict[str, str] = {}
        test_outputs: Dict[str, List[str]] = {}
        for line in stdout.split("\n"):
            if not line.strip():
                continue
            try:
                event = json.loads(line)
                action = event.get("Action")
                package = event.get("Package", "")
                test = event.get("Test", "")
                elapsed = event.get("Elapsed", 0)
                output = event.get("Output", "")
                # Accumulate per-test output across events
                if test and output:
                    key = f"{package}:{test}"
                    if key not in test_outputs:
                        test_outputs[key] = []
                    test_outputs[key].append(output)
                # Per-test result events
                if action == "pass" and test:
                    summary.passed += 1
                    summary.total += 1
                    summary.results.append(GoTestResult(
                        package=package,
                        test_name=test,
                        passed=True,
                        duration_seconds=elapsed,
                        output="".join(test_outputs.get(f"{package}:{test}", [])),
                    ))
                elif action == "fail" and test:
                    summary.failed += 1
                    summary.total += 1
                    summary.results.append(GoTestResult(
                        package=package,
                        test_name=test,
                        passed=False,
                        duration_seconds=elapsed,
                        output="".join(test_outputs.get(f"{package}:{test}", [])),
                    ))
                elif action == "skip" and test:
                    summary.skipped += 1
                    summary.total += 1
                # Package-level result event (overall duration)
                elif action in ["pass", "fail"] and not test and elapsed:
                    summary.duration_seconds = max(summary.duration_seconds, elapsed)
            except json.JSONDecodeError:
                # Ignore non-JSON lines (e.g. coverage output)
                if "coverage:" in line.lower():
                    # e.g. "coverage: 75.2% of statements"
                    try:
                        parts = line.split("coverage:")
                        if len(parts) > 1:
                            percent_str = parts[1].strip().split("%")[0]
                            summary.coverage_percent = float(percent_str)
                    except (ValueError, IndexError):
                        pass
        return summary
    async def run_single_test(self, test_name: str, timeout: int = 60) -> GoTestResult:
        """
        Run a single test by name.

        Args:
            test_name: test name passed to `go test -run` (e.g. "TestMyFunction")
            timeout: timeout in seconds

        Returns:
            GoTestResult for that test.
            NOTE(review): passed is a substring heuristic ("PASS" in stdout);
            mixed pass/fail output in other packages can mislead it - confirm.
        """
        cmd = ["go", "test", "-v", "-run", test_name, "./..."]
        try:
            result = subprocess.run(
                cmd,
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            passed = "PASS" in result.stdout
            return GoTestResult(
                package=str(self.base_path),
                test_name=test_name,
                passed=passed,
                duration_seconds=0.0,
                output=result.stdout + result.stderr,
            )
        except Exception as e:
            return GoTestResult(
                package=str(self.base_path),
                test_name=test_name,
                passed=False,
                duration_seconds=0.0,
                output=str(e),
            )
    async def get_coverage_report(self) -> Optional[Dict]:
        """
        Read the coverage report produced by a previous run.

        Returns:
            Dict with total coverage and raw output, or None when
            coverage.out is missing or cannot be parsed.
        """
        coverage_file = self.base_path / "coverage.out"
        if not coverage_file.exists():
            return None
        try:
            result = subprocess.run(
                ["go", "tool", "cover", "-func=coverage.out"],
                cwd=str(self.base_path),
                capture_output=True,
                text=True,
            )
            # Parse the "total:" line
            for line in result.stdout.split("\n"):
                if "total:" in line:
                    parts = line.split()
                    if len(parts) >= 3:
                        percent_str = parts[-1].replace("%", "")
                        return {
                            "total_coverage": float(percent_str),
                            "raw_output": result.stdout,
                        }
        except Exception:
            pass
        return None

View File

@@ -0,0 +1,266 @@
"""
Python Test Runner (pytest)
Fuehrt Python-Tests aus und parsed die Ergebnisse.
"""
import subprocess
import json
import re
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass, field
@dataclass
class PytestResult:
    """Result of a single pytest test."""
    node_id: str  # pytest node id, e.g. "tests/test_x.py::test_name"
    test_name: str
    file_path: str
    passed: bool
    duration_seconds: float
    output: str = ""
    error_message: Optional[str] = None  # short failure text, when extracted
@dataclass
class PytestSummary:
    """Summary of one pytest run."""
    total: int = 0
    passed: int = 0
    failed: int = 0
    skipped: int = 0
    errors: int = 0
    duration_seconds: float = 0.0
    coverage_percent: Optional[float] = None  # None when coverage was not collected
    results: List[PytestResult] = field(default_factory=list)
    raw_output: str = ""  # truncated raw stdout+stderr of the run
class PytestRunner:
"""
Runner fuer Python-Tests mit pytest.
Verwendet `pytest --json-report` fuer strukturierte Ausgabe.
"""
    def __init__(self, base_path: Path, venv_path: Optional[Path] = None):
        # base_path: directory containing the tests; venv_path: optional
        # virtualenv whose interpreter is preferred over plain "python".
        self.base_path = base_path
        self.venv_path = venv_path
def _get_python_cmd(self) -> str:
"""Gibt den Python-Befehl zurueck (aus venv wenn vorhanden)"""
if self.venv_path and (self.venv_path / "bin" / "python").exists():
return str(self.venv_path / "bin" / "python")
return "python"
    async def run(self, with_coverage: bool = True, timeout: int = 300) -> PytestSummary:
        """
        Run pytest over base_path.

        Args:
            with_coverage: collect coverage via pytest-cov
            timeout: timeout in seconds

        Returns:
            PytestSummary with all results; on error the summary is empty
            and raw_output describes the problem.

        NOTE(review): unlike run_single_test, no cwd= is passed here, so
        relative paths (e.g. "--cov=.") resolve against the caller's cwd -
        confirm this is intended.
        """
        if not self.base_path.exists():
            return PytestSummary(raw_output="Pfad existiert nicht")
        python_cmd = self._get_python_cmd()
        cmd = [python_cmd, "-m", "pytest", "-v", "--tb=short"]
        if with_coverage:
            cmd.extend(["--cov=.", "--cov-report=term-missing"])
        cmd.append(str(self.base_path))
        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            return self._parse_output(result.stdout, result.stderr)
        except subprocess.TimeoutExpired:
            return PytestSummary(raw_output=f"Timeout nach {timeout} Sekunden")
        except FileNotFoundError:
            return PytestSummary(raw_output="Python/pytest nicht installiert")
        except Exception as e:
            return PytestSummary(raw_output=str(e))
    def _parse_output(self, stdout: str, stderr: str) -> PytestSummary:
        """Parse textual pytest output into a PytestSummary.

        Individual test lines populate `results`; the final summary line,
        when present, overrides the per-line counters (it is authoritative).
        """
        output = stdout + stderr
        summary = PytestSummary(raw_output=output[:10000])
        # Parse per-test lines (e.g. "test_file.py::test_name PASSED").
        # NOTE(review): this pattern does not match class-based node ids
        # ("file.py::TestClass::test_name") - such tests are only counted
        # via the summary line below; confirm whether that is acceptable.
        test_pattern = re.compile(r"([\w/]+\.py)::(\w+)(?:\[.+\])?\s+(PASSED|FAILED|SKIPPED|ERROR)")
        for match in test_pattern.finditer(output):
            file_path, test_name, status = match.groups()
            result = PytestResult(
                node_id=f"{file_path}::{test_name}",
                test_name=test_name,
                file_path=file_path,
                passed=status == "PASSED",
                duration_seconds=0.0,
            )
            summary.results.append(result)
            if status == "PASSED":
                summary.passed += 1
            elif status == "FAILED":
                summary.failed += 1
            elif status == "SKIPPED":
                summary.skipped += 1
            elif status == "ERROR":
                summary.errors += 1
        summary.total = len(summary.results)
        # Parse the summary line (e.g. "5 passed, 2 failed in 3.45s")
        summary_pattern = re.compile(
            r"=+\s*(?:(\d+)\s+passed)?[,\s]*(?:(\d+)\s+failed)?[,\s]*(?:(\d+)\s+skipped)?[,\s]*(?:(\d+)\s+error)?.*?in\s+([\d.]+)s"
        )
        match = summary_pattern.search(output)
        if match:
            if match.group(1):
                summary.passed = int(match.group(1))
            if match.group(2):
                summary.failed = int(match.group(2))
            if match.group(3):
                summary.skipped = int(match.group(3))
            if match.group(4):
                summary.errors = int(match.group(4))
            if match.group(5):
                summary.duration_seconds = float(match.group(5))
            summary.total = summary.passed + summary.failed + summary.skipped + summary.errors
        # Parse coverage (e.g. "TOTAL    1234    567    54%")
        coverage_pattern = re.compile(r"TOTAL\s+\d+\s+\d+\s+(\d+)%")
        coverage_match = coverage_pattern.search(output)
        if coverage_match:
            summary.coverage_percent = float(coverage_match.group(1))
        return summary
async def run_single_test(self, test_path: str, timeout: int = 60) -> PytestResult:
    """Run a single test node via pytest.

    Args:
        test_path: Pytest node id (e.g. "test_file.py::test_name").
        timeout: Maximum runtime in seconds.

    Returns:
        PytestResult for the node; on timeout or other failures a
        non-passing result whose output describes the problem.
    """
    python_cmd = self._get_python_cmd()
    cmd = [python_cmd, "-m", "pytest", "-v", test_path]
    test_name = test_path.split("::")[-1] if "::" in test_path else test_path
    file_path = test_path.split("::")[0] if "::" in test_path else test_path
    try:
        result = subprocess.run(
            cmd,
            cwd=str(self.base_path),
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        output = result.stdout + result.stderr
        # pytest exit code 0 means every collected test passed. This is
        # more robust than scanning the text: a path or test name that
        # happens to contain "failed" must not flip the verdict.
        passed = result.returncode == 0
        # Best-effort duration from the summary line ("... in 0.12s").
        duration = 0.0
        duration_match = re.search(r"in\s+([\d.]+)s", output)
        if duration_match:
            duration = float(duration_match.group(1))
        return PytestResult(
            node_id=test_path,
            test_name=test_name,
            file_path=file_path,
            passed=passed,
            duration_seconds=duration,
            output=output,
        )
    except subprocess.TimeoutExpired:
        # Explicit timeout message, consistent with the full-run method.
        return PytestResult(
            node_id=test_path,
            test_name=test_name,
            file_path=file_path,
            passed=False,
            duration_seconds=0.0,
            output=f"Timeout nach {timeout} Sekunden",
        )
    except Exception as e:
        return PytestResult(
            node_id=test_path,
            test_name=test_path,
            file_path="",
            passed=False,
            duration_seconds=0.0,
            output=str(e),
        )
async def get_coverage_report(self, format: str = "term") -> Optional[Dict]:
    """Generate a coverage report via pytest-cov.

    Args:
        format: "term", "html", or "xml" (passed to --cov-report).

    Returns:
        Dict with total coverage, format, and raw output — or None when
        the run fails or no "TOTAL" line is found (best effort).
    """
    python_cmd = self._get_python_cmd()
    cmd = [python_cmd, "-m", "pytest", "--cov=.", f"--cov-report={format}"]
    try:
        result = subprocess.run(
            cmd,
            cwd=str(self.base_path),
            capture_output=True,
            text=True,
            timeout=120,
        )
    except (subprocess.SubprocessError, OSError):
        # Missing interpreter/pytest-cov or a timeout just means
        # "no report available" — deliberate best effort.
        return None
    # Parse the "TOTAL" line. Tolerate the extra columns emitted when
    # branch coverage is enabled and fractional percentages ("54.3%").
    match = re.search(r"TOTAL\s+.*?([\d.]+)%", result.stdout)
    if match:
        return {
            "total_coverage": float(match.group(1)),
            "format": format,
            "raw_output": result.stdout,
        }
    return None
async def list_tests(self) -> List[str]:
    """Collect all available pytest node ids without executing them.

    Returns:
        List of node ids as printed by "pytest --collect-only -q";
        empty list when collection fails for any reason.
    """
    cmd = [self._get_python_cmd(), "-m", "pytest", "--collect-only", "-q"]
    try:
        proc = subprocess.run(
            cmd,
            cwd=str(self.base_path),
            capture_output=True,
            text=True,
            timeout=30,
        )
        collected: List[str] = []
        for raw_line in proc.stdout.split("\n"):
            candidate = raw_line.strip()
            # Real node ids contain "::"; lines like "<Module ...>"
            # are collection-tree noise and are skipped.
            if "::" in candidate and not candidate.startswith("<"):
                collected.append(candidate)
        return collected
    except Exception:
        return []