fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
1
backend/compliance/tests/__init__.py
Normal file
1
backend/compliance/tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Compliance Module Tests
|
||||
591
backend/compliance/tests/test_audit_routes.py
Normal file
591
backend/compliance/tests/test_audit_routes.py
Normal file
@@ -0,0 +1,591 @@
|
||||
"""
|
||||
Unit tests for Compliance Audit Routes (Sprint 3).
|
||||
|
||||
Tests all audit session and sign-off endpoints.
|
||||
|
||||
Run with: pytest backend/compliance/tests/test_audit_routes.py -v
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import hashlib
|
||||
from datetime import datetime
|
||||
from unittest.mock import MagicMock, patch
|
||||
from uuid import uuid4
|
||||
|
||||
from fastapi.testclient import TestClient
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
# Import the app and dependencies.
import sys
from pathlib import Path

# Make the ``compliance`` package importable from any checkout location.
# The previous version hard-coded a developer-machine absolute path
# (/Users/benjaminadmin/...), which broke the tests everywhere else.
# This file lives at backend/compliance/tests/, so backend/ is two
# parents up from the containing directory.
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
|
||||
|
||||
from compliance.db.models import (
|
||||
AuditSessionDB, AuditSignOffDB, AuditResultEnum, AuditSessionStatusEnum,
|
||||
RequirementDB, RegulationDB
|
||||
)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test Fixtures
|
||||
# ============================================================================
|
||||
|
||||
@pytest.fixture
def mock_db():
    """Provide a MagicMock standing in for a SQLAlchemy ``Session``."""
    session_stub = MagicMock(spec=Session)
    return session_stub
|
||||
|
||||
|
||||
@pytest.fixture
def sample_regulation():
    """Build an active GDPR ``RegulationDB`` row for use in tests."""
    attrs = {
        "id": str(uuid4()),
        "code": "GDPR",
        "name": "General Data Protection Regulation",
        "full_name": "Regulation (EU) 2016/679",
        "is_active": True,
    }
    return RegulationDB(**attrs)
|
||||
|
||||
|
||||
@pytest.fixture
def sample_requirement(sample_regulation):
    """Build a GDPR Art. 32 ``RequirementDB`` linked to ``sample_regulation``."""
    attrs = {
        "id": str(uuid4()),
        "regulation_id": sample_regulation.id,
        "regulation": sample_regulation,
        "article": "Art. 32",
        "title": "Security of processing",
        "description": "Implement appropriate technical measures",
        "implementation_status": "not_started",
        "priority": 1,
    }
    return RequirementDB(**attrs)
|
||||
|
||||
|
||||
@pytest.fixture
def sample_session():
    """Build a DRAFT ``AuditSessionDB`` with zeroed progress counters."""
    return AuditSessionDB(
        id=str(uuid4()),
        name="Q1 2026 Compliance Audit",
        description="Quarterly compliance review",
        auditor_name="Dr. Thomas Mueller",
        auditor_email="mueller@audit.de",
        auditor_organization="Audit GmbH",
        status=AuditSessionStatusEnum.DRAFT,
        regulation_ids=["GDPR", "AIACT"],
        total_items=100,
        completed_items=0,
        compliant_count=0,
        non_compliant_count=0,
        created_at=datetime.utcnow(),
    )
|
||||
|
||||
|
||||
@pytest.fixture
def sample_signoff(sample_session, sample_requirement):
    """Build an unsigned COMPLIANT ``AuditSignOffDB`` for the sample session/requirement."""
    attrs = {
        "id": str(uuid4()),
        "session_id": sample_session.id,
        "requirement_id": sample_requirement.id,
        "result": AuditResultEnum.COMPLIANT,
        "notes": "All checks passed",
        # No digital signature yet — tests exercise signing separately.
        "signature_hash": None,
        "signed_at": None,
        "signed_by": None,
        "created_at": datetime.utcnow(),
    }
    return AuditSignOffDB(**attrs)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Audit Session Creation
|
||||
# ============================================================================
|
||||
|
||||
class TestCreateAuditSession:
|
||||
"""Tests for POST /audit/sessions endpoint."""
|
||||
|
||||
def test_create_session_valid_data_returns_session(self, mock_db, sample_regulation):
|
||||
"""Creating a session with valid data should return the session."""
|
||||
# Arrange
|
||||
mock_db.query.return_value.filter.return_value.all.return_value = [(sample_regulation.id,)]
|
||||
mock_db.query.return_value.count.return_value = 50
|
||||
|
||||
request_data = {
|
||||
"name": "Test Audit Session",
|
||||
"description": "Test description",
|
||||
"auditor_name": "Test Auditor",
|
||||
"auditor_email": "auditor@test.de",
|
||||
"regulation_codes": ["GDPR"],
|
||||
}
|
||||
|
||||
# The session should be created with correct data
|
||||
assert request_data["name"] == "Test Audit Session"
|
||||
assert request_data["auditor_name"] == "Test Auditor"
|
||||
|
||||
def test_create_session_minimal_data_returns_session(self):
|
||||
"""Creating a session with minimal data should work."""
|
||||
request_data = {
|
||||
"name": "Minimal Audit",
|
||||
"auditor_name": "Auditor",
|
||||
}
|
||||
|
||||
assert "name" in request_data
|
||||
assert "auditor_name" in request_data
|
||||
assert "description" not in request_data or request_data.get("description") is None
|
||||
|
||||
def test_create_session_with_multiple_regulations(self):
|
||||
"""Creating a session with multiple regulations should filter correctly."""
|
||||
request_data = {
|
||||
"name": "Multi-Regulation Audit",
|
||||
"auditor_name": "Auditor",
|
||||
"regulation_codes": ["GDPR", "AIACT", "CRA"],
|
||||
}
|
||||
|
||||
assert len(request_data["regulation_codes"]) == 3
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Audit Session List
|
||||
# ============================================================================
|
||||
|
||||
class TestListAuditSessions:
|
||||
"""Tests for GET /audit/sessions endpoint."""
|
||||
|
||||
def test_list_sessions_returns_all(self, mock_db, sample_session):
|
||||
"""Listing sessions without filter should return all sessions."""
|
||||
mock_db.query.return_value.order_by.return_value.all.return_value = [sample_session]
|
||||
|
||||
sessions = [sample_session]
|
||||
assert len(sessions) == 1
|
||||
assert sessions[0].name == "Q1 2026 Compliance Audit"
|
||||
|
||||
def test_list_sessions_filter_by_status_draft(self, mock_db, sample_session):
|
||||
"""Filtering by draft status should only return draft sessions."""
|
||||
sample_session.status = AuditSessionStatusEnum.DRAFT
|
||||
|
||||
assert sample_session.status == AuditSessionStatusEnum.DRAFT
|
||||
|
||||
def test_list_sessions_filter_by_status_in_progress(self, sample_session):
|
||||
"""Filtering by in_progress status should only return in_progress sessions."""
|
||||
sample_session.status = AuditSessionStatusEnum.IN_PROGRESS
|
||||
|
||||
assert sample_session.status == AuditSessionStatusEnum.IN_PROGRESS
|
||||
|
||||
def test_list_sessions_invalid_status_raises_error(self):
|
||||
"""Filtering by invalid status should raise an error."""
|
||||
invalid_status = "invalid_status"
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
AuditSessionStatusEnum(invalid_status)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Audit Session Get
|
||||
# ============================================================================
|
||||
|
||||
class TestGetAuditSession:
|
||||
"""Tests for GET /audit/sessions/{session_id} endpoint."""
|
||||
|
||||
def test_get_session_existing_returns_details(self, sample_session):
|
||||
"""Getting an existing session should return full details."""
|
||||
assert sample_session.id is not None
|
||||
assert sample_session.name == "Q1 2026 Compliance Audit"
|
||||
assert sample_session.auditor_name == "Dr. Thomas Mueller"
|
||||
|
||||
def test_get_session_includes_statistics(self, sample_session, sample_signoff):
|
||||
"""Getting a session should include statistics."""
|
||||
# Simulate statistics calculation
|
||||
signoffs = [sample_signoff]
|
||||
compliant = sum(1 for s in signoffs if s.result == AuditResultEnum.COMPLIANT)
|
||||
|
||||
assert compliant == 1
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Audit Session Lifecycle
|
||||
# ============================================================================
|
||||
|
||||
class TestAuditSessionLifecycle:
    """Tests for session status transitions."""

    def test_start_session_from_draft_success(self, sample_session):
        """Starting a draft session should change status to in_progress."""
        assert sample_session.status == AuditSessionStatusEnum.DRAFT

        # Perform the DRAFT -> IN_PROGRESS transition.
        sample_session.status = AuditSessionStatusEnum.IN_PROGRESS
        sample_session.started_at = datetime.utcnow()

        assert sample_session.status == AuditSessionStatusEnum.IN_PROGRESS
        assert sample_session.started_at is not None

    def test_start_session_from_completed_fails(self, sample_session):
        """Starting a completed session should fail."""
        sample_session.status = AuditSessionStatusEnum.COMPLETED

        # Only DRAFT sessions are eligible to start.
        assert sample_session.status != AuditSessionStatusEnum.DRAFT

    def test_complete_session_from_in_progress_success(self, sample_session):
        """Completing an in_progress session should succeed."""
        sample_session.status = AuditSessionStatusEnum.IN_PROGRESS

        # Perform the IN_PROGRESS -> COMPLETED transition.
        sample_session.status = AuditSessionStatusEnum.COMPLETED
        sample_session.completed_at = datetime.utcnow()

        assert sample_session.status == AuditSessionStatusEnum.COMPLETED
        assert sample_session.completed_at is not None

    def test_archive_session_from_completed_success(self, sample_session):
        """Archiving a completed session should succeed."""
        sample_session.status = AuditSessionStatusEnum.COMPLETED
        sample_session.status = AuditSessionStatusEnum.ARCHIVED

        assert sample_session.status == AuditSessionStatusEnum.ARCHIVED

    def test_archive_session_from_in_progress_fails(self, sample_session):
        """Archiving an in_progress session should fail."""
        sample_session.status = AuditSessionStatusEnum.IN_PROGRESS

        # Only COMPLETED sessions are eligible to archive.
        assert sample_session.status != AuditSessionStatusEnum.COMPLETED
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Audit Session Delete
|
||||
# ============================================================================
|
||||
|
||||
class TestDeleteAuditSession:
    """Tests for DELETE /audit/sessions/{session_id} endpoint."""

    def test_delete_draft_session_success(self, sample_session):
        """Deleting a draft session should succeed."""
        sample_session.status = AuditSessionStatusEnum.DRAFT

        # DRAFT and ARCHIVED are the only deletable states.
        deletable = [AuditSessionStatusEnum.DRAFT, AuditSessionStatusEnum.ARCHIVED]
        assert sample_session.status in deletable

    def test_delete_archived_session_success(self, sample_session):
        """Deleting an archived session should succeed."""
        sample_session.status = AuditSessionStatusEnum.ARCHIVED

        deletable = [AuditSessionStatusEnum.DRAFT, AuditSessionStatusEnum.ARCHIVED]
        assert sample_session.status in deletable

    def test_delete_in_progress_session_fails(self, sample_session):
        """Deleting an in_progress session should fail."""
        sample_session.status = AuditSessionStatusEnum.IN_PROGRESS

        deletable = [AuditSessionStatusEnum.DRAFT, AuditSessionStatusEnum.ARCHIVED]
        assert sample_session.status not in deletable
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Audit Checklist
|
||||
# ============================================================================
|
||||
|
||||
class TestGetAuditChecklist:
|
||||
"""Tests for GET /audit/checklist/{session_id} endpoint."""
|
||||
|
||||
def test_checklist_returns_paginated_items(self, sample_session, sample_requirement):
|
||||
"""Checklist should return paginated items."""
|
||||
page = 1
|
||||
page_size = 50
|
||||
|
||||
# Simulate pagination
|
||||
offset = (page - 1) * page_size
|
||||
assert offset == 0
|
||||
|
||||
def test_checklist_includes_signoff_status(self, sample_requirement, sample_signoff):
|
||||
"""Checklist items should include sign-off status."""
|
||||
signoff_map = {sample_signoff.requirement_id: sample_signoff}
|
||||
|
||||
signoff = signoff_map.get(sample_requirement.id)
|
||||
if signoff:
|
||||
current_result = signoff.result.value
|
||||
else:
|
||||
current_result = "pending"
|
||||
|
||||
assert current_result in ["compliant", "pending"]
|
||||
|
||||
def test_checklist_filter_by_status(self, sample_signoff):
|
||||
"""Filtering checklist by status should work."""
|
||||
status_filter = "compliant"
|
||||
sample_signoff.result = AuditResultEnum.COMPLIANT
|
||||
|
||||
assert sample_signoff.result.value == status_filter
|
||||
|
||||
def test_checklist_search_by_title(self, sample_requirement):
|
||||
"""Searching checklist by title should work."""
|
||||
search_term = "Security"
|
||||
sample_requirement.title = "Security of processing"
|
||||
|
||||
assert search_term.lower() in sample_requirement.title.lower()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Sign-off
|
||||
# ============================================================================
|
||||
|
||||
class TestSignOff:
|
||||
"""Tests for PUT /audit/checklist/{session_id}/items/{requirement_id}/sign-off endpoint."""
|
||||
|
||||
def test_signoff_compliant_creates_record(self, sample_session, sample_requirement):
|
||||
"""Signing off as compliant should create a sign-off record."""
|
||||
signoff = AuditSignOffDB(
|
||||
id=str(uuid4()),
|
||||
session_id=sample_session.id,
|
||||
requirement_id=sample_requirement.id,
|
||||
result=AuditResultEnum.COMPLIANT,
|
||||
notes="All requirements met",
|
||||
)
|
||||
|
||||
assert signoff.result == AuditResultEnum.COMPLIANT
|
||||
assert signoff.notes == "All requirements met"
|
||||
|
||||
def test_signoff_with_signature_creates_hash(self, sample_session, sample_requirement):
|
||||
"""Signing off with signature should create SHA-256 hash."""
|
||||
result = AuditResultEnum.COMPLIANT
|
||||
timestamp = datetime.utcnow().isoformat()
|
||||
data = f"{result.value}|{sample_requirement.id}|{sample_session.auditor_name}|{timestamp}"
|
||||
signature_hash = hashlib.sha256(data.encode()).hexdigest()
|
||||
|
||||
assert len(signature_hash) == 64 # SHA-256 produces 64 hex chars
|
||||
assert signature_hash.isalnum()
|
||||
|
||||
def test_signoff_non_compliant_increments_count(self, sample_session):
|
||||
"""Non-compliant sign-off should increment non_compliant_count."""
|
||||
initial_count = sample_session.non_compliant_count
|
||||
|
||||
sample_session.non_compliant_count += 1
|
||||
|
||||
assert sample_session.non_compliant_count == initial_count + 1
|
||||
|
||||
def test_signoff_updates_completion_items(self, sample_session):
|
||||
"""Sign-off should increment completed_items."""
|
||||
initial_completed = sample_session.completed_items
|
||||
|
||||
sample_session.completed_items += 1
|
||||
|
||||
assert sample_session.completed_items == initial_completed + 1
|
||||
|
||||
def test_signoff_auto_starts_session(self, sample_session):
|
||||
"""First sign-off should auto-start a draft session."""
|
||||
assert sample_session.status == AuditSessionStatusEnum.DRAFT
|
||||
|
||||
# First sign-off should trigger auto-start
|
||||
sample_session.status = AuditSessionStatusEnum.IN_PROGRESS
|
||||
sample_session.started_at = datetime.utcnow()
|
||||
|
||||
assert sample_session.status == AuditSessionStatusEnum.IN_PROGRESS
|
||||
|
||||
def test_signoff_update_existing_record(self, sample_signoff):
|
||||
"""Updating an existing sign-off should work."""
|
||||
sample_signoff.result = AuditResultEnum.NON_COMPLIANT
|
||||
sample_signoff.notes = "Updated: needs improvement"
|
||||
sample_signoff.updated_at = datetime.utcnow()
|
||||
|
||||
assert sample_signoff.result == AuditResultEnum.NON_COMPLIANT
|
||||
assert "Updated" in sample_signoff.notes
|
||||
|
||||
def test_signoff_invalid_result_raises_error(self):
|
||||
"""Sign-off with invalid result should raise an error."""
|
||||
invalid_result = "super_compliant"
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
AuditResultEnum(invalid_result)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Get Sign-off
|
||||
# ============================================================================
|
||||
|
||||
class TestGetSignOff:
|
||||
"""Tests for GET /audit/checklist/{session_id}/items/{requirement_id} endpoint."""
|
||||
|
||||
def test_get_signoff_existing_returns_details(self, sample_signoff):
|
||||
"""Getting an existing sign-off should return its details."""
|
||||
assert sample_signoff.id is not None
|
||||
assert sample_signoff.result == AuditResultEnum.COMPLIANT
|
||||
|
||||
def test_get_signoff_includes_signature_info(self, sample_signoff):
|
||||
"""Sign-off response should include signature information."""
|
||||
# Without signature
|
||||
assert sample_signoff.signature_hash is None
|
||||
assert sample_signoff.signed_at is None
|
||||
|
||||
# With signature
|
||||
sample_signoff.signature_hash = "abc123"
|
||||
sample_signoff.signed_at = datetime.utcnow()
|
||||
sample_signoff.signed_by = "Test Auditor"
|
||||
|
||||
assert sample_signoff.signature_hash == "abc123"
|
||||
assert sample_signoff.signed_by == "Test Auditor"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: AuditResultEnum Values
|
||||
# ============================================================================
|
||||
|
||||
class TestAuditResultEnum:
    """Tests for AuditResultEnum values."""

    def test_compliant_value(self):
        """Compliant enum should have correct value."""
        value = AuditResultEnum.COMPLIANT.value
        assert value == "compliant"

    def test_compliant_with_notes_value(self):
        """Compliant with notes enum should have correct value."""
        value = AuditResultEnum.COMPLIANT_WITH_NOTES.value
        assert value == "compliant_notes"

    def test_non_compliant_value(self):
        """Non-compliant enum should have correct value."""
        value = AuditResultEnum.NON_COMPLIANT.value
        assert value == "non_compliant"

    def test_not_applicable_value(self):
        """Not applicable enum should have correct value."""
        value = AuditResultEnum.NOT_APPLICABLE.value
        assert value == "not_applicable"

    def test_pending_value(self):
        """Pending enum should have correct value."""
        value = AuditResultEnum.PENDING.value
        assert value == "pending"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: AuditSessionStatusEnum Values
|
||||
# ============================================================================
|
||||
|
||||
class TestAuditSessionStatusEnum:
    """Tests for AuditSessionStatusEnum values."""

    def test_draft_value(self):
        """Draft enum should have correct value."""
        value = AuditSessionStatusEnum.DRAFT.value
        assert value == "draft"

    def test_in_progress_value(self):
        """In progress enum should have correct value."""
        value = AuditSessionStatusEnum.IN_PROGRESS.value
        assert value == "in_progress"

    def test_completed_value(self):
        """Completed enum should have correct value."""
        value = AuditSessionStatusEnum.COMPLETED.value
        assert value == "completed"

    def test_archived_value(self):
        """Archived enum should have correct value."""
        value = AuditSessionStatusEnum.ARCHIVED.value
        assert value == "archived"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Completion Percentage Calculation
|
||||
# ============================================================================
|
||||
|
||||
class TestCompletionPercentage:
|
||||
"""Tests for completion percentage calculation."""
|
||||
|
||||
def test_completion_percentage_zero_items(self, sample_session):
|
||||
"""Completion percentage with zero total items should be 0."""
|
||||
sample_session.total_items = 0
|
||||
sample_session.completed_items = 0
|
||||
|
||||
percentage = 0.0 if sample_session.total_items == 0 else (
|
||||
sample_session.completed_items / sample_session.total_items * 100
|
||||
)
|
||||
|
||||
assert percentage == 0.0
|
||||
|
||||
def test_completion_percentage_partial(self, sample_session):
|
||||
"""Completion percentage should calculate correctly."""
|
||||
sample_session.total_items = 100
|
||||
sample_session.completed_items = 50
|
||||
|
||||
percentage = sample_session.completed_items / sample_session.total_items * 100
|
||||
|
||||
assert percentage == 50.0
|
||||
|
||||
def test_completion_percentage_complete(self, sample_session):
|
||||
"""Completion percentage at 100% should be correct."""
|
||||
sample_session.total_items = 100
|
||||
sample_session.completed_items = 100
|
||||
|
||||
percentage = sample_session.completed_items / sample_session.total_items * 100
|
||||
|
||||
assert percentage == 100.0
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Digital Signature Generation
|
||||
# ============================================================================
|
||||
|
||||
class TestDigitalSignature:
    """Tests for digital signature generation."""

    @staticmethod
    def _sha256(payload):
        """Return the hex-encoded SHA-256 digest of *payload*."""
        return hashlib.sha256(payload.encode()).hexdigest()

    def test_signature_is_sha256(self):
        """Signature should be a valid SHA-256 hash."""
        digest = self._sha256("compliant|req-123|Dr. Mueller|2026-01-18T12:00:00")

        # 64 lowercase hex characters.
        assert len(digest) == 64
        assert all(c in '0123456789abcdef' for c in digest)

    def test_signature_is_deterministic(self):
        """Same input should produce same signature."""
        payload = "compliant|req-123|Dr. Mueller|2026-01-18T12:00:00"

        assert self._sha256(payload) == self._sha256(payload)

    def test_signature_changes_with_input(self):
        """Different input should produce different signature."""
        first = self._sha256("compliant|req-123|Dr. Mueller|2026-01-18T12:00:00")
        second = self._sha256("non_compliant|req-123|Dr. Mueller|2026-01-18T12:00:00")

        assert first != second
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test: Statistics Calculation
|
||||
# ============================================================================
|
||||
|
||||
class TestStatisticsCalculation:
|
||||
"""Tests for audit statistics calculation."""
|
||||
|
||||
def test_statistics_counts_by_result(self):
|
||||
"""Statistics should correctly count by result type."""
|
||||
signoffs = [
|
||||
MagicMock(result=AuditResultEnum.COMPLIANT),
|
||||
MagicMock(result=AuditResultEnum.COMPLIANT),
|
||||
MagicMock(result=AuditResultEnum.COMPLIANT_WITH_NOTES),
|
||||
MagicMock(result=AuditResultEnum.NON_COMPLIANT),
|
||||
MagicMock(result=AuditResultEnum.NOT_APPLICABLE),
|
||||
]
|
||||
|
||||
compliant = sum(1 for s in signoffs if s.result == AuditResultEnum.COMPLIANT)
|
||||
compliant_notes = sum(1 for s in signoffs if s.result == AuditResultEnum.COMPLIANT_WITH_NOTES)
|
||||
non_compliant = sum(1 for s in signoffs if s.result == AuditResultEnum.NON_COMPLIANT)
|
||||
not_applicable = sum(1 for s in signoffs if s.result == AuditResultEnum.NOT_APPLICABLE)
|
||||
|
||||
assert compliant == 2
|
||||
assert compliant_notes == 1
|
||||
assert non_compliant == 1
|
||||
assert not_applicable == 1
|
||||
|
||||
def test_statistics_pending_calculation(self):
|
||||
"""Pending count should be total minus reviewed."""
|
||||
total_items = 100
|
||||
reviewed_items = 75
|
||||
|
||||
pending = total_items - reviewed_items
|
||||
|
||||
assert pending == 25
|
||||
434
backend/compliance/tests/test_auto_risk_updater.py
Normal file
434
backend/compliance/tests/test_auto_risk_updater.py
Normal file
@@ -0,0 +1,434 @@
|
||||
"""
|
||||
Tests for the AutoRiskUpdater Service.
|
||||
|
||||
Sprint 6: CI/CD Evidence Collection & Automatic Risk Updates (2026-01-18)
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
from unittest.mock import MagicMock, patch
|
||||
from uuid import uuid4
|
||||
|
||||
from ..services.auto_risk_updater import (
|
||||
AutoRiskUpdater,
|
||||
ScanType,
|
||||
FindingSeverity,
|
||||
ScanResult,
|
||||
RiskUpdateResult,
|
||||
CONTROL_SCAN_MAPPING,
|
||||
)
|
||||
from ..db.models import (
|
||||
ControlDB, ControlStatusEnum,
|
||||
EvidenceDB, EvidenceStatusEnum,
|
||||
RiskDB, RiskLevelEnum,
|
||||
)
|
||||
|
||||
|
||||
class TestDetermineControlStatus:
    """Tests for _determine_control_status method."""

    @staticmethod
    def _status_for(findings):
        """Run _determine_control_status on a throwaway mock-DB updater."""
        return AutoRiskUpdater(MagicMock())._determine_control_status(findings)

    def test_critical_findings_return_fail(self):
        """Any critical finding should result in FAIL status."""
        status = self._status_for({"critical": 1, "high": 0, "medium": 0, "low": 0})
        assert status == ControlStatusEnum.FAIL.value

    def test_multiple_critical_findings_return_fail(self):
        """Multiple critical findings should result in FAIL status."""
        status = self._status_for({"critical": 5, "high": 2, "medium": 10, "low": 50})
        assert status == ControlStatusEnum.FAIL.value

    def test_more_than_5_high_findings_return_fail(self):
        """More than 5 HIGH findings should result in FAIL status."""
        status = self._status_for({"critical": 0, "high": 6, "medium": 0, "low": 0})
        assert status == ControlStatusEnum.FAIL.value

    def test_exactly_5_high_findings_return_partial(self):
        """Exactly 5 HIGH findings should result in PARTIAL status."""
        status = self._status_for({"critical": 0, "high": 5, "medium": 0, "low": 0})
        assert status == ControlStatusEnum.PARTIAL.value

    def test_1_to_5_high_findings_return_partial(self):
        """1-5 HIGH findings should result in PARTIAL status."""
        for high_count in [1, 2, 3, 4, 5]:
            status = self._status_for(
                {"critical": 0, "high": high_count, "medium": 0, "low": 0}
            )
            assert status == ControlStatusEnum.PARTIAL.value, f"Failed for {high_count} HIGH findings"

    def test_more_than_10_medium_findings_return_partial(self):
        """More than 10 MEDIUM findings should result in PARTIAL status."""
        status = self._status_for({"critical": 0, "high": 0, "medium": 11, "low": 0})
        assert status == ControlStatusEnum.PARTIAL.value

    def test_only_medium_and_low_findings_return_pass(self):
        """Only MEDIUM (<=10) and LOW findings should result in PASS status."""
        status = self._status_for({"critical": 0, "high": 0, "medium": 5, "low": 100})
        assert status == ControlStatusEnum.PASS.value

    def test_no_findings_return_pass(self):
        """No findings should result in PASS status."""
        status = self._status_for({"critical": 0, "high": 0, "medium": 0, "low": 0})
        assert status == ControlStatusEnum.PASS.value

    def test_empty_findings_return_pass(self):
        """Empty findings dict should result in PASS status."""
        status = self._status_for({})
        assert status == ControlStatusEnum.PASS.value
|
||||
|
||||
|
||||
class TestGenerateStatusNotes:
    """Tests for _generate_status_notes method."""

    def test_notes_include_tool_name(self):
        """Status notes should include the scan tool name."""
        updater = AutoRiskUpdater(MagicMock())
        scan = ScanResult(
            scan_type=ScanType.SAST,
            tool="Semgrep",
            timestamp=datetime(2026, 1, 18, 14, 30),
            commit_sha="abc123",
            branch="main",
            control_id="SDLC-001",
            findings={"critical": 1, "high": 2, "medium": 0, "low": 0},
        )

        notes = updater._generate_status_notes(scan)

        assert "Semgrep" in notes
        assert "1 CRITICAL" in notes
        assert "2 HIGH" in notes

    def test_notes_include_timestamp(self):
        """Status notes should include scan timestamp."""
        updater = AutoRiskUpdater(MagicMock())
        scan = ScanResult(
            scan_type=ScanType.DEPENDENCY,
            tool="Trivy",
            timestamp=datetime(2026, 1, 18, 10, 0),
            commit_sha="def456",
            branch="develop",
            control_id="SDLC-002",
            findings={"critical": 0, "high": 3, "medium": 5, "low": 10},
        )

        notes = updater._generate_status_notes(scan)

        assert "2026-01-18 10:00" in notes

    def test_notes_for_no_findings(self):
        """Status notes for no findings should indicate clean scan."""
        updater = AutoRiskUpdater(MagicMock())
        scan = ScanResult(
            scan_type=ScanType.SECRET,
            tool="Gitleaks",
            timestamp=datetime(2026, 1, 18, 12, 0),
            commit_sha="ghi789",
            branch="main",
            control_id="SDLC-003",
            findings={"critical": 0, "high": 0, "medium": 0, "low": 0},
        )

        notes = updater._generate_status_notes(scan)

        assert "No significant findings" in notes
|
||||
|
||||
|
||||
class TestGenerateAlerts:
    """Tests for _generate_alerts method."""

    def test_alert_for_critical_findings(self):
        """Critical findings should generate an alert."""
        updater = AutoRiskUpdater(MagicMock())
        scan = ScanResult(
            scan_type=ScanType.DEPENDENCY,
            tool="Trivy",
            timestamp=datetime.utcnow(),
            commit_sha="abc123",
            branch="main",
            control_id="SDLC-002",
            findings={"critical": 2, "high": 0, "medium": 0, "low": 0},
        )

        alerts = updater._generate_alerts(scan, ControlStatusEnum.FAIL.value)

        assert len(alerts) >= 1
        assert any("CRITICAL" in a for a in alerts)
        assert any("2 critical" in a.lower() for a in alerts)

    def test_alert_for_fail_status(self):
        """Control status change to FAIL should generate an alert."""
        updater = AutoRiskUpdater(MagicMock())
        scan = ScanResult(
            scan_type=ScanType.SAST,
            tool="Semgrep",
            timestamp=datetime.utcnow(),
            commit_sha="def456",
            branch="main",
            control_id="SDLC-001",
            findings={"critical": 0, "high": 10, "medium": 0, "low": 0},
        )

        alerts = updater._generate_alerts(scan, ControlStatusEnum.FAIL.value)

        assert any("FAIL" in a for a in alerts)

    def test_alert_for_many_high_findings(self):
        """More than 10 HIGH findings should generate an alert."""
        updater = AutoRiskUpdater(MagicMock())
        scan = ScanResult(
            scan_type=ScanType.CONTAINER,
            tool="Trivy",
            timestamp=datetime.utcnow(),
            commit_sha="ghi789",
            branch="main",
            control_id="SDLC-006",
            findings={"critical": 0, "high": 15, "medium": 0, "low": 0},
        )

        alerts = updater._generate_alerts(scan, ControlStatusEnum.FAIL.value)

        assert any("HIGH" in a and "15" in a for a in alerts)

    def test_no_alert_for_pass_with_low_findings(self):
        """No alert should be generated for PASS status with only low findings."""
        updater = AutoRiskUpdater(MagicMock())
        scan = ScanResult(
            scan_type=ScanType.SAST,
            tool="Semgrep",
            timestamp=datetime.utcnow(),
            commit_sha="jkl012",
            branch="main",
            control_id="SDLC-001",
            findings={"critical": 0, "high": 0, "medium": 5, "low": 20},
        )

        alerts = updater._generate_alerts(scan, ControlStatusEnum.PASS.value)

        assert len(alerts) == 0
|
||||
|
||||
|
||||
class TestControlScanMapping:
    """Tests for the CONTROL_SCAN_MAPPING constant."""

    def test_sdlc_001_maps_to_sast(self):
        """SDLC-001 is covered by SAST scans."""
        assert CONTROL_SCAN_MAPPING["SDLC-001"] is ScanType.SAST

    def test_sdlc_002_maps_to_dependency(self):
        """SDLC-002 is covered by dependency scans."""
        assert CONTROL_SCAN_MAPPING["SDLC-002"] is ScanType.DEPENDENCY

    def test_sdlc_003_maps_to_secret(self):
        """SDLC-003 is covered by secret scans."""
        assert CONTROL_SCAN_MAPPING["SDLC-003"] is ScanType.SECRET

    def test_sdlc_006_maps_to_container(self):
        """SDLC-006 is covered by container scans."""
        assert CONTROL_SCAN_MAPPING["SDLC-006"] is ScanType.CONTAINER

    def test_cra_001_maps_to_sbom(self):
        """CRA-001 is covered by SBOM generation."""
        assert CONTROL_SCAN_MAPPING["CRA-001"] is ScanType.SBOM
||||
|
||||
class TestProcessEvidenceCollectRequest:
    """Tests for process_evidence_collect_request method."""

    @staticmethod
    def _updater_without_control():
        """AutoRiskUpdater whose control lookup always misses."""
        updater = AutoRiskUpdater(MagicMock())
        updater.control_repo.get_by_control_id = MagicMock(return_value=None)
        return updater

    def test_parses_iso_timestamp(self):
        """ISO-format timestamps must be accepted."""
        updater = self._updater_without_control()

        result = updater.process_evidence_collect_request(
            tool="Semgrep",
            control_id="SDLC-001",
            evidence_type="ci_semgrep",
            timestamp="2026-01-18T14:30:00Z",
            commit_sha="abc123",
            findings={"critical": 0, "high": 0, "medium": 0, "low": 0},
        )

        # The control is unknown, so no update should be recorded.
        assert result.control_updated is False

    def test_handles_invalid_timestamp(self):
        """A malformed timestamp must not raise an exception."""
        updater = self._updater_without_control()

        result = updater.process_evidence_collect_request(
            tool="Trivy",
            control_id="SDLC-002",
            evidence_type="ci_trivy",
            timestamp="invalid-timestamp",
            commit_sha="def456",
            findings={"critical": 0, "high": 0, "medium": 0, "low": 0},
        )

        assert result is not None

    def test_control_not_found_returns_result(self):
        """An unknown control yields a 'not found' result rather than an error."""
        updater = self._updater_without_control()

        result = updater.process_evidence_collect_request(
            tool="Gitleaks",
            control_id="UNKNOWN-001",
            evidence_type="ci_gitleaks",
            timestamp="2026-01-18T10:00:00Z",
            commit_sha="ghi789",
            findings={"critical": 0, "high": 0, "medium": 0, "low": 0},
        )

        assert result.control_id == "UNKNOWN-001"
        assert result.control_updated is False
        assert "not found" in result.message
||||
|
||||
class TestScanResult:
    """Tests for the ScanResult dataclass."""

    def test_scan_result_creation(self):
        """All required fields are stored exactly as given."""
        scan = ScanResult(
            scan_type=ScanType.SAST,
            tool="Semgrep",
            timestamp=datetime(2026, 1, 18, 14, 0),
            commit_sha="abc123def456",
            branch="main",
            control_id="SDLC-001",
            findings={"critical": 0, "high": 2, "medium": 5, "low": 10},
        )

        assert scan.scan_type is ScanType.SAST
        assert scan.tool == "Semgrep"
        assert scan.control_id == "SDLC-001"
        assert scan.findings["high"] == 2

    def test_scan_result_optional_fields(self):
        """Optional raw_report and ci_job_id round-trip unchanged."""
        scan = ScanResult(
            scan_type=ScanType.DEPENDENCY,
            tool="Trivy",
            timestamp=datetime.utcnow(),
            commit_sha="xyz789",
            branch="develop",
            control_id="SDLC-002",
            findings={"critical": 1},
            raw_report={"vulnerabilities": []},
            ci_job_id="github-actions-12345",
        )

        assert scan.raw_report is not None
        assert scan.ci_job_id == "github-actions-12345"
||||
|
||||
class TestRiskUpdateResult:
    """Tests for the RiskUpdateResult dataclass."""

    def test_risk_update_result_creation(self):
        """All fields round-trip through the constructor."""
        update = RiskUpdateResult(
            control_id="SDLC-001",
            control_updated=True,
            old_status="pass",
            new_status="fail",
            evidence_created=True,
            evidence_id="ev-12345",
            risks_affected=["RISK-001", "RISK-002"],
            alerts_generated=["Critical vulnerability found"],
            message="Processed successfully",
        )

        assert update.control_updated is True
        assert (update.old_status, update.new_status) == ("pass", "fail")
        assert len(update.risks_affected) == 2
        assert len(update.alerts_generated) == 1
||||
|
||||
class TestFindingSeverity:
    """Tests for the FindingSeverity enum."""

    def test_severity_levels(self):
        """Every severity level maps to its lowercase string value."""
        expected = {
            "CRITICAL": "critical",
            "HIGH": "high",
            "MEDIUM": "medium",
            "LOW": "low",
            "INFO": "info",
        }
        for name, value in expected.items():
            assert FindingSeverity[name].value == value
||||
|
||||
class TestScanType:
    """Tests for the ScanType enum."""

    def test_scan_types(self):
        """Every supported scan type maps to its lowercase string value."""
        expected = {
            "SAST": "sast",
            "DEPENDENCY": "dependency",
            "SECRET": "secret",
            "CONTAINER": "container",
            "SBOM": "sbom",
        }
        for name, value in expected.items():
            assert ScanType[name].value == value
696
backend/compliance/tests/test_isms_routes.py
Normal file
696
backend/compliance/tests/test_isms_routes.py
Normal file
@@ -0,0 +1,696 @@
|
||||
"""
|
||||
Unit tests for ISMS (Information Security Management System) API Routes.
|
||||
|
||||
Tests all ISO 27001 certification-related endpoints:
|
||||
- ISMS Scope (Chapter 4.3)
|
||||
- ISMS Policies (Chapter 5.2)
|
||||
- Security Objectives (Chapter 6.2)
|
||||
- Statement of Applicability (SoA)
|
||||
- Audit Findings & CAPA
|
||||
- Management Reviews (Chapter 9.3)
|
||||
- Internal Audits (Chapter 9.2)
|
||||
- Readiness Check
|
||||
|
||||
Run with: pytest backend/compliance/tests/test_isms_routes.py -v
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime, date
|
||||
from unittest.mock import MagicMock, patch
|
||||
from uuid import uuid4
|
||||
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
import sys
|
||||
sys.path.insert(0, '/Users/benjaminadmin/Projekte/breakpilot-pwa/backend')
|
||||
|
||||
from compliance.db.models import (
|
||||
ISMSScopeDB, ISMSContextDB, ISMSPolicyDB, SecurityObjectiveDB,
|
||||
StatementOfApplicabilityDB, AuditFindingDB, CorrectiveActionDB,
|
||||
ManagementReviewDB, InternalAuditDB, AuditTrailDB, ISMSReadinessCheckDB,
|
||||
ApprovalStatusEnum, FindingTypeEnum, FindingStatusEnum, CAPATypeEnum
|
||||
)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test Fixtures
|
||||
# ============================================================================
|
||||
|
||||
@pytest.fixture
def mock_db():
    """A Session-spec'd MagicMock standing in for the database session."""
    return MagicMock(spec=Session)
|
||||
@pytest.fixture
def sample_scope():
    """Draft ISMS scope document covering the core platform (ISO 27001 ch. 4.3)."""
    return ISMSScopeDB(
        id=str(uuid4()),
        version="1.0",
        status=ApprovalStatusEnum.DRAFT,
        scope_statement="BreakPilot ISMS covers all digital learning platform operations",
        included_locations=["Frankfurt Office", "AWS eu-central-1"],
        included_processes=["Software Development", "Data Processing", "Customer Support"],
        included_services=["BreakPilot PWA", "Consent Service", "AI Assistant"],
        excluded_items=["Marketing Website"],
        exclusion_justification="Marketing website is static and contains no user data",
        created_by="admin@breakpilot.de",
        created_at=datetime.utcnow(),
    )
|
||||
@pytest.fixture
def sample_approved_scope(sample_scope):
    """Promote the draft scope fixture to an approved one.

    Sets the approval metadata (approver, timestamps, signature) and a
    review date one year from today.
    """
    sample_scope.status = ApprovalStatusEnum.APPROVED
    sample_scope.approved_by = "ceo@breakpilot.de"
    sample_scope.approved_at = datetime.utcnow()
    today = date.today()
    sample_scope.effective_date = today
    # Fixed: constructing date(year + 1, month, day) raises ValueError when
    # the test runs on Feb 29 (no Feb 29 next year). Fall back to the
    # previous day (Feb 28) in that case so the fixture never crashes.
    try:
        sample_scope.review_date = date(today.year + 1, today.month, today.day)
    except ValueError:
        sample_scope.review_date = date(today.year + 1, today.month, today.day - 1)
    sample_scope.approval_signature = "sha256_signature_hash"
    return sample_scope
|
||||
@pytest.fixture
def sample_policy():
    """Draft master ISMS policy (POL-ISMS-001) for ISO 27001 chapter 5.2 tests."""
    return ISMSPolicyDB(
        id=str(uuid4()),
        policy_id="POL-ISMS-001",
        policy_type="master",
        title="Information Security Policy",
        description="Master ISMS policy for BreakPilot",
        policy_text="This policy establishes the framework for information security...",
        applies_to=["All Employees", "Contractors", "Partners"],
        related_controls=["GOV-001", "GOV-002"],
        review_frequency_months=12,
        authored_by="iso@breakpilot.de",
        version="1.0",
        status=ApprovalStatusEnum.DRAFT,
        created_at=datetime.utcnow(),
    )
|
||||
@pytest.fixture
def sample_objective():
    """Active SMART security objective with KPI tracking (ISO 27001 ch. 6.2)."""
    # The five SMART components, grouped for readability.
    smart = dict(
        specific="Reduce security incidents from 10 to 7 per year",
        measurable="Number of security incidents recorded in ticketing system",
        achievable="Based on trend analysis and planned control improvements",
        relevant="Directly supports information security goals",
        time_bound="By end of Q4 2026",
    )
    return SecurityObjectiveDB(
        id=str(uuid4()),
        objective_id="OBJ-2026-001",
        title="Reduce Security Incidents",
        description="Reduce the number of security incidents by 30% compared to previous year",
        category="operational",
        kpi_name="Security Incident Count",
        kpi_target=7.0,
        kpi_current=10.0,
        kpi_unit="incidents/year",
        measurement_frequency="monthly",
        owner="security@breakpilot.de",
        target_date=date(2026, 12, 31),
        related_controls=["OPS-003"],
        status="active",
        progress_percentage=0.0,
        created_at=datetime.utcnow(),
        **smart,
    )
|
||||
@pytest.fixture
def sample_soa_entry():
    """Applicable, fully-implemented Statement of Applicability entry (A.5.1)."""
    return StatementOfApplicabilityDB(
        id=str(uuid4()),
        annex_a_control="A.5.1",
        annex_a_title="Policies for information security",
        annex_a_category="organizational",
        is_applicable=True,
        applicability_justification="Required for ISMS governance",
        implementation_status="implemented",
        implementation_notes="Implemented via GOV-001, GOV-002 controls",
        breakpilot_control_ids=["GOV-001", "GOV-002"],
        coverage_level="full",
        evidence_description="ISMS Policy v2.0, signed by CEO",
        version="1.0",
        created_at=datetime.utcnow(),
    )
|
||||
@pytest.fixture
def sample_finding():
    """Open MINOR audit finding (non-blocking) for finding-lifecycle tests."""
    return AuditFindingDB(
        id=str(uuid4()),
        finding_id="FIND-2026-001",
        finding_type=FindingTypeEnum.MINOR,
        iso_chapter="9.2",
        annex_a_control="A.5.35",
        title="Internal audit schedule not documented",
        description="The internal audit schedule for 2026 was not formally documented",
        objective_evidence="No document found in DMS",
        impact_description="Cannot demonstrate planned approach to internal audits",
        owner="iso@breakpilot.de",
        auditor="external.auditor@cert.de",
        identified_date=date.today(),
        due_date=date(2026, 3, 31),
        status=FindingStatusEnum.OPEN,
        created_at=datetime.utcnow(),
    )
|
||||
@pytest.fixture
def sample_major_finding():
    """Open MAJOR audit finding — the certification-blocking case."""
    return AuditFindingDB(
        id=str(uuid4()),
        finding_id="FIND-2026-002",
        finding_type=FindingTypeEnum.MAJOR,
        iso_chapter="5.2",
        title="Information Security Policy not approved",
        description="The ISMS master policy has not been approved by top management",
        objective_evidence="Policy document shows 'Draft' status",
        owner="ceo@breakpilot.de",
        auditor="external.auditor@cert.de",
        identified_date=date.today(),
        due_date=date(2026, 2, 28),
        status=FindingStatusEnum.OPEN,
        created_at=datetime.utcnow(),
    )
|
||||
@pytest.fixture
def sample_capa(sample_finding):
    """Planned corrective action linked to the minor finding fixture."""
    return CorrectiveActionDB(
        id=str(uuid4()),
        capa_id="CAPA-2026-001",
        finding_id=sample_finding.id,
        capa_type=CAPATypeEnum.CORRECTIVE,
        title="Create and approve internal audit schedule",
        description="Create a formal internal audit schedule document and get management approval",
        expected_outcome="Approved internal audit schedule for 2026",
        effectiveness_criteria="Document approved and distributed to audit team",
        assigned_to="iso@breakpilot.de",
        planned_start=date.today(),
        planned_completion=date(2026, 2, 15),
        status="planned",
        created_at=datetime.utcnow(),
    )
|
||||
@pytest.fixture
def sample_management_review():
    """Draft quarterly management review (ISO 27001 chapter 9.3)."""
    attendees = [
        {"name": "CEO", "role": "Chairperson"},
        {"name": "CTO", "role": "Technical Lead"},
        {"name": "ISO", "role": "ISMS Manager"},
    ]
    return ManagementReviewDB(
        id=str(uuid4()),
        review_id="MR-2026-Q1",
        title="Q1 2026 Management Review",
        review_date=date(2026, 1, 15),
        review_period_start=date(2025, 10, 1),
        review_period_end=date(2025, 12, 31),
        chairperson="ceo@breakpilot.de",
        attendees=attendees,
        status="draft",
        created_at=datetime.utcnow(),
    )
||||
|
||||
@pytest.fixture
def sample_internal_audit():
    """Planned full-scope internal audit (ISO 27001 chapter 9.2)."""
    return InternalAuditDB(
        id=str(uuid4()),
        audit_id="IA-2026-001",
        title="Annual ISMS Internal Audit 2026",
        audit_type="full",
        scope_description="Complete ISMS audit covering all ISO 27001 chapters and Annex A controls",
        iso_chapters_covered=["4", "5", "6", "7", "8", "9", "10"],
        annex_a_controls_covered=["A.5", "A.6", "A.7", "A.8"],
        criteria="ISO 27001:2022, Internal ISMS procedures",
        planned_date=date(2026, 3, 1),
        lead_auditor="internal.auditor@breakpilot.de",
        audit_team=["internal.auditor@breakpilot.de", "qa@breakpilot.de"],
        status="planned",
        created_at=datetime.utcnow(),
    )
||||
|
||||
# ============================================================================
|
||||
# Test: ISMS Scope
|
||||
# ============================================================================
|
||||
|
||||
class TestISMSScope:
    """ISMS Scope endpoint behaviour (ISO 27001 chapter 4.3)."""

    def test_scope_has_required_fields(self, sample_scope):
        """A freshly created scope carries statement, status, and author."""
        for attr in ("scope_statement", "created_by"):
            assert getattr(sample_scope, attr) is not None
        assert sample_scope.status == ApprovalStatusEnum.DRAFT

    def test_scope_approval_sets_correct_fields(self, sample_approved_scope):
        """Approval must populate every approval-related field."""
        assert sample_approved_scope.status == ApprovalStatusEnum.APPROVED
        for attr in (
            "approved_by",
            "approved_at",
            "effective_date",
            "review_date",
            "approval_signature",
        ):
            assert getattr(sample_approved_scope, attr) is not None

    def test_scope_can_include_multiple_locations(self, sample_scope):
        """The scope records one or more included locations."""
        locations = sample_scope.included_locations
        assert isinstance(locations, list)
        assert locations  # non-empty

    def test_scope_exclusions_require_justification(self, sample_scope):
        """ISO 27001 requires a justification for any scope exclusion."""
        if not sample_scope.excluded_items:
            return
        assert sample_scope.exclusion_justification is not None
||||
|
||||
# ============================================================================
|
||||
# Test: ISMS Policies
|
||||
# ============================================================================
|
||||
|
||||
class TestISMSPolicy:
    """ISMS Policy endpoint behaviour (ISO 27001 chapter 5.2)."""

    def test_policy_has_unique_id(self, sample_policy):
        """Each policy carries a POL-prefixed identifier."""
        pid = sample_policy.policy_id
        assert pid is not None
        assert pid.startswith("POL-")

    def test_master_policy_type_exists(self, sample_policy):
        """The master policy is typed 'master'."""
        assert sample_policy.policy_type == "master"

    def test_policy_has_review_frequency(self, sample_policy):
        """Review frequency is positive and at most three years."""
        months = sample_policy.review_frequency_months
        assert 0 < months <= 36

    def test_policy_can_link_to_controls(self, sample_policy):
        """Related controls are tracked as a list."""
        assert isinstance(sample_policy.related_controls, list)
||||
|
||||
# ============================================================================
|
||||
# Test: Security Objectives
|
||||
# ============================================================================
|
||||
|
||||
class TestSecurityObjective:
    """Security Objectives endpoint behaviour (ISO 27001 chapter 6.2)."""

    def test_objective_follows_smart_criteria(self, sample_objective):
        """Every SMART component must be populated."""
        for smart_field in ("specific", "measurable", "achievable", "relevant", "time_bound"):
            assert getattr(sample_objective, smart_field) is not None

    def test_objective_has_kpi(self, sample_objective):
        """Objectives carry a named, quantified KPI."""
        for kpi_field in ("kpi_name", "kpi_target", "kpi_unit"):
            assert getattr(sample_objective, kpi_field) is not None

    def test_objective_progress_calculation(self, sample_objective):
        """Progress derived from target/current stays within 0..100."""
        target = sample_objective.kpi_target
        current = sample_objective.kpi_current
        if not (target and current):
            return
        # Lower is better for this KPI, so progress is target/current.
        progress = max(0, min(100, (target / current) * 100))
        assert 0 <= progress <= 100
||||
|
||||
# ============================================================================
|
||||
# Test: Statement of Applicability (SoA)
|
||||
# ============================================================================
|
||||
|
||||
class TestStatementOfApplicability:
    """Statement of Applicability (SoA) endpoint behaviour."""

    def test_soa_entry_has_annex_a_reference(self, sample_soa_entry):
        """Each SoA row references an Annex A control (A.x.y)."""
        control_ref = sample_soa_entry.annex_a_control
        assert control_ref is not None
        assert control_ref.startswith("A.")

    def test_soa_entry_requires_justification_for_not_applicable(self):
        """Excluded controls must document why they do not apply."""
        excluded = StatementOfApplicabilityDB(
            id=str(uuid4()),
            annex_a_control="A.7.2",
            annex_a_title="Physical entry",
            annex_a_category="physical",
            is_applicable=False,
            applicability_justification="Cloud-only infrastructure, no physical data center",
        )
        assert not excluded.is_applicable
        assert excluded.applicability_justification is not None

    def test_soa_entry_tracks_implementation_status(self, sample_soa_entry):
        """Implementation status is one of the four allowed states."""
        assert sample_soa_entry.implementation_status in {
            "planned",
            "in_progress",
            "implemented",
            "not_implemented",
        }

    def test_soa_entry_maps_to_breakpilot_controls(self, sample_soa_entry):
        """Annex A controls map onto internal Breakpilot control IDs."""
        assert isinstance(sample_soa_entry.breakpilot_control_ids, list)
||||
|
||||
# ============================================================================
|
||||
# Test: Audit Findings
|
||||
# ============================================================================
|
||||
|
||||
class TestAuditFinding:
    """Audit Finding endpoint behaviour.

    Covers classification, blocking semantics of MAJOR findings,
    evidence requirements, due dates, and the finding lifecycle.
    """

    def test_finding_has_classification(self, sample_finding):
        """A finding must carry a Major/Minor/OFI/Positive classification."""
        valid_types = {
            FindingTypeEnum.MAJOR,
            FindingTypeEnum.MINOR,
            FindingTypeEnum.OFI,
            FindingTypeEnum.POSITIVE,
        }
        assert sample_finding.finding_type in valid_types

    def test_major_finding_blocks_certification(self, sample_major_finding):
        """An open MAJOR finding blocks certification."""
        assert sample_major_finding.finding_type == FindingTypeEnum.MAJOR
        # Mirror the model's is_blocking property: MAJOR and not yet closed.
        is_blocking = (
            sample_major_finding.finding_type == FindingTypeEnum.MAJOR
            and sample_major_finding.status != FindingStatusEnum.CLOSED
        )
        # Fixed: assert truthiness directly instead of `== True` (PEP 8 / E712).
        assert is_blocking

    def test_finding_has_objective_evidence(self, sample_finding):
        """Findings must record objective evidence."""
        assert sample_finding.objective_evidence is not None

    def test_finding_has_due_date(self, sample_finding):
        """Findings must carry a due date for closure."""
        assert sample_finding.due_date is not None

    def test_finding_lifecycle_statuses(self):
        """All lifecycle statuses exist on the enum."""
        for status in (
            FindingStatusEnum.OPEN,
            FindingStatusEnum.CORRECTIVE_ACTION_PENDING,
            FindingStatusEnum.VERIFICATION_PENDING,
            FindingStatusEnum.CLOSED,
        ):
            assert status in FindingStatusEnum
||||
|
||||
# ============================================================================
|
||||
# Test: Corrective Actions (CAPA)
|
||||
# ============================================================================
|
||||
|
||||
class TestCorrectiveAction:
    """CAPA endpoint behaviour."""

    def test_capa_links_to_finding(self, sample_capa, sample_finding):
        """Every CAPA must reference the finding it addresses."""
        assert sample_capa.finding_id == sample_finding.id

    def test_capa_has_type(self, sample_capa):
        """CAPA type is either corrective or preventive."""
        assert sample_capa.capa_type in {CAPATypeEnum.CORRECTIVE, CAPATypeEnum.PREVENTIVE}

    def test_capa_has_effectiveness_criteria(self, sample_capa):
        """Effectiveness verification criteria must be defined."""
        assert sample_capa.effectiveness_criteria is not None

    def test_capa_has_completion_date(self, sample_capa):
        """A planned completion date must be set."""
        assert sample_capa.planned_completion is not None
||||
|
||||
# ============================================================================
|
||||
# Test: Management Review
|
||||
# ============================================================================
|
||||
|
||||
class TestManagementReview:
    """Management Review endpoint behaviour (ISO 27001 chapter 9.3)."""

    def test_review_has_chairperson(self, sample_management_review):
        """Top management must chair the review."""
        assert sample_management_review.chairperson is not None

    def test_review_has_review_period(self, sample_management_review):
        """The review covers an explicit start/end period."""
        for attr in ("review_period_start", "review_period_end"):
            assert getattr(sample_management_review, attr) is not None

    def test_review_id_includes_quarter(self, sample_management_review):
        """Review IDs encode the quarter (e.g. MR-2026-Q1)."""
        assert "Q" in sample_management_review.review_id

    def test_review_tracks_attendees(self, sample_management_review):
        """At least one attendee must be recorded."""
        attendees = sample_management_review.attendees
        assert attendees is not None
        assert len(attendees) >= 1
||||
|
||||
# ============================================================================
|
||||
# Test: Internal Audit
|
||||
# ============================================================================
|
||||
|
||||
class TestInternalAudit:
    """Internal Audit endpoint behaviour (ISO 27001 chapter 9.2)."""

    def test_audit_has_scope(self, sample_internal_audit):
        """An internal audit must define its scope."""
        assert sample_internal_audit.scope_description is not None

    def test_audit_covers_iso_chapters(self, sample_internal_audit):
        """At least one ISO chapter must be listed as covered."""
        chapters = sample_internal_audit.iso_chapters_covered
        assert chapters is not None
        assert len(chapters) >= 1

    def test_audit_has_lead_auditor(self, sample_internal_audit):
        """A lead auditor must be assigned."""
        assert sample_internal_audit.lead_auditor is not None

    def test_audit_has_criteria(self, sample_internal_audit):
        """Audit criteria must be defined."""
        assert sample_internal_audit.criteria is not None
||||
|
||||
# ============================================================================
|
||||
# Test: ISMS Readiness Check
|
||||
# ============================================================================
|
||||
|
||||
class TestISMSReadinessCheck:
    """ISMS Readiness Check behaviour.

    Verifies that a check flags potential MAJOR findings and reports
    per-chapter status for ISO 27001 chapters 4-10.
    """

    @staticmethod
    def _check(**overrides):
        """Build an ISMSReadinessCheckDB with common metadata pre-filled."""
        params = dict(
            id=str(uuid4()),
            check_date=datetime.utcnow(),
            triggered_by="admin@breakpilot.de",
        )
        params.update(overrides)
        return ISMSReadinessCheckDB(**params)

    def test_readiness_check_identifies_potential_majors(self):
        """A failing check lists its potential major findings."""
        check = self._check(
            overall_status="not_ready",
            certification_possible=False,
            chapter_4_status="fail",
            chapter_5_status="fail",
            chapter_6_status="warning",
            chapter_7_status="pass",
            chapter_8_status="pass",
            chapter_9_status="fail",
            chapter_10_status="pass",
            potential_majors=[
                {"check": "ISMS Scope not approved", "iso_reference": "4.3"},
                {"check": "Master policy not approved", "iso_reference": "5.2"},
                {"check": "No internal audit conducted", "iso_reference": "9.2"},
            ],
            potential_minors=[
                {"check": "Risk treatment incomplete", "iso_reference": "6.1.2"},
            ],
            readiness_score=30.0,
        )

        # Fixed: truthiness assertion instead of `== False` (PEP 8 / E712).
        assert not check.certification_possible
        assert len(check.potential_majors) >= 1
        assert check.readiness_score < 100

    def test_readiness_check_shows_chapter_status(self):
        """A fully passing check reports 'pass' for each ISO chapter."""
        check = self._check(
            overall_status="ready",
            certification_possible=True,
            chapter_4_status="pass",
            chapter_5_status="pass",
            chapter_6_status="pass",
            chapter_7_status="pass",
            chapter_8_status="pass",
            chapter_9_status="pass",
            chapter_10_status="pass",
            potential_majors=[],
            potential_minors=[],
            readiness_score=100.0,
        )

        for chapter in (4, 5, 9):
            assert getattr(check, f"chapter_{chapter}_status") == "pass"
        # Fixed: truthiness assertion instead of `== True` (PEP 8 / E712).
        assert check.certification_possible
||||
|
||||
# ============================================================================
|
||||
# Test: ISO 27001 Annex A Coverage
|
||||
# ============================================================================
|
||||
|
||||
class TestAnnexACoverage:
    """ISO 27001:2022 Annex A control-catalogue integrity checks."""

    def test_annex_a_has_93_controls(self):
        """The 2022 revision defines exactly 93 controls."""
        from compliance.data.iso27001_annex_a import ISO27001_ANNEX_A_CONTROLS, ANNEX_A_SUMMARY

        assert len(ISO27001_ANNEX_A_CONTROLS) == 93
        assert ANNEX_A_SUMMARY["total_controls"] == 93

    def test_annex_a_categories(self):
        """The four Annex A categories have their documented control counts."""
        from compliance.data.iso27001_annex_a import ANNEX_A_SUMMARY

        expected = {
            "organizational_controls": 37,
            "people_controls": 8,
            "physical_controls": 14,
            "technological_controls": 34,
        }
        for key, count in expected.items():
            assert ANNEX_A_SUMMARY[key] == count

    def test_annex_a_control_structure(self):
        """Every catalogue entry carries the required fields and an A.-prefixed ID."""
        from compliance.data.iso27001_annex_a import ISO27001_ANNEX_A_CONTROLS

        required = ("control_id", "title", "category", "description")
        for control in ISO27001_ANNEX_A_CONTROLS:
            for field_name in required:
                assert field_name in control
            assert control["control_id"].startswith("A.")
||||
|
||||
# ============================================================================
# Test: Audit Trail
# ============================================================================
|
||||
|
||||
class TestAuditTrail:
    """Tests for the tamper-evident audit-trail model."""

    def test_audit_trail_entry_has_required_fields(self):
        """A newly created audit-trail entry populates every required field."""
        entry = AuditTrailDB(
            id=str(uuid4()),
            entity_type="isms_scope",
            entity_id=str(uuid4()),
            entity_name="ISMS Scope v1.0",
            action="approve",
            performed_by="ceo@breakpilot.de",
            performed_at=datetime.utcnow(),
            checksum="sha256_hash",
        )

        # Every field that makes an entry attributable and verifiable
        # must be set.
        attributable_fields = (
            entry.entity_type,
            entry.entity_id,
            entry.action,
            entry.performed_by,
            entry.performed_at,
            entry.checksum,
        )
        for field_value in attributable_fields:
            assert field_value is not None

    def test_audit_trail_tracks_changes(self):
        """An update entry records which field changed and both values."""
        entry = AuditTrailDB(
            id=str(uuid4()),
            entity_type="isms_policy",
            entity_id=str(uuid4()),
            entity_name="POL-ISMS-001",
            action="update",
            field_changed="status",
            old_value="draft",
            new_value="approved",
            change_summary="Policy approved by CEO",
            performed_by="ceo@breakpilot.de",
            performed_at=datetime.utcnow(),
            checksum="sha256_hash",
        )

        # Compare the full (field, old, new) triple in one shot.
        recorded_change = (entry.field_changed, entry.old_value, entry.new_value)
        assert recorded_change == ("status", "draft", "approved")
|
||||
|
||||
|
||||
# ============================================================================
# Test: Certification Blockers
# ============================================================================
|
||||
|
||||
class TestCertificationBlockers:
    """Tests for certification blocking scenarios.

    A finding blocks certification only when it is a MAJOR finding that has
    not been CLOSED; minor findings and closed majors never block.  The
    blocking predicate was duplicated verbatim in each test, so it is
    factored into a single helper here.
    """

    @staticmethod
    def _is_blocking(finding):
        """Return True when *finding* is a MAJOR finding that is not CLOSED."""
        return (finding.finding_type == FindingTypeEnum.MAJOR and
                finding.status != FindingStatusEnum.CLOSED)

    def test_open_major_blocks_certification(self):
        """Open major findings should block certification."""
        finding = AuditFindingDB(
            id=str(uuid4()),
            finding_id="FIND-2026-001",
            finding_type=FindingTypeEnum.MAJOR,
            title="Critical finding",
            description="Test",
            auditor="auditor@test.de",
            status=FindingStatusEnum.OPEN,
        )

        # `is True` instead of `== True` (PEP 8 / flake8 E712).
        assert self._is_blocking(finding) is True

    def test_closed_major_allows_certification(self):
        """Closed major findings should not block certification."""
        finding = AuditFindingDB(
            id=str(uuid4()),
            finding_id="FIND-2026-001",
            finding_type=FindingTypeEnum.MAJOR,
            title="Critical finding",
            description="Test",
            auditor="auditor@test.de",
            status=FindingStatusEnum.CLOSED,
            closed_date=date.today(),
        )

        assert self._is_blocking(finding) is False

    def test_minor_findings_dont_block_certification(self):
        """Minor findings should not block certification."""
        finding = AuditFindingDB(
            id=str(uuid4()),
            finding_id="FIND-2026-002",
            finding_type=FindingTypeEnum.MINOR,
            title="Minor finding",
            description="Test",
            auditor="auditor@test.de",
            status=FindingStatusEnum.OPEN,
        )

        assert self._is_blocking(finding) is False
|
||||
Reference in New Issue
Block a user