feat: Anti-Fake-Evidence System (Phase 1-4b)
Implement full evidence integrity pipeline to prevent compliance theater: - Confidence levels (E0-E4), truth status tracking, assertion engine - Four-Eyes approval workflow, audit trail, reject endpoint - Evidence distribution dashboard, LLM audit routes - Traceability matrix (backend endpoint + Compliance Hub UI tab) - Anti-fake badges, control status machine, normative patterns - 2 migrations, 4 test suites, MkDocs documentation Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
191
backend-compliance/tests/test_anti_fake_evidence_phase3.py
Normal file
191
backend-compliance/tests/test_anti_fake_evidence_phase3.py
Normal file
@@ -0,0 +1,191 @@
|
||||
"""Tests for Anti-Fake-Evidence Phase 3: Enforcement.
|
||||
|
||||
~8 tests covering:
|
||||
- Evidence distribution endpoint (confidence counts, four-eyes pending)
|
||||
- Dashboard multi-score presence
|
||||
"""
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from unittest.mock import MagicMock, patch, PropertyMock
|
||||
from fastapi import FastAPI
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
from compliance.api.dashboard_routes import router as dashboard_router
|
||||
from compliance.db.models import EvidenceConfidenceEnum, EvidenceTruthStatusEnum
|
||||
from classroom_engine.database import get_db
|
||||
|
||||
# ---------------------------------------------------------------------------
# App setup with mocked DB dependency
# ---------------------------------------------------------------------------

app = FastAPI()
app.include_router(dashboard_router)

# One shared mock session for the whole module; tests reconfigure it as needed.
mock_db = MagicMock()


def override_get_db():
    """FastAPI dependency override: serve the shared mock session to every request."""
    yield mock_db


app.dependency_overrides[get_db] = override_get_db
client = TestClient(app)

# Fixed reference time so any date-dependent assertions stay deterministic.
NOW = datetime(2026, 3, 23, 14, 0, 0)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def make_evidence(confidence="E1", requires_four_eyes=False, approval_status="none"):
    """Build a mock evidence record with the given confidence/approval state.

    The record mimics the attributes the distribution endpoint reads:
    ``confidence_level.value``, ``requires_four_eyes`` and ``approval_status``.
    """
    record = MagicMock()
    record.confidence_level = MagicMock(value=confidence)
    record.requires_four_eyes = requires_four_eyes
    record.approval_status = approval_status
    return record
# ===========================================================================
# 1. TestEvidenceDistributionEndpoint
# ===========================================================================
class TestEvidenceDistributionEndpoint:
    """Test GET /dashboard/evidence-distribution endpoint.

    Each test patches ``EvidenceRepository`` inside the route module and wires
    the patched class to return a canned evidence list via ``_install_repo``.
    """

    @staticmethod
    def _install_repo(mock_repo_cls, evidence_list):
        """Wire the patched EvidenceRepository class so that
        ``EvidenceRepository(db).get_all()`` returns *evidence_list*.

        Replaces the copy-pasted three-line setup the tests previously
        repeated (and the dead ``_setup_evidence`` helper nothing called).
        """
        mock_repo = MagicMock()
        mock_repo.get_all.return_value = evidence_list
        mock_repo_cls.return_value = mock_repo
        return mock_repo

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_empty_db_returns_zero_counts(self, mock_repo_cls):
        """An empty evidence table yields all-zero counts."""
        self._install_repo(mock_repo_cls, [])

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["total"] == 0
        assert data["four_eyes_pending"] == 0
        assert data["by_confidence"] == {"E0": 0, "E1": 0, "E2": 0, "E3": 0, "E4": 0}

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_counts_by_confidence_level(self, mock_repo_cls):
        """Evidence is tallied per confidence level E0-E4."""
        evidence = [
            make_evidence("E0"),
            make_evidence("E1"),
            make_evidence("E1"),
            make_evidence("E2"),
            make_evidence("E3"),
            make_evidence("E3"),
            make_evidence("E3"),
            make_evidence("E4"),
        ]
        self._install_repo(mock_repo_cls, evidence)

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["total"] == 8
        assert data["by_confidence"]["E0"] == 1
        assert data["by_confidence"]["E1"] == 2
        assert data["by_confidence"]["E2"] == 1
        assert data["by_confidence"]["E3"] == 3
        assert data["by_confidence"]["E4"] == 1

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_four_eyes_pending_count(self, mock_repo_cls):
        """Only in-flight approval states count as four-eyes pending."""
        evidence = [
            make_evidence("E1", requires_four_eyes=True, approval_status="pending_first"),
            make_evidence("E2", requires_four_eyes=True, approval_status="first_approved"),
            make_evidence("E2", requires_four_eyes=True, approval_status="approved"),
            make_evidence("E1", requires_four_eyes=True, approval_status="rejected"),
            make_evidence("E1", requires_four_eyes=False, approval_status="none"),
        ]
        self._install_repo(mock_repo_cls, evidence)

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        # pending_first and first_approved are pending; approved and rejected are not
        assert data["four_eyes_pending"] == 2
        assert data["total"] == 5

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_null_confidence_defaults_to_e1(self, mock_repo_cls):
        """Evidence whose confidence_level is None is bucketed under E1."""
        e = MagicMock()
        e.confidence_level = None
        e.requires_four_eyes = False
        e.approval_status = "none"

        self._install_repo(mock_repo_cls, [e])

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["by_confidence"]["E1"] == 1
        assert data["total"] == 1

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_all_four_eyes_approved_zero_pending(self, mock_repo_cls):
        """Fully approved four-eyes evidence leaves nothing pending."""
        evidence = [
            make_evidence("E2", requires_four_eyes=True, approval_status="approved"),
            make_evidence("E3", requires_four_eyes=True, approval_status="approved"),
        ]
        self._install_repo(mock_repo_cls, evidence)

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["four_eyes_pending"] == 0
# ===========================================================================
# 2. TestDashboardMultiScore
# ===========================================================================
class TestDashboardMultiScore:
    """Verify the dashboard response schema carries the multi-dimensional score."""

    def test_dashboard_response_schema_includes_multi_score(self):
        """DashboardResponse schema must include the multi_score field."""
        from compliance.api.schemas import DashboardResponse

        declared = DashboardResponse.model_fields
        assert "multi_score" in declared, "DashboardResponse must have multi_score field"

    def test_multi_score_schema_has_required_fields(self):
        """MultiDimensionalScore schema should have all 7 fields."""
        from compliance.api.schemas import MultiDimensionalScore

        declared = MultiDimensionalScore.model_fields
        expected = (
            "requirement_coverage",
            "evidence_strength",
            "validation_quality",
            "evidence_freshness",
            "control_effectiveness",
            "overall_readiness",
            "hard_blocks",
        )
        for field in expected:
            assert field in declared, f"Missing field: {field}"

    def test_multi_score_default_values(self):
        """MultiDimensionalScore defaults should be sensible."""
        from compliance.api.schemas import MultiDimensionalScore

        score = MultiDimensionalScore()
        assert score.overall_readiness == 0.0
        assert score.hard_blocks == []
        assert score.requirement_coverage == 0.0
|
||||
Reference in New Issue
Block a user