feat: Anti-Fake-Evidence System (Phase 1-4b)
Implement full evidence integrity pipeline to prevent compliance theater:

- Confidence levels (E0-E4), truth status tracking, assertion engine
- Four-Eyes approval workflow, audit trail, reject endpoint
- Evidence distribution dashboard, LLM audit routes
- Traceability matrix (backend endpoint + Compliance Hub UI tab)
- Anti-fake badges, control status machine, normative patterns
- 2 migrations, 4 test suites, MkDocs documentation

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -65,6 +65,7 @@ class ControlStatusEnum(str, enum.Enum):
|
||||
FAIL = "fail" # Not passing
|
||||
NOT_APPLICABLE = "n/a" # Not applicable
|
||||
PLANNED = "planned" # Planned for implementation
|
||||
IN_PROGRESS = "in_progress" # Implementation in progress
|
||||
|
||||
|
||||
class RiskLevelEnum(str, enum.Enum):
|
||||
@@ -83,6 +84,26 @@ class EvidenceStatusEnum(str, enum.Enum):
|
||||
FAILED = "failed" # Failed validation
|
||||
|
||||
|
||||
class EvidenceConfidenceEnum(str, enum.Enum):
    """Confidence ladder for a piece of evidence (Anti-Fake-Evidence).

    Levels, from weakest to strongest:
        E0 -- generated / no real evidence (LLM output, placeholder)
        E1 -- uploaded but unreviewed (manual upload, no hash, no reviewer)
        E2 -- reviewed internally (human reviewed, hash verified)
        E3 -- observed by system (CI/CD pipeline, API with hash)
        E4 -- validated by an external auditor

    Inherits from str so values compare and serialize as plain strings.
    """

    E0 = "E0"
    E1 = "E1"
    E2 = "E2"
    E3 = "E3"
    E4 = "E4"
|
||||
|
||||
|
||||
class EvidenceTruthStatusEnum(str, enum.Enum):
    """Truth-status lifecycle for evidence (Anti-Fake-Evidence).

    Tracks how an evidence item came into existence and how far it has
    progressed toward auditor acceptance. Inherits from str so values
    behave as plain strings in comparisons and serialization.
    """

    GENERATED = "generated"                      # produced by an LLM / tool
    UPLOADED = "uploaded"                        # manually supplied by a user
    OBSERVED = "observed"                        # captured by an automated system
    VALIDATED_INTERNAL = "validated_internal"    # passed internal review
    REJECTED = "rejected"                        # failed review
    PROVIDED_TO_AUDITOR = "provided_to_auditor"  # handed to an external auditor
    ACCEPTED_BY_AUDITOR = "accepted_by_auditor"  # accepted by the auditor
|
||||
|
||||
|
||||
class ExportStatusEnum(str, enum.Enum):
|
||||
"""Status of audit export."""
|
||||
PENDING = "pending"
|
||||
@@ -239,6 +260,7 @@ class ControlDB(Base):
|
||||
# Status
|
||||
status = Column(Enum(ControlStatusEnum), default=ControlStatusEnum.PLANNED)
|
||||
status_notes = Column(Text)
|
||||
status_justification = Column(Text) # Required for n/a transitions
|
||||
|
||||
# Ownership & Review
|
||||
owner = Column(String(100)) # Responsible person/team
|
||||
@@ -321,6 +343,22 @@ class EvidenceDB(Base):
|
||||
ci_job_id = Column(String(100)) # CI/CD job reference
|
||||
uploaded_by = Column(String(100)) # User who uploaded
|
||||
|
||||
# Anti-Fake-Evidence: Confidence & Truth tracking
|
||||
confidence_level = Column(Enum(EvidenceConfidenceEnum), default=EvidenceConfidenceEnum.E1)
|
||||
truth_status = Column(Enum(EvidenceTruthStatusEnum), default=EvidenceTruthStatusEnum.UPLOADED)
|
||||
generation_mode = Column(String(100)) # e.g. "draft_assistance", "auto_generation"
|
||||
may_be_used_as_evidence = Column(Boolean, default=True)
|
||||
reviewed_by = Column(String(200))
|
||||
reviewed_at = Column(DateTime)
|
||||
|
||||
# Anti-Fake-Evidence Phase 2: Four-Eyes review
|
||||
approval_status = Column(String(30), default="none")
|
||||
first_reviewer = Column(String(200))
|
||||
first_reviewed_at = Column(DateTime)
|
||||
second_reviewer = Column(String(200))
|
||||
second_reviewed_at = Column(DateTime)
|
||||
requires_four_eyes = Column(Boolean, default=False)
|
||||
|
||||
# Timestamps
|
||||
collected_at = Column(DateTime, default=datetime.utcnow)
|
||||
created_at = Column(DateTime, default=datetime.utcnow)
|
||||
@@ -332,6 +370,7 @@ class EvidenceDB(Base):
|
||||
__table_args__ = (
|
||||
Index('ix_evidence_control_type', 'control_id', 'evidence_type'),
|
||||
Index('ix_evidence_status', 'status'),
|
||||
Index('ix_evidence_approval_status', 'approval_status'),
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
@@ -1464,3 +1503,77 @@ class ISMSReadinessCheckDB(Base):
|
||||
|
||||
def __repr__(self):
    """Debug representation: readiness check date plus overall status."""
    when, status = self.check_date, self.overall_status
    return f"<ISMSReadiness {when}: {status}>"
|
||||
|
||||
|
||||
class LLMGenerationAuditDB(Base):
    """
    Audit trail for LLM-generated content.

    Every piece of content generated by an LLM is recorded here with its
    truth_status and may_be_used_as_evidence flag, ensuring transparency
    about what is real evidence vs. generated assistance.
    """
    __tablename__ = 'compliance_llm_generation_audit'

    # Surrogate key: UUID4 stored as a 36-char string.
    id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
    # Multi-tenant scoping; indexed for per-tenant queries.
    tenant_id = Column(String(36), index=True)

    entity_type = Column(String(50), nullable=False)  # 'evidence', 'control', 'document'
    entity_id = Column(String(36))  # FK to generated entity
    generation_mode = Column(String(100), nullable=False)  # 'draft_assistance', 'auto_generation'
    # New rows start as GENERATED — LLM output is never evidence by default.
    truth_status = Column(Enum(EvidenceTruthStatusEnum), nullable=False, default=EvidenceTruthStatusEnum.GENERATED)
    may_be_used_as_evidence = Column(Boolean, nullable=False, default=False)

    # Provenance of the generation run.
    llm_model = Column(String(100))
    llm_provider = Column(String(50))  # 'ollama', 'anthropic'
    prompt_hash = Column(String(64))  # SHA-256 of prompt
    input_summary = Column(Text)
    output_summary = Column(Text)
    # Python attribute is 'extra_metadata' because 'metadata' is reserved on
    # SQLAlchemy declarative classes; DB column is still named "metadata".
    extra_metadata = Column("metadata", JSON, default=dict)

    # NOTE(review): datetime.utcnow produces naive timestamps and is
    # deprecated in Python 3.12+ — presumably intentional for consistency
    # with the rest of the schema; confirm before migrating to aware UTC.
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    __table_args__ = (
        # Composite index for "all generations for this entity" lookups.
        Index('ix_llm_audit_entity', 'entity_type', 'entity_id'),
    )

    def __repr__(self):
        return f"<LLMGenerationAudit {self.entity_type}:{self.entity_id} mode={self.generation_mode}>"
|
||||
|
||||
|
||||
class AssertionDB(Base):
    """
    Assertion tracking — separates claims from verified facts.

    Each sentence from a control/evidence/document is stored here with its
    classification (assertion vs. fact vs. rationale) and optional evidence linkage.
    """
    __tablename__ = 'compliance_assertions'

    # Surrogate key: UUID4 stored as a 36-char string.
    id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
    # Multi-tenant scoping; indexed for per-tenant queries.
    tenant_id = Column(String(36), index=True)

    # Source entity the sentence was extracted from.
    entity_type = Column(String(50), nullable=False)  # 'control', 'evidence', 'document', 'obligation'
    entity_id = Column(String(36), nullable=False)
    sentence_text = Column(Text, nullable=False)
    # Position of the sentence within the source entity's text.
    sentence_index = Column(Integer, nullable=False, default=0)

    # Classification: new rows default to an unverified 'assertion'.
    assertion_type = Column(String(20), nullable=False, default='assertion')  # 'assertion' | 'fact' | 'rationale'
    # JSON list of evidence ids backing this sentence (empty by default).
    evidence_ids = Column(JSON, default=list)
    confidence = Column(Float, default=0.0)
    # German normative tiers: 'pflicht' (must) | 'empfehlung' (should) | 'kann' (may).
    normative_tier = Column(String(20))

    # Set once a human verifies the sentence; both stay NULL until then.
    verified_by = Column(String(200))
    verified_at = Column(DateTime)

    # NOTE(review): naive-UTC timestamps via deprecated datetime.utcnow —
    # consistent with sibling models; confirm before switching to aware UTC.
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    __table_args__ = (
        # Composite index for "all assertions for this entity" lookups.
        Index('ix_assertion_entity', 'entity_type', 'entity_id'),
        Index('ix_assertion_type', 'assertion_type'),
    )

    def __repr__(self):
        return f"<Assertion {self.assertion_type}: {self.sentence_text[:50]}>"
|
||||
|
||||
Reference in New Issue
Block a user