Compare commits
3 Commits
1a63f5857b
...
2efc738803
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2efc738803 | ||
|
|
e6201d5239 | ||
|
|
48ca0a6bef |
@@ -591,12 +591,43 @@ async function handleV2Draft(body: Record<string, unknown>): Promise<NextRespons
|
|||||||
cacheStats: proseCache.getStats(),
|
cacheStats: proseCache.getStats(),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Anti-Fake-Evidence: Truth label for all LLM-generated content
|
||||||
|
const truthLabel = {
|
||||||
|
generation_mode: 'draft_assistance',
|
||||||
|
truth_status: 'generated',
|
||||||
|
may_be_used_as_evidence: false,
|
||||||
|
generated_by: 'system',
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fire-and-forget: persist LLM audit trail to backend
|
||||||
|
try {
|
||||||
|
const BACKEND_URL = process.env.BACKEND_COMPLIANCE_URL || 'http://backend-compliance:8002'
|
||||||
|
fetch(`${BACKEND_URL}/api/compliance/llm-audit`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({
|
||||||
|
entity_type: 'document',
|
||||||
|
entity_id: null,
|
||||||
|
generation_mode: 'draft_assistance',
|
||||||
|
truth_status: 'generated',
|
||||||
|
may_be_used_as_evidence: false,
|
||||||
|
llm_model: LLM_MODEL,
|
||||||
|
llm_provider: 'ollama',
|
||||||
|
input_summary: `${documentType} draft generation`,
|
||||||
|
output_summary: draft?.sections?.length ? `${draft.sections.length} sections generated` : 'draft generated',
|
||||||
|
}),
|
||||||
|
}).catch(() => {/* fire-and-forget */})
|
||||||
|
} catch {
|
||||||
|
// LLM audit persistence failure should not block the response
|
||||||
|
}
|
||||||
|
|
||||||
return NextResponse.json({
|
return NextResponse.json({
|
||||||
draft,
|
draft,
|
||||||
constraintCheck,
|
constraintCheck,
|
||||||
tokensUsed: Math.round(totalTokens),
|
tokensUsed: Math.round(totalTokens),
|
||||||
pipelineVersion: 'v2',
|
pipelineVersion: 'v2',
|
||||||
auditTrail,
|
auditTrail,
|
||||||
|
truthLabel,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -14,6 +14,76 @@ import { buildCrossCheckPrompt } from '@/lib/sdk/drafting-engine/prompts/validat
|
|||||||
const OLLAMA_URL = process.env.OLLAMA_URL || 'http://host.docker.internal:11434'
|
const OLLAMA_URL = process.env.OLLAMA_URL || 'http://host.docker.internal:11434'
|
||||||
const LLM_MODEL = process.env.COMPLIANCE_LLM_MODEL || 'qwen2.5vl:32b'
|
const LLM_MODEL = process.env.COMPLIANCE_LLM_MODEL || 'qwen2.5vl:32b'
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Anti-Fake-Evidence: Verbotene Formulierungen
|
||||||
|
*
|
||||||
|
* Flags formulations that falsely claim compliance without evidence.
|
||||||
|
* Only allowed when: control_status=pass AND confidence >= E2 AND
|
||||||
|
* truth_status in (validated_internal, accepted_by_auditor).
|
||||||
|
*/
|
||||||
|
interface EvidenceContext {
|
||||||
|
controlStatus?: string
|
||||||
|
confidenceLevel?: string
|
||||||
|
truthStatus?: string
|
||||||
|
}
|
||||||
|
|
||||||
|
const FORBIDDEN_PATTERNS: Array<{
|
||||||
|
pattern: RegExp
|
||||||
|
label: string
|
||||||
|
safeAlternative: string
|
||||||
|
}> = [
|
||||||
|
{ pattern: /ist\s+compliant/gi, label: 'ist compliant', safeAlternative: 'soll compliant sein' },
|
||||||
|
{ pattern: /erfüllt\s+vollständig/gi, label: 'erfüllt vollständig', safeAlternative: 'soll vollständig erfüllt werden' },
|
||||||
|
{ pattern: /wurde\s+geprüft/gi, label: 'wurde geprüft', safeAlternative: 'soll geprüft werden' },
|
||||||
|
{ pattern: /wurde\s+umgesetzt/gi, label: 'wurde umgesetzt', safeAlternative: 'ist zur Umsetzung vorgesehen' },
|
||||||
|
{ pattern: /ist\s+auditiert/gi, label: 'ist auditiert', safeAlternative: 'soll auditiert werden' },
|
||||||
|
{ pattern: /vollständig\s+implementiert/gi, label: 'vollständig implementiert', safeAlternative: 'Implementierung ist vorgesehen' },
|
||||||
|
{ pattern: /nachweislich\s+konform/gi, label: 'nachweislich konform', safeAlternative: 'Konformität ist nachzuweisen' },
|
||||||
|
]
|
||||||
|
|
||||||
|
const CONFIDENCE_ORDER: Record<string, number> = { E0: 0, E1: 1, E2: 2, E3: 3, E4: 4 }
|
||||||
|
const VALID_TRUTH_STATUSES = new Set(['validated_internal', 'accepted_by_auditor'])
|
||||||
|
|
||||||
|
function checkForbiddenFormulations(
|
||||||
|
content: string,
|
||||||
|
evidenceContext?: EvidenceContext,
|
||||||
|
): ValidationFinding[] {
|
||||||
|
const findings: ValidationFinding[] = []
|
||||||
|
|
||||||
|
if (!content) return findings
|
||||||
|
|
||||||
|
// If evidence context shows sufficient proof, allow the formulations
|
||||||
|
if (evidenceContext) {
|
||||||
|
const { controlStatus, confidenceLevel, truthStatus } = evidenceContext
|
||||||
|
const confLevel = CONFIDENCE_ORDER[confidenceLevel ?? 'E0'] ?? 0
|
||||||
|
if (
|
||||||
|
controlStatus === 'pass' &&
|
||||||
|
confLevel >= CONFIDENCE_ORDER.E2 &&
|
||||||
|
VALID_TRUTH_STATUSES.has(truthStatus ?? '')
|
||||||
|
) {
|
||||||
|
return findings // Formulations are backed by real evidence
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const { pattern, label, safeAlternative } of FORBIDDEN_PATTERNS) {
|
||||||
|
// Reset regex state for global patterns
|
||||||
|
pattern.lastIndex = 0
|
||||||
|
if (pattern.test(content)) {
|
||||||
|
findings.push({
|
||||||
|
id: `AFE-FORBIDDEN-${label.replace(/\s+/g, '_').toUpperCase()}`,
|
||||||
|
severity: 'error',
|
||||||
|
category: 'forbidden_formulation' as ValidationFinding['category'],
|
||||||
|
title: `Verbotene Formulierung: "${label}"`,
|
||||||
|
description: `Die Formulierung "${label}" impliziert eine nachgewiesene Compliance, die ohne ausreichenden Nachweis (Evidence >= E2, validiert) nicht verwendet werden darf.`,
|
||||||
|
documentType: 'vvt' as ScopeDocumentType,
|
||||||
|
suggestion: `Verwende stattdessen: "${safeAlternative}"`,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return findings
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Stufe 1: Deterministische Pruefung
|
* Stufe 1: Deterministische Pruefung
|
||||||
*/
|
*/
|
||||||
@@ -221,10 +291,18 @@ export async function POST(request: NextRequest) {
|
|||||||
// LLM unavailable, continue with deterministic results only
|
// LLM unavailable, continue with deterministic results only
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------
|
||||||
|
// Stufe 1b: Verbotene Formulierungen (Anti-Fake-Evidence)
|
||||||
|
// ---------------------------------------------------------------
|
||||||
|
const forbiddenFindings = checkForbiddenFormulations(
|
||||||
|
draftContent || '',
|
||||||
|
validationContext.evidenceContext,
|
||||||
|
)
|
||||||
|
|
||||||
// ---------------------------------------------------------------
|
// ---------------------------------------------------------------
|
||||||
// Combine results
|
// Combine results
|
||||||
// ---------------------------------------------------------------
|
// ---------------------------------------------------------------
|
||||||
const allFindings = [...deterministicFindings, ...llmFindings]
|
const allFindings = [...deterministicFindings, ...forbiddenFindings, ...llmFindings]
|
||||||
const errors = allFindings.filter(f => f.severity === 'error')
|
const errors = allFindings.filter(f => f.severity === 'error')
|
||||||
const warnings = allFindings.filter(f => f.severity === 'warning')
|
const warnings = allFindings.filter(f => f.severity === 'warning')
|
||||||
const suggestions = allFindings.filter(f => f.severity === 'suggestion')
|
const suggestions = allFindings.filter(f => f.severity === 'suggestion')
|
||||||
|
|||||||
468
admin-compliance/app/sdk/assertions/page.tsx
Normal file
468
admin-compliance/app/sdk/assertions/page.tsx
Normal file
@@ -0,0 +1,468 @@
|
|||||||
|
'use client'
|
||||||
|
|
||||||
|
import React, { useState, useEffect } from 'react'
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// TYPES
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
interface Assertion {
|
||||||
|
id: string
|
||||||
|
tenant_id: string | null
|
||||||
|
entity_type: string
|
||||||
|
entity_id: string
|
||||||
|
sentence_text: string
|
||||||
|
sentence_index: number
|
||||||
|
assertion_type: string // 'assertion' | 'fact' | 'rationale'
|
||||||
|
evidence_ids: string[]
|
||||||
|
confidence: number
|
||||||
|
normative_tier: string | null // 'pflicht' | 'empfehlung' | 'kann'
|
||||||
|
verified_by: string | null
|
||||||
|
verified_at: string | null
|
||||||
|
created_at: string | null
|
||||||
|
updated_at: string | null
|
||||||
|
}
|
||||||
|
|
||||||
|
interface AssertionSummary {
|
||||||
|
total_assertions: number
|
||||||
|
total_facts: number
|
||||||
|
total_rationale: number
|
||||||
|
unverified_count: number
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// CONSTANTS
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
const TIER_COLORS: Record<string, string> = {
|
||||||
|
pflicht: 'bg-red-100 text-red-700',
|
||||||
|
empfehlung: 'bg-yellow-100 text-yellow-700',
|
||||||
|
kann: 'bg-blue-100 text-blue-700',
|
||||||
|
}
|
||||||
|
|
||||||
|
const TIER_LABELS: Record<string, string> = {
|
||||||
|
pflicht: 'Pflicht',
|
||||||
|
empfehlung: 'Empfehlung',
|
||||||
|
kann: 'Kann',
|
||||||
|
}
|
||||||
|
|
||||||
|
const TYPE_COLORS: Record<string, string> = {
|
||||||
|
assertion: 'bg-orange-100 text-orange-700',
|
||||||
|
fact: 'bg-green-100 text-green-700',
|
||||||
|
rationale: 'bg-purple-100 text-purple-700',
|
||||||
|
}
|
||||||
|
|
||||||
|
const TYPE_LABELS: Record<string, string> = {
|
||||||
|
assertion: 'Behauptung',
|
||||||
|
fact: 'Fakt',
|
||||||
|
rationale: 'Begruendung',
|
||||||
|
}
|
||||||
|
|
||||||
|
const API_BASE = '/api/sdk/v1/compliance'
|
||||||
|
|
||||||
|
type TabKey = 'overview' | 'list' | 'extract'
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// ASSERTION CARD
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
function AssertionCard({
|
||||||
|
assertion,
|
||||||
|
onVerify,
|
||||||
|
}: {
|
||||||
|
assertion: Assertion
|
||||||
|
onVerify: (id: string) => void
|
||||||
|
}) {
|
||||||
|
const tierColor = assertion.normative_tier ? TIER_COLORS[assertion.normative_tier] || 'bg-gray-100 text-gray-600' : 'bg-gray-100 text-gray-600'
|
||||||
|
const tierLabel = assertion.normative_tier ? TIER_LABELS[assertion.normative_tier] || assertion.normative_tier : '—'
|
||||||
|
const typeColor = TYPE_COLORS[assertion.assertion_type] || 'bg-gray-100 text-gray-600'
|
||||||
|
const typeLabel = TYPE_LABELS[assertion.assertion_type] || assertion.assertion_type
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="bg-white rounded-xl border border-gray-200 p-5">
|
||||||
|
<div className="flex items-start justify-between gap-3">
|
||||||
|
<div className="flex-1">
|
||||||
|
<div className="flex items-center gap-2 mb-2">
|
||||||
|
<span className={`px-2 py-0.5 text-xs rounded font-medium ${tierColor}`}>
|
||||||
|
{tierLabel}
|
||||||
|
</span>
|
||||||
|
<span className={`px-2 py-0.5 text-xs rounded ${typeColor}`}>
|
||||||
|
{typeLabel}
|
||||||
|
</span>
|
||||||
|
{assertion.entity_type && (
|
||||||
|
<span className="px-2 py-0.5 text-xs bg-gray-100 text-gray-500 rounded">
|
||||||
|
{assertion.entity_type}: {assertion.entity_id?.slice(0, 8) || '—'}
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
{assertion.confidence > 0 && (
|
||||||
|
<span className="text-xs text-gray-400">
|
||||||
|
Konfidenz: {(assertion.confidence * 100).toFixed(0)}%
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
<p className="text-sm text-gray-900 leading-relaxed">
|
||||||
|
“{assertion.sentence_text}”
|
||||||
|
</p>
|
||||||
|
<div className="mt-2 flex items-center gap-3 text-xs text-gray-400">
|
||||||
|
{assertion.verified_by && (
|
||||||
|
<span className="text-green-600">
|
||||||
|
Verifiziert von {assertion.verified_by} am {assertion.verified_at ? new Date(assertion.verified_at).toLocaleDateString('de-DE') : '—'}
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
{assertion.evidence_ids.length > 0 && (
|
||||||
|
<span>
|
||||||
|
{assertion.evidence_ids.length} Evidence verknuepft
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div className="flex flex-col gap-1">
|
||||||
|
{assertion.assertion_type !== 'fact' && (
|
||||||
|
<button
|
||||||
|
onClick={() => onVerify(assertion.id)}
|
||||||
|
className="px-3 py-1.5 text-xs bg-green-600 text-white rounded-lg hover:bg-green-700 transition-colors whitespace-nowrap"
|
||||||
|
>
|
||||||
|
Als Fakt pruefen
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// MAIN PAGE
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
export default function AssertionsPage() {
|
||||||
|
const [activeTab, setActiveTab] = useState<TabKey>('overview')
|
||||||
|
const [summary, setSummary] = useState<AssertionSummary | null>(null)
|
||||||
|
const [assertions, setAssertions] = useState<Assertion[]>([])
|
||||||
|
const [loading, setLoading] = useState(true)
|
||||||
|
const [error, setError] = useState<string | null>(null)
|
||||||
|
|
||||||
|
// Filters
|
||||||
|
const [filterEntityType, setFilterEntityType] = useState('')
|
||||||
|
const [filterAssertionType, setFilterAssertionType] = useState('')
|
||||||
|
|
||||||
|
// Extract tab
|
||||||
|
const [extractText, setExtractText] = useState('')
|
||||||
|
const [extractEntityType, setExtractEntityType] = useState('control')
|
||||||
|
const [extractEntityId, setExtractEntityId] = useState('')
|
||||||
|
const [extracting, setExtracting] = useState(false)
|
||||||
|
const [extractedAssertions, setExtractedAssertions] = useState<Assertion[]>([])
|
||||||
|
|
||||||
|
// Verify dialog
|
||||||
|
const [verifyingId, setVerifyingId] = useState<string | null>(null)
|
||||||
|
const [verifyEmail, setVerifyEmail] = useState('')
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
loadSummary()
|
||||||
|
}, [])
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (activeTab === 'list') loadAssertions()
|
||||||
|
}, [activeTab, filterEntityType, filterAssertionType]) // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
|
const loadSummary = async () => {
|
||||||
|
try {
|
||||||
|
const res = await fetch(`${API_BASE}/assertions/summary`)
|
||||||
|
if (res.ok) setSummary(await res.json())
|
||||||
|
} catch { /* silent */ }
|
||||||
|
finally { setLoading(false) }
|
||||||
|
}
|
||||||
|
|
||||||
|
const loadAssertions = async () => {
|
||||||
|
setLoading(true)
|
||||||
|
try {
|
||||||
|
const params = new URLSearchParams()
|
||||||
|
if (filterEntityType) params.set('entity_type', filterEntityType)
|
||||||
|
if (filterAssertionType) params.set('assertion_type', filterAssertionType)
|
||||||
|
params.set('limit', '200')
|
||||||
|
|
||||||
|
const res = await fetch(`${API_BASE}/assertions?${params}`)
|
||||||
|
if (res.ok) {
|
||||||
|
const data = await res.json()
|
||||||
|
setAssertions(data.assertions || [])
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
setError('Assertions konnten nicht geladen werden')
|
||||||
|
} finally {
|
||||||
|
setLoading(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const handleExtract = async () => {
|
||||||
|
if (!extractText.trim()) { setError('Bitte Text eingeben'); return }
|
||||||
|
setExtracting(true)
|
||||||
|
setError(null)
|
||||||
|
setExtractedAssertions([])
|
||||||
|
try {
|
||||||
|
const res = await fetch(`${API_BASE}/assertions/extract`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({
|
||||||
|
text: extractText,
|
||||||
|
entity_type: extractEntityType || 'control',
|
||||||
|
entity_id: extractEntityId || undefined,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
if (!res.ok) {
|
||||||
|
const err = await res.json().catch(() => ({ detail: 'Extraktion fehlgeschlagen' }))
|
||||||
|
throw new Error(typeof err.detail === 'string' ? err.detail : JSON.stringify(err.detail))
|
||||||
|
}
|
||||||
|
const data = await res.json()
|
||||||
|
setExtractedAssertions(data.assertions || [])
|
||||||
|
// Refresh summary
|
||||||
|
loadSummary()
|
||||||
|
} catch (err) {
|
||||||
|
setError(err instanceof Error ? err.message : 'Extraktion fehlgeschlagen')
|
||||||
|
} finally {
|
||||||
|
setExtracting(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const handleVerify = async (assertionId: string) => {
|
||||||
|
setVerifyingId(assertionId)
|
||||||
|
}
|
||||||
|
|
||||||
|
const submitVerify = async () => {
|
||||||
|
if (!verifyingId || !verifyEmail.trim()) return
|
||||||
|
try {
|
||||||
|
const res = await fetch(`${API_BASE}/assertions/${verifyingId}/verify?verified_by=${encodeURIComponent(verifyEmail)}`, {
|
||||||
|
method: 'POST',
|
||||||
|
})
|
||||||
|
if (res.ok) {
|
||||||
|
setVerifyingId(null)
|
||||||
|
setVerifyEmail('')
|
||||||
|
loadAssertions()
|
||||||
|
loadSummary()
|
||||||
|
} else {
|
||||||
|
const err = await res.json().catch(() => ({ detail: 'Verifizierung fehlgeschlagen' }))
|
||||||
|
setError(typeof err.detail === 'string' ? err.detail : 'Verifizierung fehlgeschlagen')
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
setError('Netzwerkfehler')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const tabs: { key: TabKey; label: string }[] = [
|
||||||
|
{ key: 'overview', label: 'Uebersicht' },
|
||||||
|
{ key: 'list', label: 'Assertion-Liste' },
|
||||||
|
{ key: 'extract', label: 'Extraktion' },
|
||||||
|
]
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="space-y-6">
|
||||||
|
{/* Header */}
|
||||||
|
<div className="bg-white rounded-xl shadow-sm border p-6">
|
||||||
|
<h1 className="text-2xl font-bold text-slate-900">Assertions</h1>
|
||||||
|
<p className="text-slate-500 mt-1">
|
||||||
|
Behauptungen vs. Fakten in Compliance-Texten trennen und verifizieren.
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Tabs */}
|
||||||
|
<div className="bg-white rounded-xl shadow-sm border">
|
||||||
|
<div className="flex border-b">
|
||||||
|
{tabs.map(tab => (
|
||||||
|
<button
|
||||||
|
key={tab.key}
|
||||||
|
onClick={() => setActiveTab(tab.key)}
|
||||||
|
className={`px-6 py-3 text-sm font-medium transition-colors ${
|
||||||
|
activeTab === tab.key
|
||||||
|
? 'text-purple-600 border-b-2 border-purple-600'
|
||||||
|
: 'text-slate-500 hover:text-slate-700'
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
{tab.label}
|
||||||
|
</button>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Error */}
|
||||||
|
{error && (
|
||||||
|
<div className="p-4 bg-red-50 border border-red-200 rounded-lg text-red-700 flex items-center justify-between">
|
||||||
|
<span>{error}</span>
|
||||||
|
<button onClick={() => setError(null)} className="text-red-500 hover:text-red-700">×</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* ============================================================ */}
|
||||||
|
{/* TAB: Uebersicht */}
|
||||||
|
{/* ============================================================ */}
|
||||||
|
{activeTab === 'overview' && (
|
||||||
|
<>
|
||||||
|
{loading ? (
|
||||||
|
<div className="flex justify-center py-12">
|
||||||
|
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-purple-600" />
|
||||||
|
</div>
|
||||||
|
) : summary ? (
|
||||||
|
<div className="grid grid-cols-1 md:grid-cols-4 gap-4">
|
||||||
|
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||||
|
<div className="text-sm text-gray-500">Gesamt Assertions</div>
|
||||||
|
<div className="text-3xl font-bold text-gray-900">{summary.total_assertions}</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-white rounded-xl border border-green-200 p-6">
|
||||||
|
<div className="text-sm text-green-600">Verifizierte Fakten</div>
|
||||||
|
<div className="text-3xl font-bold text-green-600">{summary.total_facts}</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-white rounded-xl border border-purple-200 p-6">
|
||||||
|
<div className="text-sm text-purple-600">Begruendungen</div>
|
||||||
|
<div className="text-3xl font-bold text-purple-600">{summary.total_rationale}</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-white rounded-xl border border-orange-200 p-6">
|
||||||
|
<div className="text-sm text-orange-600">Unverifizizt</div>
|
||||||
|
<div className="text-3xl font-bold text-orange-600">{summary.unverified_count}</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div className="bg-white rounded-xl border border-gray-200 p-12 text-center">
|
||||||
|
<p className="text-gray-500">Keine Assertions vorhanden. Nutzen Sie die Extraktion, um Behauptungen aus Texten zu identifizieren.</p>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* ============================================================ */}
|
||||||
|
{/* TAB: Assertion-Liste */}
|
||||||
|
{/* ============================================================ */}
|
||||||
|
{activeTab === 'list' && (
|
||||||
|
<>
|
||||||
|
{/* Filters */}
|
||||||
|
<div className="flex items-center gap-4 flex-wrap">
|
||||||
|
<div>
|
||||||
|
<label className="block text-xs text-gray-500 mb-1">Entity-Typ</label>
|
||||||
|
<select value={filterEntityType} onChange={e => setFilterEntityType(e.target.value)}
|
||||||
|
className="px-3 py-1.5 border border-gray-300 rounded-lg text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
<option value="control">Control</option>
|
||||||
|
<option value="evidence">Evidence</option>
|
||||||
|
<option value="requirement">Requirement</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label className="block text-xs text-gray-500 mb-1">Assertion-Typ</label>
|
||||||
|
<select value={filterAssertionType} onChange={e => setFilterAssertionType(e.target.value)}
|
||||||
|
className="px-3 py-1.5 border border-gray-300 rounded-lg text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
<option value="assertion">Behauptung</option>
|
||||||
|
<option value="fact">Fakt</option>
|
||||||
|
<option value="rationale">Begruendung</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{loading ? (
|
||||||
|
<div className="flex justify-center py-12">
|
||||||
|
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-purple-600" />
|
||||||
|
</div>
|
||||||
|
) : assertions.length === 0 ? (
|
||||||
|
<div className="bg-white rounded-xl border border-gray-200 p-12 text-center">
|
||||||
|
<p className="text-gray-500">Keine Assertions gefunden.</p>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div className="space-y-3">
|
||||||
|
<p className="text-sm text-gray-500">{assertions.length} Assertions</p>
|
||||||
|
{assertions.map(a => (
|
||||||
|
<AssertionCard key={a.id} assertion={a} onVerify={handleVerify} />
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* ============================================================ */}
|
||||||
|
{/* TAB: Extraktion */}
|
||||||
|
{/* ============================================================ */}
|
||||||
|
{activeTab === 'extract' && (
|
||||||
|
<div className="bg-white rounded-xl shadow-sm border p-6">
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900 mb-4">Assertions aus Text extrahieren</h3>
|
||||||
|
<p className="text-sm text-gray-500 mb-4">
|
||||||
|
Geben Sie einen Compliance-Text ein. Das System identifiziert automatisch Behauptungen, Fakten und Begruendungen.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<div className="grid grid-cols-2 gap-4 mb-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Entity-Typ</label>
|
||||||
|
<select value={extractEntityType} onChange={e => setExtractEntityType(e.target.value)}
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent">
|
||||||
|
<option value="control">Control</option>
|
||||||
|
<option value="evidence">Evidence</option>
|
||||||
|
<option value="requirement">Requirement</option>
|
||||||
|
<option value="policy">Policy</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Entity-ID (optional)</label>
|
||||||
|
<input type="text" value={extractEntityId} onChange={e => setExtractEntityId(e.target.value)}
|
||||||
|
placeholder="z.B. GOV-001 oder UUID"
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent" />
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="mb-4">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Text</label>
|
||||||
|
<textarea
|
||||||
|
value={extractText}
|
||||||
|
onChange={e => setExtractText(e.target.value)}
|
||||||
|
placeholder="Die Organisation muss ein ISMS gemaess ISO 27001 implementieren. Es sollte regelmaessig ein internes Audit durchgefuehrt werden. Optional kann ein externer Auditor hinzugezogen werden."
|
||||||
|
rows={6}
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent resize-none"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<button
|
||||||
|
onClick={handleExtract}
|
||||||
|
disabled={extracting || !extractText.trim()}
|
||||||
|
className={`px-5 py-2 rounded-lg font-medium transition-colors ${
|
||||||
|
extracting || !extractText.trim()
|
||||||
|
? 'bg-gray-200 text-gray-400 cursor-not-allowed'
|
||||||
|
: 'bg-purple-600 text-white hover:bg-purple-700'
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
{extracting ? 'Extrahiere...' : 'Extrahieren'}
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{/* Extracted results */}
|
||||||
|
{extractedAssertions.length > 0 && (
|
||||||
|
<div className="mt-6">
|
||||||
|
<h4 className="text-sm font-semibold text-gray-800 mb-3">{extractedAssertions.length} Assertions extrahiert:</h4>
|
||||||
|
<div className="space-y-3">
|
||||||
|
{extractedAssertions.map(a => (
|
||||||
|
<AssertionCard key={a.id} assertion={a} onVerify={handleVerify} />
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Verify Dialog */}
|
||||||
|
{verifyingId && (
|
||||||
|
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/50" onClick={() => setVerifyingId(null)}>
|
||||||
|
<div className="bg-white rounded-2xl shadow-xl w-full max-w-md mx-4 p-6" onClick={e => e.stopPropagation()}>
|
||||||
|
<h2 className="text-lg font-bold text-gray-900 mb-4">Als Fakt verifizieren</h2>
|
||||||
|
<div className="mb-4">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Verifiziert von (E-Mail)</label>
|
||||||
|
<input type="email" value={verifyEmail} onChange={e => setVerifyEmail(e.target.value)}
|
||||||
|
placeholder="auditor@unternehmen.de"
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent" />
|
||||||
|
</div>
|
||||||
|
<div className="flex justify-end gap-3">
|
||||||
|
<button onClick={() => setVerifyingId(null)} className="px-4 py-2 text-sm text-gray-600 hover:bg-gray-100 rounded-lg transition-colors">
|
||||||
|
Abbrechen
|
||||||
|
</button>
|
||||||
|
<button onClick={submitVerify} disabled={!verifyEmail.trim()}
|
||||||
|
className="px-4 py-2 text-sm bg-green-600 text-white rounded-lg hover:bg-green-700 transition-colors disabled:opacity-50">
|
||||||
|
Verifizieren
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -12,6 +12,7 @@
|
|||||||
|
|
||||||
import { useState, useEffect } from 'react'
|
import { useState, useEffect } from 'react'
|
||||||
import Link from 'next/link'
|
import Link from 'next/link'
|
||||||
|
import { ConfidenceLevelBadge } from '../evidence/components/anti-fake-badges'
|
||||||
|
|
||||||
// Types
|
// Types
|
||||||
interface DashboardData {
|
interface DashboardData {
|
||||||
@@ -25,6 +26,15 @@ interface DashboardData {
|
|||||||
evidence_by_status: Record<string, number>
|
evidence_by_status: Record<string, number>
|
||||||
total_risks: number
|
total_risks: number
|
||||||
risks_by_level: Record<string, number>
|
risks_by_level: Record<string, number>
|
||||||
|
multi_score?: {
|
||||||
|
requirement_coverage: number
|
||||||
|
evidence_strength: number
|
||||||
|
validation_quality: number
|
||||||
|
evidence_freshness: number
|
||||||
|
control_effectiveness: number
|
||||||
|
overall_readiness: number
|
||||||
|
hard_blocks: string[]
|
||||||
|
} | null
|
||||||
}
|
}
|
||||||
|
|
||||||
interface Regulation {
|
interface Regulation {
|
||||||
@@ -106,7 +116,46 @@ interface ScoreSnapshot {
|
|||||||
created_at: string
|
created_at: string
|
||||||
}
|
}
|
||||||
|
|
||||||
type TabKey = 'overview' | 'roadmap' | 'modules' | 'trend'
|
interface TraceabilityAssertion {
|
||||||
|
id: string
|
||||||
|
sentence_text: string
|
||||||
|
assertion_type: string
|
||||||
|
confidence: number
|
||||||
|
verified: boolean
|
||||||
|
}
|
||||||
|
|
||||||
|
interface TraceabilityEvidence {
|
||||||
|
id: string
|
||||||
|
title: string
|
||||||
|
evidence_type: string
|
||||||
|
confidence_level: string
|
||||||
|
status: string
|
||||||
|
assertions: TraceabilityAssertion[]
|
||||||
|
}
|
||||||
|
|
||||||
|
interface TraceabilityCoverage {
|
||||||
|
has_evidence: boolean
|
||||||
|
has_assertions: boolean
|
||||||
|
all_assertions_verified: boolean
|
||||||
|
min_confidence_level: string | null
|
||||||
|
}
|
||||||
|
|
||||||
|
interface TraceabilityControl {
|
||||||
|
id: string
|
||||||
|
control_id: string
|
||||||
|
title: string
|
||||||
|
status: string
|
||||||
|
domain: string
|
||||||
|
evidence: TraceabilityEvidence[]
|
||||||
|
coverage: TraceabilityCoverage
|
||||||
|
}
|
||||||
|
|
||||||
|
interface TraceabilityMatrixData {
|
||||||
|
controls: TraceabilityControl[]
|
||||||
|
summary: Record<string, number>
|
||||||
|
}
|
||||||
|
|
||||||
|
type TabKey = 'overview' | 'roadmap' | 'modules' | 'trend' | 'traceability'
|
||||||
|
|
||||||
const DOMAIN_LABELS: Record<string, string> = {
|
const DOMAIN_LABELS: Record<string, string> = {
|
||||||
gov: 'Governance',
|
gov: 'Governance',
|
||||||
@@ -148,6 +197,17 @@ export default function ComplianceHubPage() {
|
|||||||
const [error, setError] = useState<string | null>(null)
|
const [error, setError] = useState<string | null>(null)
|
||||||
const [seeding, setSeeding] = useState(false)
|
const [seeding, setSeeding] = useState(false)
|
||||||
const [savingSnapshot, setSavingSnapshot] = useState(false)
|
const [savingSnapshot, setSavingSnapshot] = useState(false)
|
||||||
|
const [evidenceDistribution, setEvidenceDistribution] = useState<{
|
||||||
|
by_confidence: Record<string, number>
|
||||||
|
four_eyes_pending: number
|
||||||
|
total: number
|
||||||
|
} | null>(null)
|
||||||
|
const [traceabilityMatrix, setTraceabilityMatrix] = useState<TraceabilityMatrixData | null>(null)
|
||||||
|
const [traceabilityLoading, setTraceabilityLoading] = useState(false)
|
||||||
|
const [traceabilityFilter, setTraceabilityFilter] = useState<'all' | 'covered' | 'uncovered' | 'fully_verified'>('all')
|
||||||
|
const [traceabilityDomainFilter, setTraceabilityDomainFilter] = useState<string>('all')
|
||||||
|
const [expandedControls, setExpandedControls] = useState<Set<string>>(new Set())
|
||||||
|
const [expandedEvidence, setExpandedEvidence] = useState<Set<string>>(new Set())
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
loadData()
|
loadData()
|
||||||
@@ -157,6 +217,7 @@ export default function ComplianceHubPage() {
|
|||||||
if (activeTab === 'roadmap' && !roadmap) loadRoadmap()
|
if (activeTab === 'roadmap' && !roadmap) loadRoadmap()
|
||||||
if (activeTab === 'modules' && !moduleStatus) loadModuleStatus()
|
if (activeTab === 'modules' && !moduleStatus) loadModuleStatus()
|
||||||
if (activeTab === 'trend' && scoreHistory.length === 0) loadScoreHistory()
|
if (activeTab === 'trend' && scoreHistory.length === 0) loadScoreHistory()
|
||||||
|
if (activeTab === 'traceability' && !traceabilityMatrix) loadTraceabilityMatrix()
|
||||||
}, [activeTab]) // eslint-disable-line react-hooks/exhaustive-deps
|
}, [activeTab]) // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
const loadData = async () => {
|
const loadData = async () => {
|
||||||
@@ -182,6 +243,12 @@ export default function ComplianceHubPage() {
|
|||||||
const data = await actionsRes.json()
|
const data = await actionsRes.json()
|
||||||
setNextActions(data.actions || [])
|
setNextActions(data.actions || [])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Evidence distribution (Anti-Fake-Evidence Phase 3)
|
||||||
|
try {
|
||||||
|
const evidenceDistRes = await fetch('/api/sdk/v1/compliance/dashboard/evidence-distribution')
|
||||||
|
if (evidenceDistRes.ok) setEvidenceDistribution(await evidenceDistRes.json())
|
||||||
|
} catch { /* silent */ }
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error('Failed to load compliance data:', err)
|
console.error('Failed to load compliance data:', err)
|
||||||
setError('Verbindung zum Backend fehlgeschlagen')
|
setError('Verbindung zum Backend fehlgeschlagen')
|
||||||
@@ -214,6 +281,31 @@ export default function ComplianceHubPage() {
|
|||||||
} catch { /* silent */ }
|
} catch { /* silent */ }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const loadTraceabilityMatrix = async () => {
|
||||||
|
setTraceabilityLoading(true)
|
||||||
|
try {
|
||||||
|
const res = await fetch('/api/sdk/v1/compliance/dashboard/traceability-matrix')
|
||||||
|
if (res.ok) setTraceabilityMatrix(await res.json())
|
||||||
|
} catch { /* silent */ }
|
||||||
|
finally { setTraceabilityLoading(false) }
|
||||||
|
}
|
||||||
|
|
||||||
|
const toggleControlExpanded = (id: string) => {
|
||||||
|
setExpandedControls(prev => {
|
||||||
|
const next = new Set(prev)
|
||||||
|
if (next.has(id)) next.delete(id); else next.add(id)
|
||||||
|
return next
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const toggleEvidenceExpanded = (id: string) => {
|
||||||
|
setExpandedEvidence(prev => {
|
||||||
|
const next = new Set(prev)
|
||||||
|
if (next.has(id)) next.delete(id); else next.add(id)
|
||||||
|
return next
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
const saveSnapshot = async () => {
|
const saveSnapshot = async () => {
|
||||||
setSavingSnapshot(true)
|
setSavingSnapshot(true)
|
||||||
try {
|
try {
|
||||||
@@ -259,6 +351,7 @@ export default function ComplianceHubPage() {
|
|||||||
{ key: 'roadmap', label: 'Roadmap' },
|
{ key: 'roadmap', label: 'Roadmap' },
|
||||||
{ key: 'modules', label: 'Module' },
|
{ key: 'modules', label: 'Module' },
|
||||||
{ key: 'trend', label: 'Trend' },
|
{ key: 'trend', label: 'Trend' },
|
||||||
|
{ key: 'traceability', label: 'Traceability' },
|
||||||
]
|
]
|
||||||
|
|
||||||
return (
|
return (
|
||||||
@@ -411,6 +504,115 @@ export default function ComplianceHubPage() {
|
|||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Anti-Fake-Evidence Section (Phase 3) */}
|
||||||
|
{dashboard && (
|
||||||
|
<div className="bg-white rounded-xl shadow-sm border p-6">
|
||||||
|
<h3 className="text-lg font-semibold text-slate-900 mb-4">Anti-Fake-Evidence Status</h3>
|
||||||
|
|
||||||
|
{/* Confidence Distribution Bar */}
|
||||||
|
{evidenceDistribution && evidenceDistribution.total > 0 && (
|
||||||
|
<div className="mb-6">
|
||||||
|
<p className="text-sm text-slate-500 mb-2">Confidence-Verteilung ({evidenceDistribution.total} Nachweise)</p>
|
||||||
|
<div className="flex h-6 rounded-full overflow-hidden">
|
||||||
|
{(['E0', 'E1', 'E2', 'E3', 'E4'] as const).map(level => {
|
||||||
|
const count = evidenceDistribution.by_confidence[level] || 0
|
||||||
|
const pct = (count / evidenceDistribution.total) * 100
|
||||||
|
if (pct === 0) return null
|
||||||
|
const colors: Record<string, string> = {
|
||||||
|
E0: 'bg-red-400', E1: 'bg-yellow-400', E2: 'bg-blue-400', E3: 'bg-green-400', E4: 'bg-emerald-400'
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<div key={level} className={`${colors[level]} flex items-center justify-center text-xs text-white font-medium`}
|
||||||
|
style={{ width: `${pct}%` }} title={`${level}: ${count}`}>
|
||||||
|
{pct >= 10 ? `${level} (${count})` : ''}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
<div className="flex items-center gap-4 mt-2 text-xs text-slate-500">
|
||||||
|
{(['E0', 'E1', 'E2', 'E3', 'E4'] as const).map(level => {
|
||||||
|
const count = evidenceDistribution.by_confidence[level] || 0
|
||||||
|
const dotColors: Record<string, string> = {
|
||||||
|
E0: 'bg-red-400', E1: 'bg-yellow-400', E2: 'bg-blue-400', E3: 'bg-green-400', E4: 'bg-emerald-400'
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<span key={level} className="flex items-center gap-1">
|
||||||
|
<span className={`w-2 h-2 rounded-full ${dotColors[level]}`} />
|
||||||
|
{level}: {count}
|
||||||
|
</span>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Multi-Score Dimensions */}
|
||||||
|
{dashboard.multi_score && (
|
||||||
|
<div className="mb-6">
|
||||||
|
<p className="text-sm text-slate-500 mb-2">Multi-dimensionaler Score</p>
|
||||||
|
<div className="space-y-2">
|
||||||
|
{([
|
||||||
|
{ key: 'requirement_coverage', label: 'Anforderungsabdeckung', color: 'bg-blue-500' },
|
||||||
|
{ key: 'evidence_strength', label: 'Evidence-Staerke', color: 'bg-green-500' },
|
||||||
|
{ key: 'validation_quality', label: 'Validierungsqualitaet', color: 'bg-purple-500' },
|
||||||
|
{ key: 'evidence_freshness', label: 'Aktualitaet', color: 'bg-yellow-500' },
|
||||||
|
{ key: 'control_effectiveness', label: 'Control-Wirksamkeit', color: 'bg-indigo-500' },
|
||||||
|
] as const).map(dim => {
|
||||||
|
const value = (dashboard.multi_score as Record<string, number>)[dim.key] || 0
|
||||||
|
return (
|
||||||
|
<div key={dim.key} className="flex items-center gap-3">
|
||||||
|
<span className="text-xs text-slate-600 w-44 truncate">{dim.label}</span>
|
||||||
|
<div className="flex-1 h-2 bg-slate-200 rounded-full overflow-hidden">
|
||||||
|
<div className={`h-full ${dim.color} rounded-full transition-all`} style={{ width: `${value}%` }} />
|
||||||
|
</div>
|
||||||
|
<span className="text-xs text-slate-600 w-12 text-right">{typeof value === 'number' ? value.toFixed(0) : value}%</span>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
<div className="flex items-center gap-3 pt-2 border-t border-slate-100">
|
||||||
|
<span className="text-xs font-semibold text-slate-700 w-44">Audit-Readiness</span>
|
||||||
|
<div className="flex-1 h-3 bg-slate-200 rounded-full overflow-hidden">
|
||||||
|
<div className={`h-full rounded-full transition-all ${
|
||||||
|
(dashboard.multi_score.overall_readiness || 0) >= 80 ? 'bg-green-500' :
|
||||||
|
(dashboard.multi_score.overall_readiness || 0) >= 60 ? 'bg-yellow-500' : 'bg-red-500'
|
||||||
|
}`} style={{ width: `${dashboard.multi_score.overall_readiness || 0}%` }} />
|
||||||
|
</div>
|
||||||
|
<span className="text-xs font-semibold text-slate-700 w-12 text-right">
|
||||||
|
{typeof dashboard.multi_score.overall_readiness === 'number' ? dashboard.multi_score.overall_readiness.toFixed(0) : 0}%
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Bottom row: Four-Eyes + Hard Blocks */}
|
||||||
|
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||||
|
<div className="text-center p-3 rounded-lg bg-yellow-50">
|
||||||
|
<div className="text-2xl font-bold text-yellow-700">{evidenceDistribution?.four_eyes_pending || 0}</div>
|
||||||
|
<div className="text-xs text-yellow-600 mt-1">Four-Eyes Reviews ausstehend</div>
|
||||||
|
</div>
|
||||||
|
{dashboard.multi_score?.hard_blocks && dashboard.multi_score.hard_blocks.length > 0 ? (
|
||||||
|
<div className="p-3 rounded-lg bg-red-50">
|
||||||
|
<div className="text-xs font-medium text-red-700 mb-1">Hard Blocks ({dashboard.multi_score.hard_blocks.length})</div>
|
||||||
|
<ul className="space-y-1">
|
||||||
|
{dashboard.multi_score.hard_blocks.slice(0, 3).map((block: string, i: number) => (
|
||||||
|
<li key={i} className="text-xs text-red-600 flex items-start gap-1">
|
||||||
|
<span className="text-red-400 mt-0.5">•</span>
|
||||||
|
<span>{block}</span>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div className="text-center p-3 rounded-lg bg-green-50">
|
||||||
|
<div className="text-2xl font-bold text-green-700">0</div>
|
||||||
|
<div className="text-xs text-green-600 mt-1">Keine Hard Blocks</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
{/* Next Actions + Findings */}
|
{/* Next Actions + Findings */}
|
||||||
<div className="grid grid-cols-1 lg:grid-cols-2 gap-4">
|
<div className="grid grid-cols-1 lg:grid-cols-2 gap-4">
|
||||||
{/* Next Actions */}
|
{/* Next Actions */}
|
||||||
@@ -805,6 +1007,232 @@ export default function ComplianceHubPage() {
|
|||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
|
{/* Traceability Tab */}
|
||||||
|
{activeTab === 'traceability' && (
|
||||||
|
<div className="p-6 space-y-6">
|
||||||
|
{traceabilityLoading ? (
|
||||||
|
<div className="flex items-center justify-center py-12">
|
||||||
|
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-purple-600" />
|
||||||
|
<span className="ml-3 text-slate-500">Traceability Matrix wird geladen...</span>
|
||||||
|
</div>
|
||||||
|
) : !traceabilityMatrix ? (
|
||||||
|
<div className="text-center py-12 text-slate-500">
|
||||||
|
Keine Daten verfuegbar. Stellen Sie sicher, dass Controls und Evidence vorhanden sind.
|
||||||
|
</div>
|
||||||
|
) : (() => {
|
||||||
|
const summary = traceabilityMatrix.summary
|
||||||
|
const totalControls = summary.total_controls || 0
|
||||||
|
const covered = summary.covered || 0
|
||||||
|
const fullyVerified = summary.fully_verified || 0
|
||||||
|
const uncovered = summary.uncovered || 0
|
||||||
|
|
||||||
|
const filteredControls = (traceabilityMatrix.controls || []).filter(ctrl => {
|
||||||
|
if (traceabilityFilter === 'covered' && !ctrl.coverage.has_evidence) return false
|
||||||
|
if (traceabilityFilter === 'uncovered' && ctrl.coverage.has_evidence) return false
|
||||||
|
if (traceabilityFilter === 'fully_verified' && !ctrl.coverage.all_assertions_verified) return false
|
||||||
|
if (traceabilityDomainFilter !== 'all' && ctrl.domain !== traceabilityDomainFilter) return false
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
const domains = [...new Set(traceabilityMatrix.controls.map(c => c.domain))].sort()
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
{/* Summary Cards */}
|
||||||
|
<div className="grid grid-cols-2 md:grid-cols-4 gap-4">
|
||||||
|
<div className="bg-purple-50 border border-purple-200 rounded-lg p-4">
|
||||||
|
<div className="text-2xl font-bold text-purple-700">{totalControls}</div>
|
||||||
|
<div className="text-sm text-purple-600">Total Controls</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-blue-50 border border-blue-200 rounded-lg p-4">
|
||||||
|
<div className="text-2xl font-bold text-blue-700">{covered}</div>
|
||||||
|
<div className="text-sm text-blue-600">Abgedeckt</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-green-50 border border-green-200 rounded-lg p-4">
|
||||||
|
<div className="text-2xl font-bold text-green-700">{fullyVerified}</div>
|
||||||
|
<div className="text-sm text-green-600">Vollst. verifiziert</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-red-50 border border-red-200 rounded-lg p-4">
|
||||||
|
<div className="text-2xl font-bold text-red-700">{uncovered}</div>
|
||||||
|
<div className="text-sm text-red-600">Unabgedeckt</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Filter Bar */}
|
||||||
|
<div className="flex flex-wrap gap-4 items-center">
|
||||||
|
<div className="flex gap-1">
|
||||||
|
{([
|
||||||
|
{ key: 'all', label: 'Alle' },
|
||||||
|
{ key: 'covered', label: 'Abgedeckt' },
|
||||||
|
{ key: 'uncovered', label: 'Nicht abgedeckt' },
|
||||||
|
{ key: 'fully_verified', label: 'Vollst. verifiziert' },
|
||||||
|
] as const).map(f => (
|
||||||
|
<button
|
||||||
|
key={f.key}
|
||||||
|
onClick={() => setTraceabilityFilter(f.key)}
|
||||||
|
className={`px-3 py-1 text-xs rounded-full transition-colors ${
|
||||||
|
traceabilityFilter === f.key
|
||||||
|
? 'bg-purple-600 text-white'
|
||||||
|
: 'bg-slate-100 text-slate-600 hover:bg-slate-200'
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
{f.label}
|
||||||
|
</button>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
<div className="h-4 w-px bg-slate-300" />
|
||||||
|
<div className="flex gap-1 flex-wrap">
|
||||||
|
<button
|
||||||
|
onClick={() => setTraceabilityDomainFilter('all')}
|
||||||
|
className={`px-3 py-1 text-xs rounded-full transition-colors ${
|
||||||
|
traceabilityDomainFilter === 'all'
|
||||||
|
? 'bg-purple-600 text-white'
|
||||||
|
: 'bg-slate-100 text-slate-600 hover:bg-slate-200'
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
Alle Domains
|
||||||
|
</button>
|
||||||
|
{domains.map(d => (
|
||||||
|
<button
|
||||||
|
key={d}
|
||||||
|
onClick={() => setTraceabilityDomainFilter(d)}
|
||||||
|
className={`px-3 py-1 text-xs rounded-full transition-colors ${
|
||||||
|
traceabilityDomainFilter === d
|
||||||
|
? 'bg-purple-600 text-white'
|
||||||
|
: 'bg-slate-100 text-slate-600 hover:bg-slate-200'
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
{DOMAIN_LABELS[d] || d}
|
||||||
|
</button>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Controls List */}
|
||||||
|
<div className="space-y-2">
|
||||||
|
{filteredControls.length === 0 ? (
|
||||||
|
<div className="text-center py-8 text-slate-400">
|
||||||
|
Keine Controls fuer diesen Filter gefunden.
|
||||||
|
</div>
|
||||||
|
) : filteredControls.map(ctrl => {
|
||||||
|
const isExpanded = expandedControls.has(ctrl.id)
|
||||||
|
const coverageIcon = ctrl.coverage.all_assertions_verified
|
||||||
|
? { symbol: '\u2713', color: 'text-green-600 bg-green-50' }
|
||||||
|
: ctrl.coverage.has_evidence
|
||||||
|
? { symbol: '\u25D0', color: 'text-yellow-600 bg-yellow-50' }
|
||||||
|
: { symbol: '\u2717', color: 'text-red-600 bg-red-50' }
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div key={ctrl.id} className="border rounded-lg overflow-hidden">
|
||||||
|
{/* Control Row */}
|
||||||
|
<button
|
||||||
|
onClick={() => toggleControlExpanded(ctrl.id)}
|
||||||
|
className="w-full flex items-center gap-3 px-4 py-3 text-left hover:bg-slate-50 transition-colors"
|
||||||
|
>
|
||||||
|
<span className="text-slate-400 text-xs">{isExpanded ? '\u25BC' : '\u25B6'}</span>
|
||||||
|
<span className={`w-7 h-7 flex items-center justify-center rounded-full text-sm font-medium ${coverageIcon.color}`}>
|
||||||
|
{coverageIcon.symbol}
|
||||||
|
</span>
|
||||||
|
<code className="text-xs bg-slate-100 px-2 py-0.5 rounded text-slate-600 font-mono">{ctrl.control_id}</code>
|
||||||
|
<span className="text-sm text-slate-800 flex-1 truncate">{ctrl.title}</span>
|
||||||
|
<span className="text-xs bg-slate-100 text-slate-500 px-2 py-0.5 rounded">{DOMAIN_LABELS[ctrl.domain] || ctrl.domain}</span>
|
||||||
|
<span className={`text-xs px-2 py-0.5 rounded ${
|
||||||
|
ctrl.status === 'implemented' ? 'bg-green-100 text-green-700'
|
||||||
|
: ctrl.status === 'in_progress' ? 'bg-blue-100 text-blue-700'
|
||||||
|
: 'bg-slate-100 text-slate-600'
|
||||||
|
}`}>
|
||||||
|
{ctrl.status}
|
||||||
|
</span>
|
||||||
|
<ConfidenceLevelBadge level={ctrl.coverage.min_confidence_level} />
|
||||||
|
<span className="text-xs text-slate-400 min-w-[3rem] text-right">
|
||||||
|
{ctrl.evidence.length} Ev.
|
||||||
|
</span>
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{/* Expanded: Evidence list */}
|
||||||
|
{isExpanded && (
|
||||||
|
<div className="border-t bg-slate-50">
|
||||||
|
{ctrl.evidence.length === 0 ? (
|
||||||
|
<div className="px-8 py-3 text-xs text-slate-400 italic">
|
||||||
|
Kein Evidence verknuepft.
|
||||||
|
</div>
|
||||||
|
) : ctrl.evidence.map(ev => {
|
||||||
|
const evExpanded = expandedEvidence.has(ev.id)
|
||||||
|
return (
|
||||||
|
<div key={ev.id} className="border-b last:border-b-0">
|
||||||
|
<button
|
||||||
|
onClick={() => toggleEvidenceExpanded(ev.id)}
|
||||||
|
className="w-full flex items-center gap-3 px-8 py-2 text-left hover:bg-slate-100 transition-colors"
|
||||||
|
>
|
||||||
|
<span className="text-slate-400 text-xs">{evExpanded ? '\u25BC' : '\u25B6'}</span>
|
||||||
|
<span className="text-sm text-slate-700 flex-1 truncate">{ev.title}</span>
|
||||||
|
<span className="text-xs bg-white border px-2 py-0.5 rounded text-slate-500">{ev.evidence_type}</span>
|
||||||
|
<ConfidenceLevelBadge level={ev.confidence_level} />
|
||||||
|
<span className={`text-xs px-2 py-0.5 rounded ${
|
||||||
|
ev.status === 'valid' ? 'bg-green-100 text-green-700'
|
||||||
|
: ev.status === 'expired' ? 'bg-red-100 text-red-700'
|
||||||
|
: 'bg-slate-100 text-slate-600'
|
||||||
|
}`}>
|
||||||
|
{ev.status}
|
||||||
|
</span>
|
||||||
|
<span className="text-xs text-slate-400 min-w-[3rem] text-right">
|
||||||
|
{ev.assertions.length} Ass.
|
||||||
|
</span>
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{/* Expanded: Assertions list */}
|
||||||
|
{evExpanded && ev.assertions.length > 0 && (
|
||||||
|
<div className="bg-white border-t">
|
||||||
|
<table className="w-full text-xs">
|
||||||
|
<thead className="bg-slate-50">
|
||||||
|
<tr>
|
||||||
|
<th className="px-12 py-1.5 text-left text-slate-500 font-medium">Aussage</th>
|
||||||
|
<th className="px-3 py-1.5 text-center text-slate-500 font-medium w-20">Typ</th>
|
||||||
|
<th className="px-3 py-1.5 text-center text-slate-500 font-medium w-24">Konfidenz</th>
|
||||||
|
<th className="px-3 py-1.5 text-center text-slate-500 font-medium w-16">Status</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody className="divide-y divide-slate-100">
|
||||||
|
{ev.assertions.map(a => (
|
||||||
|
<tr key={a.id} className="hover:bg-slate-50">
|
||||||
|
<td className="px-12 py-1.5 text-slate-700">{a.sentence_text}</td>
|
||||||
|
<td className="px-3 py-1.5 text-center text-slate-500">{a.assertion_type}</td>
|
||||||
|
<td className="px-3 py-1.5 text-center">
|
||||||
|
<span className={`font-medium ${
|
||||||
|
a.confidence >= 0.8 ? 'text-green-600'
|
||||||
|
: a.confidence >= 0.5 ? 'text-yellow-600'
|
||||||
|
: 'text-red-600'
|
||||||
|
}`}>
|
||||||
|
{(a.confidence * 100).toFixed(0)}%
|
||||||
|
</span>
|
||||||
|
</td>
|
||||||
|
<td className="px-3 py-1.5 text-center">
|
||||||
|
{a.verified
|
||||||
|
? <span className="text-green-600 font-medium">{'\u2713'}</span>
|
||||||
|
: <span className="text-slate-400">{'\u2717'}</span>
|
||||||
|
}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
))}
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
)
|
||||||
|
})()}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -196,7 +196,15 @@ function ControlCard({
|
|||||||
{/* Linked Evidence */}
|
{/* Linked Evidence */}
|
||||||
{control.linkedEvidence.length > 0 && (
|
{control.linkedEvidence.length > 0 && (
|
||||||
<div className="mt-3 pt-3 border-t border-gray-100">
|
<div className="mt-3 pt-3 border-t border-gray-100">
|
||||||
<span className="text-xs text-gray-500 mb-1 block">Nachweise:</span>
|
<span className="text-xs text-gray-500 mb-1 block">
|
||||||
|
Nachweise: {control.linkedEvidence.length}
|
||||||
|
{(() => {
|
||||||
|
const e2plus = control.linkedEvidence.filter((ev: { confidenceLevel?: string }) =>
|
||||||
|
ev.confidenceLevel && ['E2', 'E3', 'E4'].includes(ev.confidenceLevel)
|
||||||
|
).length
|
||||||
|
return e2plus > 0 ? ` (${e2plus} E2+)` : ''
|
||||||
|
})()}
|
||||||
|
</span>
|
||||||
<div className="flex items-center gap-1 flex-wrap">
|
<div className="flex items-center gap-1 flex-wrap">
|
||||||
{control.linkedEvidence.map(ev => (
|
{control.linkedEvidence.map(ev => (
|
||||||
<span key={ev.id} className={`px-2 py-0.5 text-xs rounded ${
|
<span key={ev.id} className={`px-2 py-0.5 text-xs rounded ${
|
||||||
@@ -205,6 +213,9 @@ function ControlCard({
|
|||||||
'bg-yellow-50 text-yellow-700'
|
'bg-yellow-50 text-yellow-700'
|
||||||
}`}>
|
}`}>
|
||||||
{ev.title}
|
{ev.title}
|
||||||
|
{(ev as { confidenceLevel?: string }).confidenceLevel && (
|
||||||
|
<span className="ml-1 opacity-70">({(ev as { confidenceLevel?: string }).confidenceLevel})</span>
|
||||||
|
)}
|
||||||
</span>
|
</span>
|
||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
@@ -359,6 +370,49 @@ interface RAGControlSuggestion {
|
|||||||
// MAIN PAGE
|
// MAIN PAGE
|
||||||
// =============================================================================
|
// =============================================================================
|
||||||
|
|
||||||
|
function TransitionErrorBanner({
|
||||||
|
controlId,
|
||||||
|
violations,
|
||||||
|
onDismiss,
|
||||||
|
}: {
|
||||||
|
controlId: string
|
||||||
|
violations: string[]
|
||||||
|
onDismiss: () => void
|
||||||
|
}) {
|
||||||
|
return (
|
||||||
|
<div className="p-4 bg-orange-50 border border-orange-200 rounded-lg">
|
||||||
|
<div className="flex items-start justify-between">
|
||||||
|
<div className="flex items-start gap-3">
|
||||||
|
<svg className="w-5 h-5 text-orange-600 mt-0.5 flex-shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-3L13.732 4c-.77-1.333-2.694-1.333-3.464 0L3.34 16c-.77 1.333.192 3 1.732 3z" />
|
||||||
|
</svg>
|
||||||
|
<div>
|
||||||
|
<h4 className="font-medium text-orange-800">
|
||||||
|
Status-Transition blockiert ({controlId})
|
||||||
|
</h4>
|
||||||
|
<ul className="mt-2 space-y-1">
|
||||||
|
{violations.map((v, i) => (
|
||||||
|
<li key={i} className="text-sm text-orange-700 flex items-start gap-2">
|
||||||
|
<span className="text-orange-400 mt-0.5">•</span>
|
||||||
|
<span>{v}</span>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</ul>
|
||||||
|
<a href="/sdk/evidence" className="mt-2 inline-block text-sm text-purple-600 hover:text-purple-700 font-medium">
|
||||||
|
Evidence hinzufuegen →
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<button onClick={onDismiss} className="text-orange-400 hover:text-orange-600 ml-4">
|
||||||
|
<svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
export default function ControlsPage() {
|
export default function ControlsPage() {
|
||||||
const { state, dispatch } = useSDK()
|
const { state, dispatch } = useSDK()
|
||||||
const router = useRouter()
|
const router = useRouter()
|
||||||
@@ -373,6 +427,9 @@ export default function ControlsPage() {
|
|||||||
const [showRagPanel, setShowRagPanel] = useState(false)
|
const [showRagPanel, setShowRagPanel] = useState(false)
|
||||||
const [selectedRequirementId, setSelectedRequirementId] = useState<string>('')
|
const [selectedRequirementId, setSelectedRequirementId] = useState<string>('')
|
||||||
|
|
||||||
|
// Transition error from Anti-Fake-Evidence state machine (409 Conflict)
|
||||||
|
const [transitionError, setTransitionError] = useState<{ controlId: string; violations: string[] } | null>(null)
|
||||||
|
|
||||||
// Track effectiveness locally as it's not in the SDK state type
|
// Track effectiveness locally as it's not in the SDK state type
|
||||||
const [effectivenessMap, setEffectivenessMap] = useState<Record<string, number>>({})
|
const [effectivenessMap, setEffectivenessMap] = useState<Record<string, number>>({})
|
||||||
// Track linked evidence per control
|
// Track linked evidence per control
|
||||||
@@ -385,7 +442,7 @@ export default function ControlsPage() {
|
|||||||
const data = await res.json()
|
const data = await res.json()
|
||||||
const allEvidence = data.evidence || data
|
const allEvidence = data.evidence || data
|
||||||
if (Array.isArray(allEvidence)) {
|
if (Array.isArray(allEvidence)) {
|
||||||
const map: Record<string, { id: string; title: string; status: string }[]> = {}
|
const map: Record<string, { id: string; title: string; status: string; confidenceLevel?: string }[]> = {}
|
||||||
for (const ev of allEvidence) {
|
for (const ev of allEvidence) {
|
||||||
const ctrlId = ev.control_id || ''
|
const ctrlId = ev.control_id || ''
|
||||||
if (!map[ctrlId]) map[ctrlId] = []
|
if (!map[ctrlId]) map[ctrlId] = []
|
||||||
@@ -393,6 +450,7 @@ export default function ControlsPage() {
|
|||||||
id: ev.id,
|
id: ev.id,
|
||||||
title: ev.title || ev.name || 'Nachweis',
|
title: ev.title || ev.name || 'Nachweis',
|
||||||
status: ev.status || 'pending',
|
status: ev.status || 'pending',
|
||||||
|
confidenceLevel: ev.confidence_level || undefined,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
setEvidenceMap(map)
|
setEvidenceMap(map)
|
||||||
@@ -483,20 +541,56 @@ export default function ControlsPage() {
|
|||||||
: 0
|
: 0
|
||||||
const partialCount = displayControls.filter(c => c.displayStatus === 'partial').length
|
const partialCount = displayControls.filter(c => c.displayStatus === 'partial').length
|
||||||
|
|
||||||
const handleStatusChange = async (controlId: string, status: ImplementationStatus) => {
|
const handleStatusChange = async (controlId: string, newStatus: ImplementationStatus) => {
|
||||||
|
// Remember old status for rollback
|
||||||
|
const oldControl = state.controls.find(c => c.id === controlId)
|
||||||
|
const oldStatus = oldControl?.implementationStatus
|
||||||
|
|
||||||
|
// Optimistic update
|
||||||
dispatch({
|
dispatch({
|
||||||
type: 'UPDATE_CONTROL',
|
type: 'UPDATE_CONTROL',
|
||||||
payload: { id: controlId, data: { implementationStatus: status } },
|
payload: { id: controlId, data: { implementationStatus: newStatus } },
|
||||||
})
|
})
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await fetch(`/api/sdk/v1/compliance/controls/${controlId}`, {
|
const res = await fetch(`/api/sdk/v1/compliance/controls/${controlId}`, {
|
||||||
method: 'PUT',
|
method: 'PUT',
|
||||||
headers: { 'Content-Type': 'application/json' },
|
headers: { 'Content-Type': 'application/json' },
|
||||||
body: JSON.stringify({ implementation_status: status }),
|
body: JSON.stringify({ implementation_status: newStatus }),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
if (!res.ok) {
|
||||||
|
// Rollback optimistic update
|
||||||
|
if (oldStatus) {
|
||||||
|
dispatch({
|
||||||
|
type: 'UPDATE_CONTROL',
|
||||||
|
payload: { id: controlId, data: { implementationStatus: oldStatus } },
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const err = await res.json().catch(() => ({ detail: 'Status-Aenderung fehlgeschlagen' }))
|
||||||
|
|
||||||
|
if (res.status === 409 && err.detail?.violations) {
|
||||||
|
setTransitionError({ controlId, violations: err.detail.violations })
|
||||||
|
} else {
|
||||||
|
const msg = typeof err.detail === 'string' ? err.detail : err.detail?.error || 'Status-Aenderung fehlgeschlagen'
|
||||||
|
setError(msg)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Clear any previous transition error for this control
|
||||||
|
if (transitionError?.controlId === controlId) {
|
||||||
|
setTransitionError(null)
|
||||||
|
}
|
||||||
|
}
|
||||||
} catch {
|
} catch {
|
||||||
// Silently fail — SDK state is already updated
|
// Network error — rollback
|
||||||
|
if (oldStatus) {
|
||||||
|
dispatch({
|
||||||
|
type: 'UPDATE_CONTROL',
|
||||||
|
payload: { id: controlId, data: { implementationStatus: oldStatus } },
|
||||||
|
})
|
||||||
|
}
|
||||||
|
setError('Netzwerkfehler bei Status-Aenderung')
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -745,6 +839,15 @@ export default function ControlsPage() {
|
|||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
|
{/* Transition Error Banner (Anti-Fake-Evidence 409 violations) */}
|
||||||
|
{transitionError && (
|
||||||
|
<TransitionErrorBanner
|
||||||
|
controlId={transitionError.controlId}
|
||||||
|
violations={transitionError.violations}
|
||||||
|
onDismiss={() => setTransitionError(null)}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
|
||||||
{/* Requirements Alert */}
|
{/* Requirements Alert */}
|
||||||
{state.requirements.length === 0 && !loading && (
|
{state.requirements.length === 0 && !loading && (
|
||||||
<div className="bg-amber-50 border border-amber-200 rounded-xl p-4">
|
<div className="bg-amber-50 border border-amber-200 rounded-xl p-4">
|
||||||
|
|||||||
@@ -0,0 +1,111 @@
|
|||||||
|
"use client"
|
||||||
|
|
||||||
|
import React from "react"
|
||||||
|
|
||||||
|
const badgeBase = "inline-flex items-center px-2 py-0.5 rounded text-xs font-medium"
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Confidence Level Badge (E0–E4)
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
const confidenceColors: Record<string, string> = {
|
||||||
|
E0: "bg-red-100 text-red-800",
|
||||||
|
E1: "bg-yellow-100 text-yellow-800",
|
||||||
|
E2: "bg-blue-100 text-blue-800",
|
||||||
|
E3: "bg-green-100 text-green-800",
|
||||||
|
E4: "bg-emerald-100 text-emerald-800",
|
||||||
|
}
|
||||||
|
|
||||||
|
const confidenceLabels: Record<string, string> = {
|
||||||
|
E0: "E0 — Generiert",
|
||||||
|
E1: "E1 — Manuell",
|
||||||
|
E2: "E2 — Intern validiert",
|
||||||
|
E3: "E3 — System-beobachtet",
|
||||||
|
E4: "E4 — Extern auditiert",
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ConfidenceLevelBadge({ level }: { level?: string | null }) {
|
||||||
|
if (!level) return null
|
||||||
|
const color = confidenceColors[level] || "bg-gray-100 text-gray-800"
|
||||||
|
const label = confidenceLabels[level] || level
|
||||||
|
return <span className={`${badgeBase} ${color}`}>{label}</span>
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Truth Status Badge
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
const truthColors: Record<string, string> = {
|
||||||
|
generated: "bg-violet-100 text-violet-800",
|
||||||
|
uploaded: "bg-gray-100 text-gray-800",
|
||||||
|
observed: "bg-blue-100 text-blue-800",
|
||||||
|
validated: "bg-green-100 text-green-800",
|
||||||
|
rejected: "bg-red-100 text-red-800",
|
||||||
|
audited: "bg-emerald-100 text-emerald-800",
|
||||||
|
}
|
||||||
|
|
||||||
|
const truthLabels: Record<string, string> = {
|
||||||
|
generated: "Generiert",
|
||||||
|
uploaded: "Hochgeladen",
|
||||||
|
observed: "Beobachtet",
|
||||||
|
validated: "Validiert",
|
||||||
|
rejected: "Abgelehnt",
|
||||||
|
audited: "Auditiert",
|
||||||
|
}
|
||||||
|
|
||||||
|
export function TruthStatusBadge({ status }: { status?: string | null }) {
|
||||||
|
if (!status) return null
|
||||||
|
const color = truthColors[status] || "bg-gray-100 text-gray-800"
|
||||||
|
const label = truthLabels[status] || status
|
||||||
|
return <span className={`${badgeBase} ${color}`}>{label}</span>
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Generation Mode Badge (sparkles icon)
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
export function GenerationModeBadge({ mode }: { mode?: string | null }) {
|
||||||
|
if (!mode) return null
|
||||||
|
return (
|
||||||
|
<span className={`${badgeBase} bg-violet-100 text-violet-800`}>
|
||||||
|
<svg className="w-3 h-3 mr-1" fill="currentColor" viewBox="0 0 20 20">
|
||||||
|
<path d="M5 2a1 1 0 011 1v1h1a1 1 0 010 2H6v1a1 1 0 01-2 0V6H3a1 1 0 010-2h1V3a1 1 0 011-1zm0 10a1 1 0 011 1v1h1a1 1 0 010 2H6v1a1 1 0 01-2 0v-1H3a1 1 0 010-2h1v-1a1 1 0 011-1zm7-10a1 1 0 01.967.744L14.146 7.2 17.5 7.512a1 1 0 010 1.976l-3.354.313-1.18 4.456a1 1 0 01-1.932 0l-1.18-4.456-3.354-.313a1 1 0 010-1.976l3.354-.313 1.18-4.456A1 1 0 0112 2z" />
|
||||||
|
</svg>
|
||||||
|
KI-generiert
|
||||||
|
</span>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Approval Status Badge (Four-Eyes)
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
const approvalColors: Record<string, string> = {
|
||||||
|
none: "bg-gray-100 text-gray-600",
|
||||||
|
pending_first: "bg-yellow-100 text-yellow-800",
|
||||||
|
first_approved: "bg-blue-100 text-blue-800",
|
||||||
|
approved: "bg-green-100 text-green-800",
|
||||||
|
rejected: "bg-red-100 text-red-800",
|
||||||
|
}
|
||||||
|
|
||||||
|
const approvalLabels: Record<string, string> = {
|
||||||
|
none: "Kein Review",
|
||||||
|
pending_first: "Warte auf 1. Review",
|
||||||
|
first_approved: "1. Review OK",
|
||||||
|
approved: "Genehmigt (4-Augen)",
|
||||||
|
rejected: "Abgelehnt",
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ApprovalStatusBadge({
|
||||||
|
status,
|
||||||
|
requiresFourEyes,
|
||||||
|
}: {
|
||||||
|
status?: string | null
|
||||||
|
requiresFourEyes?: boolean | null
|
||||||
|
}) {
|
||||||
|
if (!requiresFourEyes) return null
|
||||||
|
const s = status || "none"
|
||||||
|
const color = approvalColors[s] || "bg-gray-100 text-gray-600"
|
||||||
|
const label = approvalLabels[s] || s
|
||||||
|
return <span className={`${badgeBase} ${color}`}>{label}</span>
|
||||||
|
}
|
||||||
@@ -3,6 +3,12 @@
|
|||||||
import React, { useState, useEffect, useRef } from 'react'
|
import React, { useState, useEffect, useRef } from 'react'
|
||||||
import { useSDK, Evidence as SDKEvidence, EvidenceType } from '@/lib/sdk'
|
import { useSDK, Evidence as SDKEvidence, EvidenceType } from '@/lib/sdk'
|
||||||
import { StepHeader, STEP_EXPLANATIONS } from '@/components/sdk/StepHeader'
|
import { StepHeader, STEP_EXPLANATIONS } from '@/components/sdk/StepHeader'
|
||||||
|
import {
|
||||||
|
ConfidenceLevelBadge,
|
||||||
|
TruthStatusBadge,
|
||||||
|
GenerationModeBadge,
|
||||||
|
ApprovalStatusBadge,
|
||||||
|
} from './components/anti-fake-badges'
|
||||||
|
|
||||||
// =============================================================================
|
// =============================================================================
|
||||||
// TYPES
|
// TYPES
|
||||||
@@ -28,6 +34,12 @@ interface DisplayEvidence {
|
|||||||
status: DisplayStatus
|
status: DisplayStatus
|
||||||
fileSize: string
|
fileSize: string
|
||||||
fileUrl: string | null
|
fileUrl: string | null
|
||||||
|
// Anti-Fake-Evidence Phase 2
|
||||||
|
confidenceLevel: string | null
|
||||||
|
truthStatus: string | null
|
||||||
|
generationMode: string | null
|
||||||
|
approvalStatus: string | null
|
||||||
|
requiresFourEyes: boolean
|
||||||
}
|
}
|
||||||
|
|
||||||
// =============================================================================
|
// =============================================================================
|
||||||
@@ -162,7 +174,327 @@ const evidenceTemplates: EvidenceTemplate[] = [
|
|||||||
// COMPONENTS
|
// COMPONENTS
|
||||||
// =============================================================================
|
// =============================================================================
|
||||||
|
|
||||||
function EvidenceCard({ evidence, onDelete, onView, onDownload }: { evidence: DisplayEvidence; onDelete: () => void; onView: () => void; onDownload: () => void }) {
|
// =============================================================================
|
||||||
|
// CONFIDENCE FILTER COLORS (matching anti-fake-badges)
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
const confidenceFilterColors: Record<string, string> = {
|
||||||
|
E0: 'bg-red-200 text-red-800',
|
||||||
|
E1: 'bg-yellow-200 text-yellow-800',
|
||||||
|
E2: 'bg-blue-200 text-blue-800',
|
||||||
|
E3: 'bg-green-200 text-green-800',
|
||||||
|
E4: 'bg-emerald-200 text-emerald-800',
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// REVIEW MODAL
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
function ReviewModal({ evidence, onClose, onSuccess }: { evidence: DisplayEvidence; onClose: () => void; onSuccess: () => void }) {
|
||||||
|
const [confidenceLevel, setConfidenceLevel] = useState(evidence.confidenceLevel || 'E1')
|
||||||
|
const [truthStatus, setTruthStatus] = useState(evidence.truthStatus || 'uploaded')
|
||||||
|
const [reviewedBy, setReviewedBy] = useState('')
|
||||||
|
const [submitting, setSubmitting] = useState(false)
|
||||||
|
const [error, setError] = useState<string | null>(null)
|
||||||
|
|
||||||
|
const handleSubmit = async () => {
|
||||||
|
if (!reviewedBy.trim()) { setError('Bitte E-Mail-Adresse angeben'); return }
|
||||||
|
setSubmitting(true)
|
||||||
|
setError(null)
|
||||||
|
try {
|
||||||
|
const res = await fetch(`/api/sdk/v1/compliance/evidence/${evidence.id}/review`, {
|
||||||
|
method: 'PATCH',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({ confidence_level: confidenceLevel, truth_status: truthStatus, reviewed_by: reviewedBy }),
|
||||||
|
})
|
||||||
|
if (!res.ok) {
|
||||||
|
const err = await res.json().catch(() => ({ detail: 'Review fehlgeschlagen' }))
|
||||||
|
throw new Error(typeof err.detail === 'string' ? err.detail : JSON.stringify(err.detail))
|
||||||
|
}
|
||||||
|
onSuccess()
|
||||||
|
} catch (err) {
|
||||||
|
setError(err instanceof Error ? err.message : 'Unbekannter Fehler')
|
||||||
|
} finally {
|
||||||
|
setSubmitting(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const confidenceLevels = [
|
||||||
|
{ value: 'E0', label: 'E0 — Generiert' },
|
||||||
|
{ value: 'E1', label: 'E1 — Manuell' },
|
||||||
|
{ value: 'E2', label: 'E2 — Intern validiert' },
|
||||||
|
{ value: 'E3', label: 'E3 — System-beobachtet' },
|
||||||
|
{ value: 'E4', label: 'E4 — Extern auditiert' },
|
||||||
|
]
|
||||||
|
|
||||||
|
const truthStatuses = [
|
||||||
|
{ value: 'generated', label: 'Generiert' },
|
||||||
|
{ value: 'uploaded', label: 'Hochgeladen' },
|
||||||
|
{ value: 'observed', label: 'Beobachtet' },
|
||||||
|
{ value: 'validated', label: 'Validiert' },
|
||||||
|
{ value: 'audited', label: 'Auditiert' },
|
||||||
|
]
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/50" onClick={onClose}>
|
||||||
|
<div className="bg-white rounded-2xl shadow-xl w-full max-w-lg mx-4 p-6" onClick={e => e.stopPropagation()}>
|
||||||
|
<h2 className="text-xl font-bold text-gray-900 mb-4">Evidence Reviewen</h2>
|
||||||
|
<p className="text-sm text-gray-500 mb-4">{evidence.name}</p>
|
||||||
|
|
||||||
|
{/* Current values */}
|
||||||
|
<div className="mb-4 p-3 bg-gray-50 rounded-lg text-sm space-y-1">
|
||||||
|
<div className="flex justify-between">
|
||||||
|
<span className="text-gray-500">Aktuelles Confidence-Level:</span>
|
||||||
|
<span className="font-medium">{evidence.confidenceLevel || '—'}</span>
|
||||||
|
</div>
|
||||||
|
<div className="flex justify-between">
|
||||||
|
<span className="text-gray-500">Aktueller Truth-Status:</span>
|
||||||
|
<span className="font-medium">{evidence.truthStatus || '—'}</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* New confidence level */}
|
||||||
|
<div className="mb-4">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Neues Confidence-Level</label>
|
||||||
|
<select value={confidenceLevel} onChange={e => setConfidenceLevel(e.target.value)}
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent">
|
||||||
|
{confidenceLevels.map(l => <option key={l.value} value={l.value}>{l.label}</option>)}
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* New truth status */}
|
||||||
|
<div className="mb-4">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Neuer Truth-Status</label>
|
||||||
|
<select value={truthStatus} onChange={e => setTruthStatus(e.target.value)}
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent">
|
||||||
|
{truthStatuses.map(s => <option key={s.value} value={s.value}>{s.label}</option>)}
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Reviewed by */}
|
||||||
|
<div className="mb-4">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Reviewer (E-Mail)</label>
|
||||||
|
<input type="email" value={reviewedBy} onChange={e => setReviewedBy(e.target.value)}
|
||||||
|
placeholder="reviewer@unternehmen.de"
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-transparent" />
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Four-eyes warning */}
|
||||||
|
{evidence.requiresFourEyes && evidence.approvalStatus !== 'approved' && (
|
||||||
|
<div className="mb-4 p-3 bg-yellow-50 border border-yellow-200 rounded-lg">
|
||||||
|
<div className="flex items-start gap-2">
|
||||||
|
<svg className="w-5 h-5 text-yellow-600 mt-0.5 flex-shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-3L13.732 4c-.77-1.333-2.694-1.333-3.464 0L3.34 16c-.77 1.333.192 3 1.732 3z" />
|
||||||
|
</svg>
|
||||||
|
<div className="text-sm text-yellow-800">
|
||||||
|
<p className="font-medium">4-Augen-Prinzip aktiv</p>
|
||||||
|
<p>Dieser Nachweis erfordert eine zusaetzliche Freigabe durch einen zweiten Reviewer.</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Error */}
|
||||||
|
{error && (
|
||||||
|
<div className="mb-4 p-3 bg-red-50 border border-red-200 rounded-lg text-sm text-red-700">{error}</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Actions */}
|
||||||
|
<div className="flex justify-end gap-3">
|
||||||
|
<button onClick={onClose} className="px-4 py-2 text-sm text-gray-600 hover:bg-gray-100 rounded-lg transition-colors">
|
||||||
|
Abbrechen
|
||||||
|
</button>
|
||||||
|
<button onClick={handleSubmit} disabled={submitting}
|
||||||
|
className="px-4 py-2 text-sm bg-purple-600 text-white rounded-lg hover:bg-purple-700 transition-colors disabled:opacity-50">
|
||||||
|
{submitting ? 'Wird gespeichert...' : 'Review abschliessen'}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// REJECT MODAL
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
function RejectModal({ evidence, onClose, onSuccess }: { evidence: DisplayEvidence; onClose: () => void; onSuccess: () => void }) {
|
||||||
|
const [reviewedBy, setReviewedBy] = useState('')
|
||||||
|
const [rejectionReason, setRejectionReason] = useState('')
|
||||||
|
const [submitting, setSubmitting] = useState(false)
|
||||||
|
const [error, setError] = useState<string | null>(null)
|
||||||
|
|
||||||
|
const handleSubmit = async () => {
|
||||||
|
if (!reviewedBy.trim()) { setError('Bitte E-Mail-Adresse angeben'); return }
|
||||||
|
if (!rejectionReason.trim()) { setError('Bitte Ablehnungsgrund angeben'); return }
|
||||||
|
setSubmitting(true)
|
||||||
|
setError(null)
|
||||||
|
try {
|
||||||
|
const res = await fetch(`/api/sdk/v1/compliance/evidence/${evidence.id}/reject`, {
|
||||||
|
method: 'PATCH',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({ reviewed_by: reviewedBy, rejection_reason: rejectionReason }),
|
||||||
|
})
|
||||||
|
if (!res.ok) {
|
||||||
|
const err = await res.json().catch(() => ({ detail: 'Ablehnung fehlgeschlagen' }))
|
||||||
|
throw new Error(typeof err.detail === 'string' ? err.detail : JSON.stringify(err.detail))
|
||||||
|
}
|
||||||
|
onSuccess()
|
||||||
|
} catch (err) {
|
||||||
|
setError(err instanceof Error ? err.message : 'Unbekannter Fehler')
|
||||||
|
} finally {
|
||||||
|
setSubmitting(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/50" onClick={onClose}>
|
||||||
|
<div className="bg-white rounded-2xl shadow-xl w-full max-w-lg mx-4 p-6" onClick={e => e.stopPropagation()}>
|
||||||
|
<h2 className="text-xl font-bold text-gray-900 mb-4">Evidence Ablehnen</h2>
|
||||||
|
<p className="text-sm text-gray-500 mb-4">{evidence.name}</p>
|
||||||
|
|
||||||
|
{/* Reviewed by */}
|
||||||
|
<div className="mb-4">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Reviewer (E-Mail)</label>
|
||||||
|
<input type="email" value={reviewedBy} onChange={e => setReviewedBy(e.target.value)}
|
||||||
|
placeholder="reviewer@unternehmen.de"
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-red-500 focus:border-transparent" />
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Rejection reason */}
|
||||||
|
<div className="mb-4">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">Ablehnungsgrund</label>
|
||||||
|
<textarea value={rejectionReason} onChange={e => setRejectionReason(e.target.value)}
|
||||||
|
placeholder="Bitte beschreiben Sie den Grund fuer die Ablehnung..."
|
||||||
|
rows={4}
|
||||||
|
className="w-full border border-gray-300 rounded-lg px-3 py-2 text-sm focus:ring-2 focus:ring-red-500 focus:border-transparent resize-none" />
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Error */}
|
||||||
|
{error && (
|
||||||
|
<div className="mb-4 p-3 bg-red-50 border border-red-200 rounded-lg text-sm text-red-700">{error}</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Actions */}
|
||||||
|
<div className="flex justify-end gap-3">
|
||||||
|
<button onClick={onClose} className="px-4 py-2 text-sm text-gray-600 hover:bg-gray-100 rounded-lg transition-colors">
|
||||||
|
Abbrechen
|
||||||
|
</button>
|
||||||
|
<button onClick={handleSubmit} disabled={submitting}
|
||||||
|
className="px-4 py-2 text-sm bg-red-600 text-white rounded-lg hover:bg-red-700 transition-colors disabled:opacity-50">
|
||||||
|
{submitting ? 'Wird abgelehnt...' : 'Ablehnen'}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// AUDIT TRAIL PANEL
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
function AuditTrailPanel({ evidenceId, onClose }: { evidenceId: string; onClose: () => void }) {
|
||||||
|
const [entries, setEntries] = useState<{ id: string; action: string; actor: string; timestamp: string; details: Record<string, unknown> | null }[]>([])
|
||||||
|
const [loading, setLoading] = useState(true)
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
fetch(`/api/sdk/v1/compliance/audit-trail?entity_type=evidence&entity_id=${evidenceId}`)
|
||||||
|
.then(res => res.json())
|
||||||
|
.then(data => {
|
||||||
|
const mapped = (data.entries || []).map((e: Record<string, unknown>) => ({
|
||||||
|
id: e.id as string,
|
||||||
|
action: e.action as string,
|
||||||
|
actor: (e.performed_by || 'System') as string,
|
||||||
|
timestamp: (e.performed_at || '') as string,
|
||||||
|
details: {
|
||||||
|
...(e.field_changed ? { field: e.field_changed } : {}),
|
||||||
|
...(e.old_value ? { old: e.old_value } : {}),
|
||||||
|
...(e.new_value ? { new: e.new_value } : {}),
|
||||||
|
...(e.change_summary ? { summary: e.change_summary } : {}),
|
||||||
|
} as Record<string, unknown>,
|
||||||
|
}))
|
||||||
|
setEntries(mapped)
|
||||||
|
})
|
||||||
|
.catch(() => {})
|
||||||
|
.finally(() => setLoading(false))
|
||||||
|
}, [evidenceId])
|
||||||
|
|
||||||
|
const actionLabels: Record<string, { label: string; color: string }> = {
|
||||||
|
created: { label: 'Erstellt', color: 'bg-blue-100 text-blue-700' },
|
||||||
|
uploaded: { label: 'Hochgeladen', color: 'bg-purple-100 text-purple-700' },
|
||||||
|
reviewed: { label: 'Reviewed', color: 'bg-green-100 text-green-700' },
|
||||||
|
rejected: { label: 'Abgelehnt', color: 'bg-red-100 text-red-700' },
|
||||||
|
updated: { label: 'Aktualisiert', color: 'bg-yellow-100 text-yellow-700' },
|
||||||
|
deleted: { label: 'Geloescht', color: 'bg-gray-100 text-gray-700' },
|
||||||
|
approved: { label: 'Genehmigt', color: 'bg-emerald-100 text-emerald-700' },
|
||||||
|
four_eyes_first: { label: '1. Review (4-Augen)', color: 'bg-blue-100 text-blue-700' },
|
||||||
|
four_eyes_final: { label: 'Finale Freigabe (4-Augen)', color: 'bg-emerald-100 text-emerald-700' },
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/50" onClick={onClose}>
|
||||||
|
<div className="bg-white rounded-2xl shadow-xl w-full max-w-2xl mx-4 p-6 max-h-[80vh] overflow-y-auto" onClick={e => e.stopPropagation()}>
|
||||||
|
<div className="flex items-center justify-between mb-4">
|
||||||
|
<h2 className="text-xl font-bold text-gray-900">Audit-Trail</h2>
|
||||||
|
<button onClick={onClose} className="text-gray-400 hover:text-gray-600 text-xl">×</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{loading ? (
|
||||||
|
<div className="flex justify-center py-12">
|
||||||
|
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-purple-600" />
|
||||||
|
</div>
|
||||||
|
) : entries.length === 0 ? (
|
||||||
|
<div className="py-12 text-center text-gray-500">
|
||||||
|
<p>Keine Audit-Trail-Eintraege vorhanden.</p>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div className="relative">
|
||||||
|
{/* Timeline line */}
|
||||||
|
<div className="absolute left-4 top-0 bottom-0 w-0.5 bg-gray-200" />
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
{entries.map((entry, idx) => {
|
||||||
|
const meta = actionLabels[entry.action] || { label: entry.action, color: 'bg-gray-100 text-gray-700' }
|
||||||
|
return (
|
||||||
|
<div key={entry.id || idx} className="relative flex items-start gap-4 pl-10">
|
||||||
|
{/* Timeline dot */}
|
||||||
|
<div className="absolute left-2.5 top-1.5 w-3 h-3 rounded-full bg-white border-2 border-purple-400" />
|
||||||
|
|
||||||
|
<div className="flex-1 bg-gray-50 rounded-lg p-3">
|
||||||
|
<div className="flex items-center gap-2 mb-1">
|
||||||
|
<span className={`px-2 py-0.5 text-xs rounded ${meta.color}`}>{meta.label}</span>
|
||||||
|
<span className="text-xs text-gray-400">
|
||||||
|
{entry.timestamp ? new Date(entry.timestamp).toLocaleString('de-DE') : '—'}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
<div className="text-sm text-gray-600">
|
||||||
|
<span className="font-medium">{entry.actor || 'System'}</span>
|
||||||
|
</div>
|
||||||
|
{entry.details && Object.keys(entry.details).length > 0 && (
|
||||||
|
<div className="mt-2 text-xs text-gray-500 font-mono bg-white rounded p-2 border">
|
||||||
|
{Object.entries(entry.details).map(([k, v]) => (
|
||||||
|
<div key={k}><span className="text-gray-400">{k}:</span> {String(v)}</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// EVIDENCE CARD
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
function EvidenceCard({ evidence, onDelete, onView, onDownload, onReview, onReject, onShowHistory }: { evidence: DisplayEvidence; onDelete: () => void; onView: () => void; onDownload: () => void; onReview: () => void; onReject: () => void; onShowHistory: () => void }) {
|
||||||
const typeIcons = {
|
const typeIcons = {
|
||||||
document: (
|
document: (
|
||||||
<svg className="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
<svg className="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
@@ -221,9 +553,15 @@ function EvidenceCard({ evidence, onDelete, onView, onDownload }: { evidence: Di
|
|||||||
<div className="flex-1">
|
<div className="flex-1">
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex items-center justify-between">
|
||||||
<h3 className="text-lg font-semibold text-gray-900">{evidence.name}</h3>
|
<h3 className="text-lg font-semibold text-gray-900">{evidence.name}</h3>
|
||||||
<span className={`px-3 py-1 text-xs rounded-full ${statusColors[evidence.status]}`}>
|
<div className="flex items-center gap-1.5 flex-wrap">
|
||||||
{statusLabels[evidence.status]}
|
<span className={`px-3 py-1 text-xs rounded-full ${statusColors[evidence.status]}`}>
|
||||||
</span>
|
{statusLabels[evidence.status]}
|
||||||
|
</span>
|
||||||
|
<ConfidenceLevelBadge level={evidence.confidenceLevel} />
|
||||||
|
<TruthStatusBadge status={evidence.truthStatus} />
|
||||||
|
<GenerationModeBadge mode={evidence.generationMode} />
|
||||||
|
<ApprovalStatusBadge status={evidence.approvalStatus} requiresFourEyes={evidence.requiresFourEyes} />
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<p className="text-sm text-gray-500 mt-1">{evidence.description}</p>
|
<p className="text-sm text-gray-500 mt-1">{evidence.description}</p>
|
||||||
|
|
||||||
@@ -275,6 +613,31 @@ function EvidenceCard({ evidence, onDelete, onView, onDownload }: { evidence: Di
|
|||||||
>
|
>
|
||||||
Loeschen
|
Loeschen
|
||||||
</button>
|
</button>
|
||||||
|
{/* Review button — visible when review is possible */}
|
||||||
|
{(evidence.approvalStatus === 'none' || evidence.approvalStatus === 'pending_first' || evidence.approvalStatus === 'first_approved' || !evidence.approvalStatus) && evidence.approvalStatus !== 'approved' && evidence.approvalStatus !== 'rejected' && (
|
||||||
|
<button
|
||||||
|
onClick={onReview}
|
||||||
|
className="px-3 py-1 text-sm text-green-600 hover:bg-green-50 rounded-lg transition-colors font-medium"
|
||||||
|
>
|
||||||
|
Reviewen
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
{/* Reject button — visible for four-eyes evidence that's not yet resolved */}
|
||||||
|
{evidence.requiresFourEyes && evidence.approvalStatus !== 'rejected' && evidence.approvalStatus !== 'approved' && (
|
||||||
|
<button
|
||||||
|
onClick={onReject}
|
||||||
|
className="px-3 py-1 text-sm text-orange-600 hover:bg-orange-50 rounded-lg transition-colors"
|
||||||
|
>
|
||||||
|
Ablehnen
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
{/* History button */}
|
||||||
|
<button
|
||||||
|
onClick={onShowHistory}
|
||||||
|
className="px-3 py-1 text-sm text-gray-500 hover:bg-gray-100 rounded-lg transition-colors"
|
||||||
|
>
|
||||||
|
Historie
|
||||||
|
</button>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -382,6 +745,15 @@ export default function EvidencePage() {
|
|||||||
const [pageSize] = useState(20)
|
const [pageSize] = useState(20)
|
||||||
const [total, setTotal] = useState(0)
|
const [total, setTotal] = useState(0)
|
||||||
|
|
||||||
|
// Anti-Fake-Evidence metadata (keyed by evidence ID)
|
||||||
|
const [antiFakeMeta, setAntiFakeMeta] = useState<Record<string, {
|
||||||
|
confidenceLevel: string | null
|
||||||
|
truthStatus: string | null
|
||||||
|
generationMode: string | null
|
||||||
|
approvalStatus: string | null
|
||||||
|
requiresFourEyes: boolean
|
||||||
|
}>>({})
|
||||||
|
|
||||||
// Evidence Checks state
|
// Evidence Checks state
|
||||||
const [checks, setChecks] = useState<EvidenceCheck[]>([])
|
const [checks, setChecks] = useState<EvidenceCheck[]>([])
|
||||||
const [checksLoading, setChecksLoading] = useState(false)
|
const [checksLoading, setChecksLoading] = useState(false)
|
||||||
@@ -393,6 +765,13 @@ export default function EvidencePage() {
|
|||||||
const [coverageReport, setCoverageReport] = useState<CoverageReport | null>(null)
|
const [coverageReport, setCoverageReport] = useState<CoverageReport | null>(null)
|
||||||
const [seedingChecks, setSeedingChecks] = useState(false)
|
const [seedingChecks, setSeedingChecks] = useState(false)
|
||||||
|
|
||||||
|
// Phase 3: Review/Reject/AuditTrail state
|
||||||
|
const [reviewEvidence, setReviewEvidence] = useState<DisplayEvidence | null>(null)
|
||||||
|
const [rejectEvidence, setRejectEvidence] = useState<DisplayEvidence | null>(null)
|
||||||
|
const [auditTrailId, setAuditTrailId] = useState<string | null>(null)
|
||||||
|
const [confidenceFilter, setConfidenceFilter] = useState<string | null>(null)
|
||||||
|
const [refreshKey, setRefreshKey] = useState(0)
|
||||||
|
|
||||||
// Fetch evidence from backend on mount and when page changes
|
// Fetch evidence from backend on mount and when page changes
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
const fetchEvidence = async () => {
|
const fetchEvidence = async () => {
|
||||||
@@ -404,18 +783,30 @@ export default function EvidencePage() {
|
|||||||
if (data.total !== undefined) setTotal(data.total)
|
if (data.total !== undefined) setTotal(data.total)
|
||||||
const backendEvidence = data.evidence || data
|
const backendEvidence = data.evidence || data
|
||||||
if (Array.isArray(backendEvidence) && backendEvidence.length > 0) {
|
if (Array.isArray(backendEvidence) && backendEvidence.length > 0) {
|
||||||
const mapped: SDKEvidence[] = backendEvidence.map((e: Record<string, unknown>) => ({
|
const metaMap: typeof antiFakeMeta = {}
|
||||||
id: (e.id || '') as string,
|
const mapped: SDKEvidence[] = backendEvidence.map((e: Record<string, unknown>) => {
|
||||||
controlId: (e.control_id || '') as string,
|
const id = (e.id || '') as string
|
||||||
type: ((e.evidence_type || 'DOCUMENT') as string).toUpperCase() as EvidenceType,
|
metaMap[id] = {
|
||||||
name: (e.title || e.name || '') as string,
|
confidenceLevel: (e.confidence_level || null) as string | null,
|
||||||
description: (e.description || '') as string,
|
truthStatus: (e.truth_status || null) as string | null,
|
||||||
fileUrl: (e.artifact_url || null) as string | null,
|
generationMode: (e.generation_mode || null) as string | null,
|
||||||
validFrom: e.valid_from ? new Date(e.valid_from as string) : new Date(),
|
approvalStatus: (e.approval_status || null) as string | null,
|
||||||
validUntil: e.valid_until ? new Date(e.valid_until as string) : null,
|
requiresFourEyes: !!e.requires_four_eyes,
|
||||||
uploadedBy: (e.uploaded_by || 'System') as string,
|
}
|
||||||
uploadedAt: e.created_at ? new Date(e.created_at as string) : new Date(),
|
return {
|
||||||
}))
|
id,
|
||||||
|
controlId: (e.control_id || '') as string,
|
||||||
|
type: ((e.evidence_type || 'DOCUMENT') as string).toUpperCase() as EvidenceType,
|
||||||
|
name: (e.title || e.name || '') as string,
|
||||||
|
description: (e.description || '') as string,
|
||||||
|
fileUrl: (e.artifact_url || null) as string | null,
|
||||||
|
validFrom: e.valid_from ? new Date(e.valid_from as string) : new Date(),
|
||||||
|
validUntil: e.valid_until ? new Date(e.valid_until as string) : null,
|
||||||
|
uploadedBy: (e.uploaded_by || 'System') as string,
|
||||||
|
uploadedAt: e.created_at ? new Date(e.created_at as string) : new Date(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
setAntiFakeMeta(metaMap)
|
||||||
dispatch({ type: 'SET_STATE', payload: { evidence: mapped } })
|
dispatch({ type: 'SET_STATE', payload: { evidence: mapped } })
|
||||||
setError(null)
|
setError(null)
|
||||||
return
|
return
|
||||||
@@ -463,12 +854,13 @@ export default function EvidencePage() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fetchEvidence()
|
fetchEvidence()
|
||||||
}, [page, pageSize]) // eslint-disable-line react-hooks/exhaustive-deps
|
}, [page, pageSize, refreshKey]) // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
// Convert SDK evidence to display evidence
|
// Convert SDK evidence to display evidence
|
||||||
const displayEvidence: DisplayEvidence[] = state.evidence.map(ev => {
|
const displayEvidence: DisplayEvidence[] = state.evidence.map(ev => {
|
||||||
const template = evidenceTemplates.find(t => t.id === ev.id)
|
const template = evidenceTemplates.find(t => t.id === ev.id)
|
||||||
|
|
||||||
|
const meta = antiFakeMeta[ev.id]
|
||||||
return {
|
return {
|
||||||
id: ev.id,
|
id: ev.id,
|
||||||
name: ev.name,
|
name: ev.name,
|
||||||
@@ -485,12 +877,18 @@ export default function EvidencePage() {
|
|||||||
status: getEvidenceStatus(ev.validUntil),
|
status: getEvidenceStatus(ev.validUntil),
|
||||||
fileSize: template?.fileSize || 'Unbekannt',
|
fileSize: template?.fileSize || 'Unbekannt',
|
||||||
fileUrl: ev.fileUrl,
|
fileUrl: ev.fileUrl,
|
||||||
|
confidenceLevel: meta?.confidenceLevel || null,
|
||||||
|
truthStatus: meta?.truthStatus || null,
|
||||||
|
generationMode: meta?.generationMode || null,
|
||||||
|
approvalStatus: meta?.approvalStatus || null,
|
||||||
|
requiresFourEyes: meta?.requiresFourEyes || false,
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
const filteredEvidence = filter === 'all'
|
const filteredEvidence = (filter === 'all'
|
||||||
? displayEvidence
|
? displayEvidence
|
||||||
: displayEvidence.filter(e => e.status === filter || e.displayType === filter)
|
: displayEvidence.filter(e => e.status === filter || e.displayType === filter)
|
||||||
|
).filter(e => !confidenceFilter || e.confidenceLevel === confidenceFilter)
|
||||||
|
|
||||||
const validCount = displayEvidence.filter(e => e.status === 'valid').length
|
const validCount = displayEvidence.filter(e => e.status === 'valid').length
|
||||||
const expiredCount = displayEvidence.filter(e => e.status === 'expired').length
|
const expiredCount = displayEvidence.filter(e => e.status === 'expired').length
|
||||||
@@ -803,6 +1201,20 @@ export default function EvidencePage() {
|
|||||||
f === 'certificate' ? 'Zertifikate' : 'Audit-Berichte'}
|
f === 'certificate' ? 'Zertifikate' : 'Audit-Berichte'}
|
||||||
</button>
|
</button>
|
||||||
))}
|
))}
|
||||||
|
<span className="text-gray-300 mx-1">|</span>
|
||||||
|
{['E0', 'E1', 'E2', 'E3', 'E4'].map(level => (
|
||||||
|
<button
|
||||||
|
key={level}
|
||||||
|
onClick={() => setConfidenceFilter(confidenceFilter === level ? null : level)}
|
||||||
|
className={`px-3 py-1 text-sm rounded-full transition-colors ${
|
||||||
|
confidenceFilter === level
|
||||||
|
? confidenceFilterColors[level]
|
||||||
|
: 'bg-gray-100 text-gray-600 hover:bg-gray-200'
|
||||||
|
}`}
|
||||||
|
>
|
||||||
|
{level}
|
||||||
|
</button>
|
||||||
|
))}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Loading State */}
|
{/* Loading State */}
|
||||||
@@ -818,6 +1230,9 @@ export default function EvidencePage() {
|
|||||||
onDelete={() => handleDelete(ev.id)}
|
onDelete={() => handleDelete(ev.id)}
|
||||||
onView={() => handleView(ev)}
|
onView={() => handleView(ev)}
|
||||||
onDownload={() => handleDownload(ev)}
|
onDownload={() => handleDownload(ev)}
|
||||||
|
onReview={() => setReviewEvidence(ev)}
|
||||||
|
onReject={() => setRejectEvidence(ev)}
|
||||||
|
onShowHistory={() => setAuditTrailId(ev.id)}
|
||||||
/>
|
/>
|
||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
@@ -1106,6 +1521,28 @@ export default function EvidencePage() {
|
|||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
|
{/* Phase 3 Modals */}
|
||||||
|
{reviewEvidence && (
|
||||||
|
<ReviewModal
|
||||||
|
evidence={reviewEvidence}
|
||||||
|
onClose={() => setReviewEvidence(null)}
|
||||||
|
onSuccess={() => { setReviewEvidence(null); setRefreshKey(k => k + 1) }}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
{rejectEvidence && (
|
||||||
|
<RejectModal
|
||||||
|
evidence={rejectEvidence}
|
||||||
|
onClose={() => setRejectEvidence(null)}
|
||||||
|
onSuccess={() => { setRejectEvidence(null); setRefreshKey(k => k + 1) }}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
{auditTrailId && (
|
||||||
|
<AuditTrailPanel
|
||||||
|
evidenceId={auditTrailId}
|
||||||
|
onClose={() => setAuditTrailId(null)}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -643,6 +643,19 @@ export function SDKSidebar({ collapsed = false, onCollapsedChange }: SDKSidebarP
|
|||||||
collapsed={collapsed}
|
collapsed={collapsed}
|
||||||
projectId={projectId}
|
projectId={projectId}
|
||||||
/>
|
/>
|
||||||
|
<AdditionalModuleItem
|
||||||
|
href="/sdk/assertions"
|
||||||
|
icon={
|
||||||
|
<svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||||
|
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2}
|
||||||
|
d="M9 5H7a2 2 0 00-2 2v12a2 2 0 002 2h10a2 2 0 002-2V7a2 2 0 00-2-2h-2M9 5a2 2 0 002 2h2a2 2 0 002-2M9 5a2 2 0 012-2h2a2 2 0 012 2m-6 9l2 2 4-4" />
|
||||||
|
</svg>
|
||||||
|
}
|
||||||
|
label="Assertions"
|
||||||
|
isActive={pathname === '/sdk/assertions'}
|
||||||
|
collapsed={collapsed}
|
||||||
|
projectId={projectId}
|
||||||
|
/>
|
||||||
<AdditionalModuleItem
|
<AdditionalModuleItem
|
||||||
href="/sdk/dsms"
|
href="/sdk/dsms"
|
||||||
icon={
|
icon={
|
||||||
|
|||||||
@@ -61,6 +61,8 @@ _ROUTER_MODULES = [
|
|||||||
"evidence_check_routes",
|
"evidence_check_routes",
|
||||||
"vvt_library_routes",
|
"vvt_library_routes",
|
||||||
"tom_mapping_routes",
|
"tom_mapping_routes",
|
||||||
|
"llm_audit_routes",
|
||||||
|
"assertion_routes",
|
||||||
]
|
]
|
||||||
|
|
||||||
_loaded_count = 0
|
_loaded_count = 0
|
||||||
|
|||||||
227
backend-compliance/compliance/api/assertion_routes.py
Normal file
227
backend-compliance/compliance/api/assertion_routes.py
Normal file
@@ -0,0 +1,227 @@
|
|||||||
|
"""
|
||||||
|
API routes for Assertion Engine (Anti-Fake-Evidence Phase 2).
|
||||||
|
|
||||||
|
Endpoints:
|
||||||
|
- /assertions: CRUD for assertions
|
||||||
|
- /assertions/extract: Automatic extraction from entity text
|
||||||
|
- /assertions/summary: Stats (total assertions, facts, unverified)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||||
|
from sqlalchemy.orm import Session
|
||||||
|
|
||||||
|
from classroom_engine.database import get_db
|
||||||
|
|
||||||
|
from ..db.models import AssertionDB
|
||||||
|
from ..services.assertion_engine import extract_assertions
|
||||||
|
from .schemas import (
|
||||||
|
AssertionCreate,
|
||||||
|
AssertionUpdate,
|
||||||
|
AssertionResponse,
|
||||||
|
AssertionListResponse,
|
||||||
|
AssertionSummaryResponse,
|
||||||
|
AssertionExtractRequest,
|
||||||
|
)
|
||||||
|
from .audit_trail_utils import log_audit_trail, generate_id
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
router = APIRouter(tags=["compliance-assertions"])
|
||||||
|
|
||||||
|
|
||||||
|
def _build_assertion_response(a: AssertionDB) -> AssertionResponse:
|
||||||
|
return AssertionResponse(
|
||||||
|
id=a.id,
|
||||||
|
tenant_id=a.tenant_id,
|
||||||
|
entity_type=a.entity_type,
|
||||||
|
entity_id=a.entity_id,
|
||||||
|
sentence_text=a.sentence_text,
|
||||||
|
sentence_index=a.sentence_index,
|
||||||
|
assertion_type=a.assertion_type,
|
||||||
|
evidence_ids=a.evidence_ids or [],
|
||||||
|
confidence=a.confidence or 0.0,
|
||||||
|
normative_tier=a.normative_tier,
|
||||||
|
verified_by=a.verified_by,
|
||||||
|
verified_at=a.verified_at,
|
||||||
|
created_at=a.created_at,
|
||||||
|
updated_at=a.updated_at,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/assertions", response_model=AssertionResponse)
|
||||||
|
async def create_assertion(
|
||||||
|
data: AssertionCreate,
|
||||||
|
tenant_id: Optional[str] = Query(None),
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Create a single assertion manually."""
|
||||||
|
a = AssertionDB(
|
||||||
|
id=generate_id(),
|
||||||
|
tenant_id=tenant_id,
|
||||||
|
entity_type=data.entity_type,
|
||||||
|
entity_id=data.entity_id,
|
||||||
|
sentence_text=data.sentence_text,
|
||||||
|
assertion_type=data.assertion_type or "assertion",
|
||||||
|
evidence_ids=data.evidence_ids or [],
|
||||||
|
normative_tier=data.normative_tier,
|
||||||
|
)
|
||||||
|
db.add(a)
|
||||||
|
db.commit()
|
||||||
|
db.refresh(a)
|
||||||
|
return _build_assertion_response(a)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/assertions", response_model=AssertionListResponse)
|
||||||
|
async def list_assertions(
|
||||||
|
entity_type: Optional[str] = Query(None),
|
||||||
|
entity_id: Optional[str] = Query(None),
|
||||||
|
assertion_type: Optional[str] = Query(None),
|
||||||
|
tenant_id: Optional[str] = Query(None),
|
||||||
|
limit: int = Query(100, ge=1, le=500),
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""List assertions with optional filters."""
|
||||||
|
query = db.query(AssertionDB)
|
||||||
|
if entity_type:
|
||||||
|
query = query.filter(AssertionDB.entity_type == entity_type)
|
||||||
|
if entity_id:
|
||||||
|
query = query.filter(AssertionDB.entity_id == entity_id)
|
||||||
|
if assertion_type:
|
||||||
|
query = query.filter(AssertionDB.assertion_type == assertion_type)
|
||||||
|
if tenant_id:
|
||||||
|
query = query.filter(AssertionDB.tenant_id == tenant_id)
|
||||||
|
|
||||||
|
total = query.count()
|
||||||
|
records = query.order_by(AssertionDB.sentence_index.asc()).limit(limit).all()
|
||||||
|
|
||||||
|
return AssertionListResponse(
|
||||||
|
assertions=[_build_assertion_response(a) for a in records],
|
||||||
|
total=total,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/assertions/summary", response_model=AssertionSummaryResponse)
|
||||||
|
async def assertion_summary(
|
||||||
|
tenant_id: Optional[str] = Query(None),
|
||||||
|
entity_type: Optional[str] = Query(None),
|
||||||
|
entity_id: Optional[str] = Query(None),
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Summary stats: total assertions, facts, rationale, unverified."""
|
||||||
|
query = db.query(AssertionDB)
|
||||||
|
if tenant_id:
|
||||||
|
query = query.filter(AssertionDB.tenant_id == tenant_id)
|
||||||
|
if entity_type:
|
||||||
|
query = query.filter(AssertionDB.entity_type == entity_type)
|
||||||
|
if entity_id:
|
||||||
|
query = query.filter(AssertionDB.entity_id == entity_id)
|
||||||
|
|
||||||
|
all_records = query.all()
|
||||||
|
|
||||||
|
total = len(all_records)
|
||||||
|
facts = sum(1 for a in all_records if a.assertion_type == "fact")
|
||||||
|
rationale = sum(1 for a in all_records if a.assertion_type == "rationale")
|
||||||
|
unverified = sum(1 for a in all_records if a.assertion_type == "assertion" and not a.verified_by)
|
||||||
|
|
||||||
|
return AssertionSummaryResponse(
|
||||||
|
total_assertions=total,
|
||||||
|
total_facts=facts,
|
||||||
|
total_rationale=rationale,
|
||||||
|
unverified_count=unverified,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/assertions/{assertion_id}", response_model=AssertionResponse)
|
||||||
|
async def get_assertion(
|
||||||
|
assertion_id: str,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Get a single assertion by ID."""
|
||||||
|
a = db.query(AssertionDB).filter(AssertionDB.id == assertion_id).first()
|
||||||
|
if not a:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Assertion {assertion_id} not found")
|
||||||
|
return _build_assertion_response(a)
|
||||||
|
|
||||||
|
|
||||||
|
@router.put("/assertions/{assertion_id}", response_model=AssertionResponse)
|
||||||
|
async def update_assertion(
|
||||||
|
assertion_id: str,
|
||||||
|
data: AssertionUpdate,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Update an assertion (e.g. link evidence, change type)."""
|
||||||
|
a = db.query(AssertionDB).filter(AssertionDB.id == assertion_id).first()
|
||||||
|
if not a:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Assertion {assertion_id} not found")
|
||||||
|
|
||||||
|
update_fields = data.model_dump(exclude_unset=True)
|
||||||
|
for key, value in update_fields.items():
|
||||||
|
setattr(a, key, value)
|
||||||
|
a.updated_at = datetime.utcnow()
|
||||||
|
db.commit()
|
||||||
|
db.refresh(a)
|
||||||
|
return _build_assertion_response(a)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/assertions/{assertion_id}/verify", response_model=AssertionResponse)
|
||||||
|
async def verify_assertion(
|
||||||
|
assertion_id: str,
|
||||||
|
verified_by: str = Query(...),
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Mark an assertion as verified fact."""
|
||||||
|
a = db.query(AssertionDB).filter(AssertionDB.id == assertion_id).first()
|
||||||
|
if not a:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Assertion {assertion_id} not found")
|
||||||
|
|
||||||
|
a.assertion_type = "fact"
|
||||||
|
a.verified_by = verified_by
|
||||||
|
a.verified_at = datetime.utcnow()
|
||||||
|
a.updated_at = datetime.utcnow()
|
||||||
|
db.commit()
|
||||||
|
db.refresh(a)
|
||||||
|
return _build_assertion_response(a)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/assertions/extract", response_model=AssertionListResponse)
|
||||||
|
async def extract_assertions_endpoint(
|
||||||
|
data: AssertionExtractRequest,
|
||||||
|
tenant_id: Optional[str] = Query(None),
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Extract assertions from free text and persist them."""
|
||||||
|
extracted = extract_assertions(
|
||||||
|
text=data.text,
|
||||||
|
entity_type=data.entity_type,
|
||||||
|
entity_id=data.entity_id,
|
||||||
|
tenant_id=tenant_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
created = []
|
||||||
|
for item in extracted:
|
||||||
|
a = AssertionDB(
|
||||||
|
id=generate_id(),
|
||||||
|
tenant_id=item["tenant_id"],
|
||||||
|
entity_type=item["entity_type"],
|
||||||
|
entity_id=item["entity_id"],
|
||||||
|
sentence_text=item["sentence_text"],
|
||||||
|
sentence_index=item["sentence_index"],
|
||||||
|
assertion_type=item["assertion_type"],
|
||||||
|
evidence_ids=item["evidence_ids"],
|
||||||
|
normative_tier=item.get("normative_tier"),
|
||||||
|
confidence=item.get("confidence", 0.0),
|
||||||
|
)
|
||||||
|
db.add(a)
|
||||||
|
created.append(a)
|
||||||
|
|
||||||
|
db.commit()
|
||||||
|
for a in created:
|
||||||
|
db.refresh(a)
|
||||||
|
|
||||||
|
return AssertionListResponse(
|
||||||
|
assertions=[_build_assertion_response(a) for a in created],
|
||||||
|
total=len(created),
|
||||||
|
)
|
||||||
53
backend-compliance/compliance/api/audit_trail_utils.py
Normal file
53
backend-compliance/compliance/api/audit_trail_utils.py
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
"""Shared audit trail utilities.
|
||||||
|
|
||||||
|
Extracted from isms_routes.py for reuse across evidence, control,
|
||||||
|
and assertion routes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
import uuid
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
from sqlalchemy.orm import Session
|
||||||
|
|
||||||
|
from ..db.models import AuditTrailDB
|
||||||
|
|
||||||
|
|
||||||
|
def generate_id() -> str:
|
||||||
|
"""Generate a UUID string."""
|
||||||
|
return str(uuid.uuid4())
|
||||||
|
|
||||||
|
|
||||||
|
def create_signature(data: str) -> str:
|
||||||
|
"""Create SHA-256 signature."""
|
||||||
|
return hashlib.sha256(data.encode()).hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def log_audit_trail(
|
||||||
|
db: Session,
|
||||||
|
entity_type: str,
|
||||||
|
entity_id: str,
|
||||||
|
entity_name: str,
|
||||||
|
action: str,
|
||||||
|
performed_by: str,
|
||||||
|
field_changed: str = None,
|
||||||
|
old_value: str = None,
|
||||||
|
new_value: str = None,
|
||||||
|
change_summary: str = None,
|
||||||
|
):
|
||||||
|
"""Log an entry to the audit trail."""
|
||||||
|
trail = AuditTrailDB(
|
||||||
|
id=generate_id(),
|
||||||
|
entity_type=entity_type,
|
||||||
|
entity_id=entity_id,
|
||||||
|
entity_name=entity_name,
|
||||||
|
action=action,
|
||||||
|
field_changed=field_changed,
|
||||||
|
old_value=old_value,
|
||||||
|
new_value=new_value,
|
||||||
|
change_summary=change_summary,
|
||||||
|
performed_by=performed_by,
|
||||||
|
performed_at=datetime.utcnow(),
|
||||||
|
checksum=create_signature(f"{entity_type}|{entity_id}|{action}|{performed_by}"),
|
||||||
|
)
|
||||||
|
db.add(trail)
|
||||||
@@ -32,14 +32,21 @@ from ..db import (
|
|||||||
ControlRepository,
|
ControlRepository,
|
||||||
EvidenceRepository,
|
EvidenceRepository,
|
||||||
RiskRepository,
|
RiskRepository,
|
||||||
|
AssertionDB,
|
||||||
)
|
)
|
||||||
from .schemas import (
|
from .schemas import (
|
||||||
DashboardResponse,
|
DashboardResponse,
|
||||||
|
MultiDimensionalScore,
|
||||||
ExecutiveDashboardResponse,
|
ExecutiveDashboardResponse,
|
||||||
TrendDataPoint,
|
TrendDataPoint,
|
||||||
RiskSummary,
|
RiskSummary,
|
||||||
DeadlineItem,
|
DeadlineItem,
|
||||||
TeamWorkloadItem,
|
TeamWorkloadItem,
|
||||||
|
TraceabilityAssertion,
|
||||||
|
TraceabilityEvidence,
|
||||||
|
TraceabilityCoverage,
|
||||||
|
TraceabilityControl,
|
||||||
|
TraceabilityMatrixResponse,
|
||||||
)
|
)
|
||||||
from .tenant_utils import get_tenant_id as _get_tenant_id
|
from .tenant_utils import get_tenant_id as _get_tenant_id
|
||||||
from .db_utils import row_to_dict as _row_to_dict
|
from .db_utils import row_to_dict as _row_to_dict
|
||||||
@@ -95,6 +102,14 @@ async def get_dashboard(db: Session = Depends(get_db)):
|
|||||||
# or compute from by_status dict
|
# or compute from by_status dict
|
||||||
score = ctrl_stats.get("compliance_score", 0.0)
|
score = ctrl_stats.get("compliance_score", 0.0)
|
||||||
|
|
||||||
|
# Multi-dimensional score (Anti-Fake-Evidence)
|
||||||
|
try:
|
||||||
|
ms = ctrl_repo.get_multi_dimensional_score()
|
||||||
|
multi_score = MultiDimensionalScore(**ms)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to compute multi-dimensional score: {e}")
|
||||||
|
multi_score = None
|
||||||
|
|
||||||
return DashboardResponse(
|
return DashboardResponse(
|
||||||
compliance_score=round(score, 1),
|
compliance_score=round(score, 1),
|
||||||
total_regulations=len(regulations),
|
total_regulations=len(regulations),
|
||||||
@@ -107,6 +122,7 @@ async def get_dashboard(db: Session = Depends(get_db)):
|
|||||||
total_risks=len(risks),
|
total_risks=len(risks),
|
||||||
risks_by_level=risks_by_level,
|
risks_by_level=risks_by_level,
|
||||||
recent_activity=[],
|
recent_activity=[],
|
||||||
|
multi_score=multi_score,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -125,11 +141,18 @@ async def get_compliance_score(db: Session = Depends(get_db)):
|
|||||||
else:
|
else:
|
||||||
score = 0
|
score = 0
|
||||||
|
|
||||||
|
# Multi-dimensional score (Anti-Fake-Evidence)
|
||||||
|
try:
|
||||||
|
multi_score = ctrl_repo.get_multi_dimensional_score()
|
||||||
|
except Exception:
|
||||||
|
multi_score = None
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"score": round(score, 1),
|
"score": round(score, 1),
|
||||||
"total_controls": total,
|
"total_controls": total,
|
||||||
"passing_controls": passing,
|
"passing_controls": passing,
|
||||||
"partial_controls": partial,
|
"partial_controls": partial,
|
||||||
|
"multi_score": multi_score,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -597,6 +620,158 @@ async def get_score_history(
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Evidence Distribution (Anti-Fake-Evidence Phase 3)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.get("/dashboard/evidence-distribution")
|
||||||
|
async def get_evidence_distribution(
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
tenant_id: str = Depends(_get_tenant_id),
|
||||||
|
):
|
||||||
|
"""Evidence counts by confidence level and four-eyes status."""
|
||||||
|
evidence_repo = EvidenceRepository(db)
|
||||||
|
all_evidence = evidence_repo.get_all()
|
||||||
|
|
||||||
|
by_confidence = {"E0": 0, "E1": 0, "E2": 0, "E3": 0, "E4": 0}
|
||||||
|
four_eyes_pending = 0
|
||||||
|
|
||||||
|
for e in all_evidence:
|
||||||
|
level = e.confidence_level.value if e.confidence_level else "E1"
|
||||||
|
if level in by_confidence:
|
||||||
|
by_confidence[level] += 1
|
||||||
|
if e.requires_four_eyes and e.approval_status not in ("approved", "rejected"):
|
||||||
|
four_eyes_pending += 1
|
||||||
|
|
||||||
|
return {
|
||||||
|
"by_confidence": by_confidence,
|
||||||
|
"four_eyes_pending": four_eyes_pending,
|
||||||
|
"total": len(all_evidence),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Traceability Matrix (Anti-Fake-Evidence Phase 4a)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.get("/dashboard/traceability-matrix", response_model=TraceabilityMatrixResponse)
|
||||||
|
async def get_traceability_matrix(
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
tenant_id: str = Depends(_get_tenant_id),
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Full traceability chain: Control → Evidence → Assertions.
|
||||||
|
|
||||||
|
Loads each entity set once, builds in-memory indices, and nests
|
||||||
|
the result so the frontend can render a matrix view.
|
||||||
|
"""
|
||||||
|
ctrl_repo = ControlRepository(db)
|
||||||
|
evidence_repo = EvidenceRepository(db)
|
||||||
|
|
||||||
|
# 1. Load all three entity sets
|
||||||
|
controls = ctrl_repo.get_all()
|
||||||
|
all_evidence = evidence_repo.get_all()
|
||||||
|
all_assertions = db.query(AssertionDB).filter(
|
||||||
|
AssertionDB.entity_type == "evidence",
|
||||||
|
).all()
|
||||||
|
|
||||||
|
# 2. Index assertions by evidence_id (entity_id)
|
||||||
|
assertions_by_evidence: Dict[str, list] = {}
|
||||||
|
for a in all_assertions:
|
||||||
|
assertions_by_evidence.setdefault(a.entity_id, []).append(a)
|
||||||
|
|
||||||
|
# 3. Index evidence by control_id
|
||||||
|
evidence_by_control: Dict[str, list] = {}
|
||||||
|
for e in all_evidence:
|
||||||
|
evidence_by_control.setdefault(str(e.control_id), []).append(e)
|
||||||
|
|
||||||
|
# 4. Build nested response
|
||||||
|
result_controls: list = []
|
||||||
|
total_controls = 0
|
||||||
|
covered_controls = 0
|
||||||
|
fully_verified = 0
|
||||||
|
|
||||||
|
for ctrl in controls:
|
||||||
|
total_controls += 1
|
||||||
|
ctrl_id = str(ctrl.id)
|
||||||
|
ctrl_evidence = evidence_by_control.get(ctrl_id, [])
|
||||||
|
|
||||||
|
nested_evidence: list = []
|
||||||
|
has_evidence = len(ctrl_evidence) > 0
|
||||||
|
has_assertions = False
|
||||||
|
all_verified = True
|
||||||
|
min_conf: Optional[str] = None
|
||||||
|
conf_order = {"E0": 0, "E1": 1, "E2": 2, "E3": 3, "E4": 4}
|
||||||
|
|
||||||
|
for e in ctrl_evidence:
|
||||||
|
ev_id = str(e.id)
|
||||||
|
ev_assertions = assertions_by_evidence.get(ev_id, [])
|
||||||
|
|
||||||
|
nested_assertions = [
|
||||||
|
TraceabilityAssertion(
|
||||||
|
id=str(a.id),
|
||||||
|
sentence_text=a.sentence_text,
|
||||||
|
assertion_type=a.assertion_type or "assertion",
|
||||||
|
confidence=a.confidence or 0.0,
|
||||||
|
verified=a.verified_by is not None,
|
||||||
|
)
|
||||||
|
for a in ev_assertions
|
||||||
|
]
|
||||||
|
|
||||||
|
if nested_assertions:
|
||||||
|
has_assertions = True
|
||||||
|
for na in nested_assertions:
|
||||||
|
if not na.verified:
|
||||||
|
all_verified = False
|
||||||
|
|
||||||
|
conf = e.confidence_level.value if e.confidence_level else "E1"
|
||||||
|
if min_conf is None or conf_order.get(conf, 1) < conf_order.get(min_conf, 1):
|
||||||
|
min_conf = conf
|
||||||
|
|
||||||
|
nested_evidence.append(TraceabilityEvidence(
|
||||||
|
id=ev_id,
|
||||||
|
title=e.title,
|
||||||
|
evidence_type=e.evidence_type,
|
||||||
|
confidence_level=conf,
|
||||||
|
status=e.status.value if e.status else "valid",
|
||||||
|
assertions=nested_assertions,
|
||||||
|
))
|
||||||
|
|
||||||
|
if not has_assertions:
|
||||||
|
all_verified = False
|
||||||
|
|
||||||
|
if has_evidence:
|
||||||
|
covered_controls += 1
|
||||||
|
if has_evidence and has_assertions and all_verified:
|
||||||
|
fully_verified += 1
|
||||||
|
|
||||||
|
coverage = TraceabilityCoverage(
|
||||||
|
has_evidence=has_evidence,
|
||||||
|
has_assertions=has_assertions,
|
||||||
|
all_assertions_verified=all_verified,
|
||||||
|
min_confidence_level=min_conf,
|
||||||
|
)
|
||||||
|
|
||||||
|
result_controls.append(TraceabilityControl(
|
||||||
|
id=ctrl_id,
|
||||||
|
control_id=ctrl.control_id,
|
||||||
|
title=ctrl.title,
|
||||||
|
status=ctrl.status.value if ctrl.status else "planned",
|
||||||
|
domain=ctrl.domain.value if ctrl.domain else "unknown",
|
||||||
|
evidence=nested_evidence,
|
||||||
|
coverage=coverage,
|
||||||
|
))
|
||||||
|
|
||||||
|
summary = {
|
||||||
|
"total_controls": total_controls,
|
||||||
|
"covered_controls": covered_controls,
|
||||||
|
"fully_verified": fully_verified,
|
||||||
|
"uncovered_controls": total_controls - covered_controls,
|
||||||
|
}
|
||||||
|
|
||||||
|
return TraceabilityMatrixResponse(controls=result_controls, summary=summary)
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Reports
|
# Reports
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|||||||
@@ -26,17 +26,102 @@ from ..db import (
|
|||||||
ControlRepository,
|
ControlRepository,
|
||||||
EvidenceRepository,
|
EvidenceRepository,
|
||||||
EvidenceStatusEnum,
|
EvidenceStatusEnum,
|
||||||
|
EvidenceConfidenceEnum,
|
||||||
|
EvidenceTruthStatusEnum,
|
||||||
)
|
)
|
||||||
from ..db.models import EvidenceDB, ControlDB
|
from ..db.models import EvidenceDB, ControlDB, AuditTrailDB
|
||||||
from ..services.auto_risk_updater import AutoRiskUpdater
|
from ..services.auto_risk_updater import AutoRiskUpdater
|
||||||
from .schemas import (
|
from .schemas import (
|
||||||
EvidenceCreate, EvidenceResponse, EvidenceListResponse,
|
EvidenceCreate, EvidenceResponse, EvidenceListResponse,
|
||||||
|
EvidenceRejectRequest,
|
||||||
)
|
)
|
||||||
|
from .audit_trail_utils import log_audit_trail
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
router = APIRouter(tags=["compliance-evidence"])
|
router = APIRouter(tags=["compliance-evidence"])
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Anti-Fake-Evidence: Four-Eyes Domain Check
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
FOUR_EYES_DOMAINS = {"gov", "priv"}
|
||||||
|
|
||||||
|
|
||||||
|
def _requires_four_eyes(control_domain: str) -> bool:
|
||||||
|
"""Controls in governance/privacy domains require two independent reviewers."""
|
||||||
|
return control_domain in FOUR_EYES_DOMAINS
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Anti-Fake-Evidence: Auto-Classification Helpers
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
def _classify_confidence(source: Optional[str], evidence_type: Optional[str] = None, artifact_hash: Optional[str] = None) -> EvidenceConfidenceEnum:
|
||||||
|
"""Classify evidence confidence level based on source and metadata."""
|
||||||
|
if source == "ci_pipeline":
|
||||||
|
return EvidenceConfidenceEnum.E3
|
||||||
|
if source == "api" and artifact_hash:
|
||||||
|
return EvidenceConfidenceEnum.E3
|
||||||
|
if source == "api":
|
||||||
|
return EvidenceConfidenceEnum.E3
|
||||||
|
if source in ("manual", "upload"):
|
||||||
|
return EvidenceConfidenceEnum.E1
|
||||||
|
if source == "generated":
|
||||||
|
return EvidenceConfidenceEnum.E0
|
||||||
|
# Default for unknown sources
|
||||||
|
return EvidenceConfidenceEnum.E1
|
||||||
|
|
||||||
|
|
||||||
|
def _classify_truth_status(source: Optional[str]) -> EvidenceTruthStatusEnum:
|
||||||
|
"""Classify evidence truth status based on source."""
|
||||||
|
if source == "ci_pipeline":
|
||||||
|
return EvidenceTruthStatusEnum.OBSERVED
|
||||||
|
if source in ("manual", "upload"):
|
||||||
|
return EvidenceTruthStatusEnum.UPLOADED
|
||||||
|
if source == "generated":
|
||||||
|
return EvidenceTruthStatusEnum.GENERATED
|
||||||
|
if source == "api":
|
||||||
|
return EvidenceTruthStatusEnum.OBSERVED
|
||||||
|
return EvidenceTruthStatusEnum.UPLOADED
|
||||||
|
|
||||||
|
|
||||||
|
def _build_evidence_response(e: EvidenceDB) -> EvidenceResponse:
|
||||||
|
"""Build an EvidenceResponse from an EvidenceDB, including anti-fake fields."""
|
||||||
|
return EvidenceResponse(
|
||||||
|
id=e.id,
|
||||||
|
control_id=e.control_id,
|
||||||
|
evidence_type=e.evidence_type,
|
||||||
|
title=e.title,
|
||||||
|
description=e.description,
|
||||||
|
artifact_path=e.artifact_path,
|
||||||
|
artifact_url=e.artifact_url,
|
||||||
|
artifact_hash=e.artifact_hash,
|
||||||
|
file_size_bytes=e.file_size_bytes,
|
||||||
|
mime_type=e.mime_type,
|
||||||
|
valid_from=e.valid_from,
|
||||||
|
valid_until=e.valid_until,
|
||||||
|
status=e.status.value if e.status else None,
|
||||||
|
source=e.source,
|
||||||
|
ci_job_id=e.ci_job_id,
|
||||||
|
uploaded_by=e.uploaded_by,
|
||||||
|
collected_at=e.collected_at,
|
||||||
|
created_at=e.created_at,
|
||||||
|
confidence_level=e.confidence_level.value if e.confidence_level else None,
|
||||||
|
truth_status=e.truth_status.value if e.truth_status else None,
|
||||||
|
generation_mode=e.generation_mode,
|
||||||
|
may_be_used_as_evidence=e.may_be_used_as_evidence,
|
||||||
|
reviewed_by=e.reviewed_by,
|
||||||
|
reviewed_at=e.reviewed_at,
|
||||||
|
approval_status=e.approval_status,
|
||||||
|
first_reviewer=e.first_reviewer,
|
||||||
|
first_reviewed_at=e.first_reviewed_at,
|
||||||
|
second_reviewer=e.second_reviewer,
|
||||||
|
second_reviewed_at=e.second_reviewed_at,
|
||||||
|
requires_four_eyes=e.requires_four_eyes,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Evidence
|
# Evidence
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
@@ -80,29 +165,7 @@ async def list_evidence(
|
|||||||
offset = (page - 1) * limit
|
offset = (page - 1) * limit
|
||||||
evidence = evidence[offset:offset + limit]
|
evidence = evidence[offset:offset + limit]
|
||||||
|
|
||||||
results = [
|
results = [_build_evidence_response(e) for e in evidence]
|
||||||
EvidenceResponse(
|
|
||||||
id=e.id,
|
|
||||||
control_id=e.control_id,
|
|
||||||
evidence_type=e.evidence_type,
|
|
||||||
title=e.title,
|
|
||||||
description=e.description,
|
|
||||||
artifact_path=e.artifact_path,
|
|
||||||
artifact_url=e.artifact_url,
|
|
||||||
artifact_hash=e.artifact_hash,
|
|
||||||
file_size_bytes=e.file_size_bytes,
|
|
||||||
mime_type=e.mime_type,
|
|
||||||
valid_from=e.valid_from,
|
|
||||||
valid_until=e.valid_until,
|
|
||||||
status=e.status.value if e.status else None,
|
|
||||||
source=e.source,
|
|
||||||
ci_job_id=e.ci_job_id,
|
|
||||||
uploaded_by=e.uploaded_by,
|
|
||||||
collected_at=e.collected_at,
|
|
||||||
created_at=e.created_at,
|
|
||||||
)
|
|
||||||
for e in evidence
|
|
||||||
]
|
|
||||||
|
|
||||||
return EvidenceListResponse(evidence=results, total=total)
|
return EvidenceListResponse(evidence=results, total=total)
|
||||||
|
|
||||||
@@ -121,6 +184,22 @@ async def create_evidence(
|
|||||||
if not control:
|
if not control:
|
||||||
raise HTTPException(status_code=404, detail=f"Control {evidence_data.control_id} not found")
|
raise HTTPException(status_code=404, detail=f"Control {evidence_data.control_id} not found")
|
||||||
|
|
||||||
|
source = evidence_data.source or "api"
|
||||||
|
confidence = _classify_confidence(source, evidence_data.evidence_type)
|
||||||
|
truth = _classify_truth_status(source)
|
||||||
|
|
||||||
|
# Allow explicit override from request
|
||||||
|
if evidence_data.confidence_level:
|
||||||
|
try:
|
||||||
|
confidence = EvidenceConfidenceEnum(evidence_data.confidence_level)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
if evidence_data.truth_status:
|
||||||
|
try:
|
||||||
|
truth = EvidenceTruthStatusEnum(evidence_data.truth_status)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
evidence = repo.create(
|
evidence = repo.create(
|
||||||
control_id=control.id,
|
control_id=control.id,
|
||||||
evidence_type=evidence_data.evidence_type,
|
evidence_type=evidence_data.evidence_type,
|
||||||
@@ -129,31 +208,34 @@ async def create_evidence(
|
|||||||
artifact_url=evidence_data.artifact_url,
|
artifact_url=evidence_data.artifact_url,
|
||||||
valid_from=evidence_data.valid_from,
|
valid_from=evidence_data.valid_from,
|
||||||
valid_until=evidence_data.valid_until,
|
valid_until=evidence_data.valid_until,
|
||||||
source=evidence_data.source or "api",
|
source=source,
|
||||||
ci_job_id=evidence_data.ci_job_id,
|
ci_job_id=evidence_data.ci_job_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Set anti-fake-evidence fields
|
||||||
|
evidence.confidence_level = confidence
|
||||||
|
evidence.truth_status = truth
|
||||||
|
# Generated evidence should not be used as evidence by default
|
||||||
|
if truth == EvidenceTruthStatusEnum.GENERATED:
|
||||||
|
evidence.may_be_used_as_evidence = False
|
||||||
|
|
||||||
|
# Four-Eyes: check if the linked control's domain requires it
|
||||||
|
control_domain = control.domain.value if control.domain else ""
|
||||||
|
if _requires_four_eyes(control_domain):
|
||||||
|
evidence.requires_four_eyes = True
|
||||||
|
evidence.approval_status = "pending_first"
|
||||||
|
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
# Audit trail
|
||||||
|
log_audit_trail(
|
||||||
|
db, "evidence", evidence.id, evidence.title, "create",
|
||||||
|
performed_by=evidence_data.source or "api",
|
||||||
|
change_summary=f"Evidence created with confidence={confidence.value}, truth={truth.value}",
|
||||||
|
)
|
||||||
db.commit()
|
db.commit()
|
||||||
|
|
||||||
return EvidenceResponse(
|
return _build_evidence_response(evidence)
|
||||||
id=evidence.id,
|
|
||||||
control_id=evidence.control_id,
|
|
||||||
evidence_type=evidence.evidence_type,
|
|
||||||
title=evidence.title,
|
|
||||||
description=evidence.description,
|
|
||||||
artifact_path=evidence.artifact_path,
|
|
||||||
artifact_url=evidence.artifact_url,
|
|
||||||
artifact_hash=evidence.artifact_hash,
|
|
||||||
file_size_bytes=evidence.file_size_bytes,
|
|
||||||
mime_type=evidence.mime_type,
|
|
||||||
valid_from=evidence.valid_from,
|
|
||||||
valid_until=evidence.valid_until,
|
|
||||||
status=evidence.status.value if evidence.status else None,
|
|
||||||
source=evidence.source,
|
|
||||||
ci_job_id=evidence.ci_job_id,
|
|
||||||
uploaded_by=evidence.uploaded_by,
|
|
||||||
collected_at=evidence.collected_at,
|
|
||||||
created_at=evidence.created_at,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@router.delete("/evidence/{evidence_id}")
|
@router.delete("/evidence/{evidence_id}")
|
||||||
@@ -223,28 +305,20 @@ async def upload_evidence(
|
|||||||
mime_type=file.content_type,
|
mime_type=file.content_type,
|
||||||
source="upload",
|
source="upload",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Upload evidence → E1 + uploaded
|
||||||
|
evidence.confidence_level = EvidenceConfidenceEnum.E1
|
||||||
|
evidence.truth_status = EvidenceTruthStatusEnum.UPLOADED
|
||||||
|
|
||||||
|
# Four-Eyes: check if the linked control's domain requires it
|
||||||
|
control_domain = control.domain.value if control.domain else ""
|
||||||
|
if _requires_four_eyes(control_domain):
|
||||||
|
evidence.requires_four_eyes = True
|
||||||
|
evidence.approval_status = "pending_first"
|
||||||
|
|
||||||
db.commit()
|
db.commit()
|
||||||
|
|
||||||
return EvidenceResponse(
|
return _build_evidence_response(evidence)
|
||||||
id=evidence.id,
|
|
||||||
control_id=evidence.control_id,
|
|
||||||
evidence_type=evidence.evidence_type,
|
|
||||||
title=evidence.title,
|
|
||||||
description=evidence.description,
|
|
||||||
artifact_path=evidence.artifact_path,
|
|
||||||
artifact_url=evidence.artifact_url,
|
|
||||||
artifact_hash=evidence.artifact_hash,
|
|
||||||
file_size_bytes=evidence.file_size_bytes,
|
|
||||||
mime_type=evidence.mime_type,
|
|
||||||
valid_from=evidence.valid_from,
|
|
||||||
valid_until=evidence.valid_until,
|
|
||||||
status=evidence.status.value if evidence.status else None,
|
|
||||||
source=evidence.source,
|
|
||||||
ci_job_id=evidence.ci_job_id,
|
|
||||||
uploaded_by=evidence.uploaded_by,
|
|
||||||
collected_at=evidence.collected_at,
|
|
||||||
created_at=evidence.created_at,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
@@ -357,7 +431,7 @@ def _store_evidence(
|
|||||||
with open(file_path, "w") as f:
|
with open(file_path, "w") as f:
|
||||||
json.dump(report_data or {}, f, indent=2)
|
json.dump(report_data or {}, f, indent=2)
|
||||||
|
|
||||||
# Create evidence record
|
# Create evidence record with anti-fake-evidence classification
|
||||||
evidence = EvidenceDB(
|
evidence = EvidenceDB(
|
||||||
id=str(uuid_module.uuid4()),
|
id=str(uuid_module.uuid4()),
|
||||||
control_id=control_db_id,
|
control_id=control_db_id,
|
||||||
@@ -373,6 +447,10 @@ def _store_evidence(
|
|||||||
valid_from=datetime.utcnow(),
|
valid_from=datetime.utcnow(),
|
||||||
valid_until=datetime.utcnow() + timedelta(days=90),
|
valid_until=datetime.utcnow() + timedelta(days=90),
|
||||||
status=EvidenceStatusEnum(parsed["evidence_status"]),
|
status=EvidenceStatusEnum(parsed["evidence_status"]),
|
||||||
|
# CI pipeline evidence → E3 observed (system-observed, hash-verified)
|
||||||
|
confidence_level=EvidenceConfidenceEnum.E3,
|
||||||
|
truth_status=EvidenceTruthStatusEnum.OBSERVED,
|
||||||
|
may_be_used_as_evidence=True,
|
||||||
)
|
)
|
||||||
db.add(evidence)
|
db.add(evidence)
|
||||||
db.commit()
|
db.commit()
|
||||||
@@ -639,3 +717,169 @@ async def get_ci_evidence_status(
|
|||||||
"total_evidence": len(evidence_list),
|
"total_evidence": len(evidence_list),
|
||||||
"controls": result,
|
"controls": result,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Evidence Review (Anti-Fake-Evidence)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
from pydantic import BaseModel as _BaseModel
|
||||||
|
|
||||||
|
class _EvidenceReviewRequest(_BaseModel):
|
||||||
|
confidence_level: Optional[str] = None
|
||||||
|
truth_status: Optional[str] = None
|
||||||
|
reviewed_by: str
|
||||||
|
|
||||||
|
|
||||||
|
@router.patch("/evidence/{evidence_id}/review", response_model=EvidenceResponse)
|
||||||
|
async def review_evidence(
|
||||||
|
evidence_id: str,
|
||||||
|
review: _EvidenceReviewRequest,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Review evidence: upgrade confidence level and/or change truth status.
|
||||||
|
|
||||||
|
For Four-Eyes evidence, the first reviewer sets first_reviewer and
|
||||||
|
approval_status='first_approved'. A second (different) reviewer then
|
||||||
|
sets second_reviewer and approval_status='approved'.
|
||||||
|
"""
|
||||||
|
evidence = db.query(EvidenceDB).filter(EvidenceDB.id == evidence_id).first()
|
||||||
|
if not evidence:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Evidence {evidence_id} not found")
|
||||||
|
|
||||||
|
old_confidence = evidence.confidence_level.value if evidence.confidence_level else None
|
||||||
|
old_truth = evidence.truth_status.value if evidence.truth_status else None
|
||||||
|
|
||||||
|
if review.confidence_level:
|
||||||
|
try:
|
||||||
|
evidence.confidence_level = EvidenceConfidenceEnum(review.confidence_level)
|
||||||
|
except ValueError:
|
||||||
|
raise HTTPException(status_code=400, detail=f"Invalid confidence_level: {review.confidence_level}")
|
||||||
|
|
||||||
|
if review.truth_status:
|
||||||
|
try:
|
||||||
|
evidence.truth_status = EvidenceTruthStatusEnum(review.truth_status)
|
||||||
|
except ValueError:
|
||||||
|
raise HTTPException(status_code=400, detail=f"Invalid truth_status: {review.truth_status}")
|
||||||
|
|
||||||
|
# Four-Eyes branching
|
||||||
|
if evidence.requires_four_eyes:
|
||||||
|
status = evidence.approval_status or "none"
|
||||||
|
if status in ("none", "pending_first"):
|
||||||
|
evidence.first_reviewer = review.reviewed_by
|
||||||
|
evidence.first_reviewed_at = datetime.utcnow()
|
||||||
|
evidence.approval_status = "first_approved"
|
||||||
|
elif status == "first_approved":
|
||||||
|
if review.reviewed_by == evidence.first_reviewer:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=400,
|
||||||
|
detail="Four-Eyes: second reviewer must be different from first reviewer",
|
||||||
|
)
|
||||||
|
evidence.second_reviewer = review.reviewed_by
|
||||||
|
evidence.second_reviewed_at = datetime.utcnow()
|
||||||
|
evidence.approval_status = "approved"
|
||||||
|
elif status == "approved":
|
||||||
|
raise HTTPException(status_code=400, detail="Evidence already approved")
|
||||||
|
elif status == "rejected":
|
||||||
|
raise HTTPException(status_code=400, detail="Evidence was rejected — create new evidence instead")
|
||||||
|
|
||||||
|
evidence.reviewed_by = review.reviewed_by
|
||||||
|
evidence.reviewed_at = datetime.utcnow()
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
# Audit trail
|
||||||
|
new_confidence = evidence.confidence_level.value if evidence.confidence_level else None
|
||||||
|
if old_confidence != new_confidence:
|
||||||
|
log_audit_trail(
|
||||||
|
db, "evidence", evidence_id, evidence.title, "review",
|
||||||
|
performed_by=review.reviewed_by,
|
||||||
|
field_changed="confidence_level",
|
||||||
|
old_value=old_confidence,
|
||||||
|
new_value=new_confidence,
|
||||||
|
)
|
||||||
|
new_truth = evidence.truth_status.value if evidence.truth_status else None
|
||||||
|
if old_truth != new_truth:
|
||||||
|
log_audit_trail(
|
||||||
|
db, "evidence", evidence_id, evidence.title, "review",
|
||||||
|
performed_by=review.reviewed_by,
|
||||||
|
field_changed="truth_status",
|
||||||
|
old_value=old_truth,
|
||||||
|
new_value=new_truth,
|
||||||
|
)
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
db.refresh(evidence)
|
||||||
|
return _build_evidence_response(evidence)
|
||||||
|
|
||||||
|
|
||||||
|
@router.patch("/evidence/{evidence_id}/reject", response_model=EvidenceResponse)
|
||||||
|
async def reject_evidence(
|
||||||
|
evidence_id: str,
|
||||||
|
body: EvidenceRejectRequest,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Reject evidence (sets approval_status='rejected')."""
|
||||||
|
evidence = db.query(EvidenceDB).filter(EvidenceDB.id == evidence_id).first()
|
||||||
|
if not evidence:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Evidence {evidence_id} not found")
|
||||||
|
|
||||||
|
evidence.approval_status = "rejected"
|
||||||
|
evidence.reviewed_by = body.reviewed_by
|
||||||
|
evidence.reviewed_at = datetime.utcnow()
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
log_audit_trail(
|
||||||
|
db, "evidence", evidence_id, evidence.title, "reject",
|
||||||
|
performed_by=body.reviewed_by,
|
||||||
|
change_summary=body.rejection_reason or "Evidence rejected",
|
||||||
|
)
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
db.refresh(evidence)
|
||||||
|
return _build_evidence_response(evidence)
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Audit Trail Query
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.get("/audit-trail")
|
||||||
|
async def get_audit_trail(
|
||||||
|
entity_type: Optional[str] = Query(None),
|
||||||
|
entity_id: Optional[str] = Query(None),
|
||||||
|
action: Optional[str] = Query(None),
|
||||||
|
limit: int = Query(50, ge=1, le=200),
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Query audit trail entries for an entity."""
|
||||||
|
query = db.query(AuditTrailDB)
|
||||||
|
if entity_type:
|
||||||
|
query = query.filter(AuditTrailDB.entity_type == entity_type)
|
||||||
|
if entity_id:
|
||||||
|
query = query.filter(AuditTrailDB.entity_id == entity_id)
|
||||||
|
if action:
|
||||||
|
query = query.filter(AuditTrailDB.action == action)
|
||||||
|
|
||||||
|
records = query.order_by(AuditTrailDB.performed_at.desc()).limit(limit).all()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"entries": [
|
||||||
|
{
|
||||||
|
"id": r.id,
|
||||||
|
"entity_type": r.entity_type,
|
||||||
|
"entity_id": r.entity_id,
|
||||||
|
"entity_name": r.entity_name,
|
||||||
|
"action": r.action,
|
||||||
|
"field_changed": r.field_changed,
|
||||||
|
"old_value": r.old_value,
|
||||||
|
"new_value": r.new_value,
|
||||||
|
"change_summary": r.change_summary,
|
||||||
|
"performed_by": r.performed_by,
|
||||||
|
"performed_at": r.performed_at.isoformat() if r.performed_at else None,
|
||||||
|
"checksum": r.checksum,
|
||||||
|
}
|
||||||
|
for r in records
|
||||||
|
],
|
||||||
|
"total": len(records),
|
||||||
|
}
|
||||||
|
|||||||
@@ -73,39 +73,8 @@ def generate_id() -> str:
|
|||||||
return str(uuid.uuid4())
|
return str(uuid.uuid4())
|
||||||
|
|
||||||
|
|
||||||
def create_signature(data: str) -> str:
|
# Shared audit trail utilities — canonical implementation in audit_trail_utils.py
|
||||||
"""Create SHA-256 signature."""
|
from .audit_trail_utils import log_audit_trail, create_signature # noqa: E402
|
||||||
return hashlib.sha256(data.encode()).hexdigest()
|
|
||||||
|
|
||||||
|
|
||||||
def log_audit_trail(
|
|
||||||
db: Session,
|
|
||||||
entity_type: str,
|
|
||||||
entity_id: str,
|
|
||||||
entity_name: str,
|
|
||||||
action: str,
|
|
||||||
performed_by: str,
|
|
||||||
field_changed: str = None,
|
|
||||||
old_value: str = None,
|
|
||||||
new_value: str = None,
|
|
||||||
change_summary: str = None
|
|
||||||
):
|
|
||||||
"""Log an entry to the audit trail."""
|
|
||||||
trail = AuditTrailDB(
|
|
||||||
id=generate_id(),
|
|
||||||
entity_type=entity_type,
|
|
||||||
entity_id=entity_id,
|
|
||||||
entity_name=entity_name,
|
|
||||||
action=action,
|
|
||||||
field_changed=field_changed,
|
|
||||||
old_value=old_value,
|
|
||||||
new_value=new_value,
|
|
||||||
change_summary=change_summary,
|
|
||||||
performed_by=performed_by,
|
|
||||||
performed_at=datetime.utcnow(),
|
|
||||||
checksum=create_signature(f"{entity_type}|{entity_id}|{action}|{performed_by}")
|
|
||||||
)
|
|
||||||
db.add(trail)
|
|
||||||
|
|
||||||
|
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
|
|||||||
162
backend-compliance/compliance/api/llm_audit_routes.py
Normal file
162
backend-compliance/compliance/api/llm_audit_routes.py
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
"""
|
||||||
|
FastAPI routes for LLM Generation Audit Trail.
|
||||||
|
|
||||||
|
Endpoints:
|
||||||
|
- POST /llm-audit: Record an LLM generation event
|
||||||
|
- GET /llm-audit: List audit records with filters
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import uuid as uuid_module
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from fastapi import APIRouter, Depends, Query
|
||||||
|
from pydantic import BaseModel
|
||||||
|
from sqlalchemy.orm import Session
|
||||||
|
|
||||||
|
from classroom_engine.database import get_db
|
||||||
|
from ..db.models import LLMGenerationAuditDB
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
router = APIRouter(tags=["compliance-llm-audit"])
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Schemas
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class LLMAuditCreate(BaseModel):
|
||||||
|
entity_type: str
|
||||||
|
entity_id: Optional[str] = None
|
||||||
|
generation_mode: str
|
||||||
|
truth_status: str = "generated"
|
||||||
|
may_be_used_as_evidence: bool = False
|
||||||
|
llm_model: Optional[str] = None
|
||||||
|
llm_provider: Optional[str] = None
|
||||||
|
prompt_hash: Optional[str] = None
|
||||||
|
input_summary: Optional[str] = None
|
||||||
|
output_summary: Optional[str] = None
|
||||||
|
metadata: Optional[dict] = None
|
||||||
|
tenant_id: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
class LLMAuditResponse(BaseModel):
|
||||||
|
id: str
|
||||||
|
tenant_id: Optional[str] = None
|
||||||
|
entity_type: str
|
||||||
|
entity_id: Optional[str] = None
|
||||||
|
generation_mode: str
|
||||||
|
truth_status: str
|
||||||
|
may_be_used_as_evidence: bool
|
||||||
|
llm_model: Optional[str] = None
|
||||||
|
llm_provider: Optional[str] = None
|
||||||
|
prompt_hash: Optional[str] = None
|
||||||
|
input_summary: Optional[str] = None
|
||||||
|
output_summary: Optional[str] = None
|
||||||
|
metadata: Optional[dict] = None
|
||||||
|
created_at: datetime
|
||||||
|
|
||||||
|
class Config:
|
||||||
|
from_attributes = True
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Routes
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.post("/llm-audit", response_model=LLMAuditResponse)
|
||||||
|
async def create_llm_audit(
|
||||||
|
data: LLMAuditCreate,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""Record an LLM generation event for audit trail."""
|
||||||
|
from ..db.models import EvidenceTruthStatusEnum
|
||||||
|
|
||||||
|
# Validate truth_status
|
||||||
|
try:
|
||||||
|
truth_enum = EvidenceTruthStatusEnum(data.truth_status)
|
||||||
|
except ValueError:
|
||||||
|
truth_enum = EvidenceTruthStatusEnum.GENERATED
|
||||||
|
|
||||||
|
record = LLMGenerationAuditDB(
|
||||||
|
id=str(uuid_module.uuid4()),
|
||||||
|
tenant_id=data.tenant_id,
|
||||||
|
entity_type=data.entity_type,
|
||||||
|
entity_id=data.entity_id,
|
||||||
|
generation_mode=data.generation_mode,
|
||||||
|
truth_status=truth_enum,
|
||||||
|
may_be_used_as_evidence=data.may_be_used_as_evidence,
|
||||||
|
llm_model=data.llm_model,
|
||||||
|
llm_provider=data.llm_provider,
|
||||||
|
prompt_hash=data.prompt_hash,
|
||||||
|
input_summary=data.input_summary[:500] if data.input_summary else None,
|
||||||
|
output_summary=data.output_summary[:500] if data.output_summary else None,
|
||||||
|
extra_metadata=data.metadata or {},
|
||||||
|
)
|
||||||
|
db.add(record)
|
||||||
|
db.commit()
|
||||||
|
db.refresh(record)
|
||||||
|
|
||||||
|
return LLMAuditResponse(
|
||||||
|
id=record.id,
|
||||||
|
tenant_id=record.tenant_id,
|
||||||
|
entity_type=record.entity_type,
|
||||||
|
entity_id=record.entity_id,
|
||||||
|
generation_mode=record.generation_mode,
|
||||||
|
truth_status=record.truth_status.value if record.truth_status else "generated",
|
||||||
|
may_be_used_as_evidence=record.may_be_used_as_evidence,
|
||||||
|
llm_model=record.llm_model,
|
||||||
|
llm_provider=record.llm_provider,
|
||||||
|
prompt_hash=record.prompt_hash,
|
||||||
|
input_summary=record.input_summary,
|
||||||
|
output_summary=record.output_summary,
|
||||||
|
metadata=record.extra_metadata,
|
||||||
|
created_at=record.created_at,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/llm-audit")
|
||||||
|
async def list_llm_audit(
|
||||||
|
entity_type: Optional[str] = Query(None),
|
||||||
|
entity_id: Optional[str] = Query(None),
|
||||||
|
page: int = Query(1, ge=1),
|
||||||
|
limit: int = Query(50, ge=1, le=200),
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""List LLM generation audit records with optional filters."""
|
||||||
|
query = db.query(LLMGenerationAuditDB)
|
||||||
|
|
||||||
|
if entity_type:
|
||||||
|
query = query.filter(LLMGenerationAuditDB.entity_type == entity_type)
|
||||||
|
if entity_id:
|
||||||
|
query = query.filter(LLMGenerationAuditDB.entity_id == entity_id)
|
||||||
|
|
||||||
|
total = query.count()
|
||||||
|
offset = (page - 1) * limit
|
||||||
|
records = query.order_by(LLMGenerationAuditDB.created_at.desc()).offset(offset).limit(limit).all()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"records": [
|
||||||
|
LLMAuditResponse(
|
||||||
|
id=r.id,
|
||||||
|
tenant_id=r.tenant_id,
|
||||||
|
entity_type=r.entity_type,
|
||||||
|
entity_id=r.entity_id,
|
||||||
|
generation_mode=r.generation_mode,
|
||||||
|
truth_status=r.truth_status.value if r.truth_status else "generated",
|
||||||
|
may_be_used_as_evidence=r.may_be_used_as_evidence,
|
||||||
|
llm_model=r.llm_model,
|
||||||
|
llm_provider=r.llm_provider,
|
||||||
|
prompt_hash=r.prompt_hash,
|
||||||
|
input_summary=r.input_summary,
|
||||||
|
output_summary=r.output_summary,
|
||||||
|
metadata=r.extra_metadata,
|
||||||
|
created_at=r.created_at,
|
||||||
|
)
|
||||||
|
for r in records
|
||||||
|
],
|
||||||
|
"total": total,
|
||||||
|
"page": page,
|
||||||
|
"limit": limit,
|
||||||
|
}
|
||||||
@@ -25,6 +25,7 @@ from sqlalchemy.orm import Session
|
|||||||
|
|
||||||
from classroom_engine.database import get_db
|
from classroom_engine.database import get_db
|
||||||
|
|
||||||
|
from .audit_trail_utils import log_audit_trail
|
||||||
from ..db import (
|
from ..db import (
|
||||||
RegulationRepository,
|
RegulationRepository,
|
||||||
RequirementRepository,
|
RequirementRepository,
|
||||||
@@ -595,6 +596,7 @@ async def get_control(control_id: str, db: Session = Depends(get_db)):
|
|||||||
review_frequency_days=control.review_frequency_days,
|
review_frequency_days=control.review_frequency_days,
|
||||||
status=control.status.value if control.status else None,
|
status=control.status.value if control.status else None,
|
||||||
status_notes=control.status_notes,
|
status_notes=control.status_notes,
|
||||||
|
status_justification=control.status_justification,
|
||||||
last_reviewed_at=control.last_reviewed_at,
|
last_reviewed_at=control.last_reviewed_at,
|
||||||
next_review_at=control.next_review_at,
|
next_review_at=control.next_review_at,
|
||||||
created_at=control.created_at,
|
created_at=control.created_at,
|
||||||
@@ -617,16 +619,52 @@ async def update_control(
|
|||||||
|
|
||||||
update_data = update.model_dump(exclude_unset=True)
|
update_data = update.model_dump(exclude_unset=True)
|
||||||
|
|
||||||
# Convert status string to enum
|
# Convert status string to enum and validate transition
|
||||||
if "status" in update_data:
|
if "status" in update_data:
|
||||||
try:
|
try:
|
||||||
update_data["status"] = ControlStatusEnum(update_data["status"])
|
new_status_enum = ControlStatusEnum(update_data["status"])
|
||||||
except ValueError:
|
except ValueError:
|
||||||
raise HTTPException(status_code=400, detail=f"Invalid status: {update_data['status']}")
|
raise HTTPException(status_code=400, detail=f"Invalid status: {update_data['status']}")
|
||||||
|
|
||||||
|
# Validate status transition (Anti-Fake-Evidence)
|
||||||
|
from ..services.control_status_machine import validate_transition
|
||||||
|
current_status = control.status.value if control.status else "planned"
|
||||||
|
evidence_list = db.query(EvidenceDB).filter(EvidenceDB.control_id == control.id).all()
|
||||||
|
allowed, violations = validate_transition(
|
||||||
|
current_status=current_status,
|
||||||
|
new_status=update_data["status"],
|
||||||
|
evidence_list=evidence_list,
|
||||||
|
status_justification=update_data.get("status_justification") or update_data.get("status_notes"),
|
||||||
|
)
|
||||||
|
if not allowed:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=409,
|
||||||
|
detail={
|
||||||
|
"error": "Status transition not allowed",
|
||||||
|
"current_status": current_status,
|
||||||
|
"requested_status": update_data["status"],
|
||||||
|
"violations": violations,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
update_data["status"] = new_status_enum
|
||||||
|
|
||||||
updated = repo.update(control.id, **update_data)
|
updated = repo.update(control.id, **update_data)
|
||||||
db.commit()
|
db.commit()
|
||||||
|
|
||||||
|
# Audit trail for status changes
|
||||||
|
new_status = updated.status.value if updated.status else None
|
||||||
|
if "status" in update.model_dump(exclude_unset=True) and current_status != new_status:
|
||||||
|
log_audit_trail(
|
||||||
|
db, "control", control.id, updated.control_id or updated.title,
|
||||||
|
"status_change",
|
||||||
|
performed_by=update.owner or "system",
|
||||||
|
field_changed="status",
|
||||||
|
old_value=current_status,
|
||||||
|
new_value=new_status,
|
||||||
|
)
|
||||||
|
db.commit()
|
||||||
|
|
||||||
return ControlResponse(
|
return ControlResponse(
|
||||||
id=updated.id,
|
id=updated.id,
|
||||||
control_id=updated.control_id,
|
control_id=updated.control_id,
|
||||||
@@ -645,6 +683,7 @@ async def update_control(
|
|||||||
review_frequency_days=updated.review_frequency_days,
|
review_frequency_days=updated.review_frequency_days,
|
||||||
status=updated.status.value if updated.status else None,
|
status=updated.status.value if updated.status else None,
|
||||||
status_notes=updated.status_notes,
|
status_notes=updated.status_notes,
|
||||||
|
status_justification=updated.status_justification,
|
||||||
last_reviewed_at=updated.last_reviewed_at,
|
last_reviewed_at=updated.last_reviewed_at,
|
||||||
next_review_at=updated.next_review_at,
|
next_review_at=updated.next_review_at,
|
||||||
created_at=updated.created_at,
|
created_at=updated.created_at,
|
||||||
@@ -690,6 +729,7 @@ async def review_control(
|
|||||||
review_frequency_days=updated.review_frequency_days,
|
review_frequency_days=updated.review_frequency_days,
|
||||||
status=updated.status.value if updated.status else None,
|
status=updated.status.value if updated.status else None,
|
||||||
status_notes=updated.status_notes,
|
status_notes=updated.status_notes,
|
||||||
|
status_justification=updated.status_justification,
|
||||||
last_reviewed_at=updated.last_reviewed_at,
|
last_reviewed_at=updated.last_reviewed_at,
|
||||||
next_review_at=updated.next_review_at,
|
next_review_at=updated.next_review_at,
|
||||||
created_at=updated.created_at,
|
created_at=updated.created_at,
|
||||||
|
|||||||
@@ -43,6 +43,7 @@ class ControlStatus(str):
|
|||||||
FAIL = "fail"
|
FAIL = "fail"
|
||||||
NOT_APPLICABLE = "n/a"
|
NOT_APPLICABLE = "n/a"
|
||||||
PLANNED = "planned"
|
PLANNED = "planned"
|
||||||
|
IN_PROGRESS = "in_progress"
|
||||||
|
|
||||||
|
|
||||||
class RiskLevel(str):
|
class RiskLevel(str):
|
||||||
@@ -209,12 +210,14 @@ class ControlUpdate(BaseModel):
|
|||||||
owner: Optional[str] = None
|
owner: Optional[str] = None
|
||||||
status: Optional[str] = None
|
status: Optional[str] = None
|
||||||
status_notes: Optional[str] = None
|
status_notes: Optional[str] = None
|
||||||
|
status_justification: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
class ControlResponse(ControlBase):
|
class ControlResponse(ControlBase):
|
||||||
id: str
|
id: str
|
||||||
status: str
|
status: str
|
||||||
status_notes: Optional[str] = None
|
status_notes: Optional[str] = None
|
||||||
|
status_justification: Optional[str] = None
|
||||||
last_reviewed_at: Optional[datetime] = None
|
last_reviewed_at: Optional[datetime] = None
|
||||||
next_review_at: Optional[datetime] = None
|
next_review_at: Optional[datetime] = None
|
||||||
created_at: datetime
|
created_at: datetime
|
||||||
@@ -291,7 +294,8 @@ class EvidenceBase(BaseModel):
|
|||||||
|
|
||||||
|
|
||||||
class EvidenceCreate(EvidenceBase):
|
class EvidenceCreate(EvidenceBase):
|
||||||
pass
|
confidence_level: Optional[str] = None
|
||||||
|
truth_status: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
class EvidenceResponse(EvidenceBase):
|
class EvidenceResponse(EvidenceBase):
|
||||||
@@ -304,6 +308,20 @@ class EvidenceResponse(EvidenceBase):
|
|||||||
uploaded_by: Optional[str] = None
|
uploaded_by: Optional[str] = None
|
||||||
collected_at: datetime
|
collected_at: datetime
|
||||||
created_at: datetime
|
created_at: datetime
|
||||||
|
# Anti-Fake-Evidence fields
|
||||||
|
confidence_level: Optional[str] = None
|
||||||
|
truth_status: Optional[str] = None
|
||||||
|
generation_mode: Optional[str] = None
|
||||||
|
may_be_used_as_evidence: Optional[bool] = None
|
||||||
|
reviewed_by: Optional[str] = None
|
||||||
|
reviewed_at: Optional[datetime] = None
|
||||||
|
# Anti-Fake-Evidence Phase 2: Four-Eyes
|
||||||
|
approval_status: Optional[str] = None
|
||||||
|
first_reviewer: Optional[str] = None
|
||||||
|
first_reviewed_at: Optional[datetime] = None
|
||||||
|
second_reviewer: Optional[str] = None
|
||||||
|
second_reviewed_at: Optional[datetime] = None
|
||||||
|
requires_four_eyes: Optional[bool] = None
|
||||||
|
|
||||||
class Config:
|
class Config:
|
||||||
from_attributes = True
|
from_attributes = True
|
||||||
@@ -435,6 +453,25 @@ class AISystemListResponse(BaseModel):
|
|||||||
# Dashboard & Export Schemas
|
# Dashboard & Export Schemas
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|
||||||
|
class MultiDimensionalScore(BaseModel):
|
||||||
|
"""Multi-dimensional compliance score (Anti-Fake-Evidence)."""
|
||||||
|
requirement_coverage: float = 0.0 # % requirements with linked control
|
||||||
|
evidence_strength: float = 0.0 # Weighted avg of evidence confidence (E0=0..E4=1)
|
||||||
|
validation_quality: float = 0.0 # % evidence with truth_status >= validated_internal
|
||||||
|
evidence_freshness: float = 0.0 # % evidence not expired + reviewed < 90 days
|
||||||
|
control_effectiveness: float = 0.0 # Existing formula (pass + partial*0.5)
|
||||||
|
overall_readiness: float = 0.0 # Weighted composite
|
||||||
|
hard_blocks: List[str] = [] # Blocking issues preventing audit-readiness
|
||||||
|
|
||||||
|
|
||||||
|
class StatusTransitionError(BaseModel):
|
||||||
|
"""Error detail for forbidden control status transitions."""
|
||||||
|
allowed: bool = False
|
||||||
|
current_status: str
|
||||||
|
requested_status: str
|
||||||
|
violations: List[str] = []
|
||||||
|
|
||||||
|
|
||||||
class DashboardResponse(BaseModel):
|
class DashboardResponse(BaseModel):
|
||||||
compliance_score: float
|
compliance_score: float
|
||||||
total_regulations: int
|
total_regulations: int
|
||||||
@@ -447,6 +484,7 @@ class DashboardResponse(BaseModel):
|
|||||||
total_risks: int
|
total_risks: int
|
||||||
risks_by_level: Dict[str, int]
|
risks_by_level: Dict[str, int]
|
||||||
recent_activity: List[Dict[str, Any]]
|
recent_activity: List[Dict[str, Any]]
|
||||||
|
multi_score: Optional[MultiDimensionalScore] = None
|
||||||
|
|
||||||
|
|
||||||
class ExportRequest(BaseModel):
|
class ExportRequest(BaseModel):
|
||||||
@@ -1939,3 +1977,111 @@ class TOMStatsResponse(BaseModel):
|
|||||||
implemented: int = 0
|
implemented: int = 0
|
||||||
partial: int = 0
|
partial: int = 0
|
||||||
not_implemented: int = 0
|
not_implemented: int = 0
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Assertion Schemas (Anti-Fake-Evidence Phase 2)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class AssertionCreate(BaseModel):
|
||||||
|
entity_type: str
|
||||||
|
entity_id: str
|
||||||
|
sentence_text: str
|
||||||
|
assertion_type: Optional[str] = "assertion"
|
||||||
|
evidence_ids: Optional[List[str]] = []
|
||||||
|
normative_tier: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
class AssertionUpdate(BaseModel):
|
||||||
|
sentence_text: Optional[str] = None
|
||||||
|
assertion_type: Optional[str] = None
|
||||||
|
evidence_ids: Optional[List[str]] = None
|
||||||
|
normative_tier: Optional[str] = None
|
||||||
|
confidence: Optional[float] = None
|
||||||
|
|
||||||
|
|
||||||
|
class AssertionResponse(BaseModel):
|
||||||
|
id: str
|
||||||
|
tenant_id: Optional[str] = None
|
||||||
|
entity_type: str
|
||||||
|
entity_id: str
|
||||||
|
sentence_text: str
|
||||||
|
sentence_index: int = 0
|
||||||
|
assertion_type: str = "assertion"
|
||||||
|
evidence_ids: Optional[List[str]] = []
|
||||||
|
confidence: float = 0.0
|
||||||
|
normative_tier: Optional[str] = None
|
||||||
|
verified_by: Optional[str] = None
|
||||||
|
verified_at: Optional[datetime] = None
|
||||||
|
created_at: Optional[datetime] = None
|
||||||
|
updated_at: Optional[datetime] = None
|
||||||
|
|
||||||
|
class Config:
|
||||||
|
from_attributes = True
|
||||||
|
|
||||||
|
|
||||||
|
class AssertionListResponse(BaseModel):
|
||||||
|
assertions: List[AssertionResponse]
|
||||||
|
total: int
|
||||||
|
|
||||||
|
|
||||||
|
class AssertionSummaryResponse(BaseModel):
|
||||||
|
total_assertions: int = 0
|
||||||
|
total_facts: int = 0
|
||||||
|
total_rationale: int = 0
|
||||||
|
unverified_count: int = 0
|
||||||
|
|
||||||
|
|
||||||
|
class AssertionExtractRequest(BaseModel):
|
||||||
|
entity_type: str
|
||||||
|
entity_id: str
|
||||||
|
text: str
|
||||||
|
|
||||||
|
|
||||||
|
class EvidenceRejectRequest(BaseModel):
|
||||||
|
reviewed_by: str
|
||||||
|
rejection_reason: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Traceability Matrix (Anti-Fake-Evidence Phase 4a)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TraceabilityAssertion(BaseModel):
|
||||||
|
"""Single assertion linked to an evidence item."""
|
||||||
|
id: str
|
||||||
|
sentence_text: str
|
||||||
|
assertion_type: str = "assertion"
|
||||||
|
confidence: float = 0.0
|
||||||
|
verified: bool = False
|
||||||
|
|
||||||
|
class TraceabilityEvidence(BaseModel):
|
||||||
|
"""Evidence item with nested assertions."""
|
||||||
|
id: str
|
||||||
|
title: str
|
||||||
|
evidence_type: str
|
||||||
|
confidence_level: str = "E1"
|
||||||
|
status: str = "valid"
|
||||||
|
assertions: List[TraceabilityAssertion] = []
|
||||||
|
|
||||||
|
class TraceabilityCoverage(BaseModel):
|
||||||
|
"""Coverage flags for a single control."""
|
||||||
|
has_evidence: bool = False
|
||||||
|
has_assertions: bool = False
|
||||||
|
all_assertions_verified: bool = False
|
||||||
|
min_confidence_level: Optional[str] = None
|
||||||
|
|
||||||
|
class TraceabilityControl(BaseModel):
|
||||||
|
"""Control with nested evidence and coverage info."""
|
||||||
|
id: str
|
||||||
|
control_id: str
|
||||||
|
title: str
|
||||||
|
status: str = "planned"
|
||||||
|
domain: str = "unknown"
|
||||||
|
evidence: List[TraceabilityEvidence] = []
|
||||||
|
coverage: TraceabilityCoverage = TraceabilityCoverage()
|
||||||
|
|
||||||
|
class TraceabilityMatrixResponse(BaseModel):
|
||||||
|
"""Full traceability matrix: Controls → Evidence → Assertions."""
|
||||||
|
controls: List[TraceabilityControl]
|
||||||
|
summary: Dict[str, int]
|
||||||
|
|||||||
443
backend-compliance/compliance/data/frameworks/csa_ccm.json
Normal file
443
backend-compliance/compliance/data/frameworks/csa_ccm.json
Normal file
@@ -0,0 +1,443 @@
|
|||||||
|
{
|
||||||
|
"framework_id": "CSA_CCM",
|
||||||
|
"display_name": "Cloud Security Alliance CCM v4",
|
||||||
|
"license": {
|
||||||
|
"type": "restricted",
|
||||||
|
"rag_allowed": false,
|
||||||
|
"use_as_metadata": true,
|
||||||
|
"note": "Abstrahierte Struktur — keine Originaltexte uebernommen"
|
||||||
|
},
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"domain_id": "AIS",
|
||||||
|
"title": "Application and Interface Security",
|
||||||
|
"aliases": ["ais", "application and interface security", "anwendungssicherheit", "schnittstellensicherheit"],
|
||||||
|
"keywords": ["application", "anwendung", "interface", "schnittstelle", "api", "web", "eingabevalidierung"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AIS-01",
|
||||||
|
"title": "Application Security Policy",
|
||||||
|
"statement": "Sicherheitsrichtlinien fuer Anwendungsentwicklung und Schnittstellenmanagement muessen definiert und angewendet werden.",
|
||||||
|
"keywords": ["policy", "richtlinie", "entwicklung"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Anwendungssicherheitsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AIS-02",
|
||||||
|
"title": "Application Security Design",
|
||||||
|
"statement": "Sicherheitsanforderungen muessen in den Entwurf jeder Anwendung integriert werden.",
|
||||||
|
"keywords": ["design", "entwurf", "security by design"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Sicherheitsanforderungen im Anwendungsentwurf",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AIS-03",
|
||||||
|
"title": "Application Security Testing",
|
||||||
|
"statement": "Anwendungen muessen vor dem Deployment und regelmaessig auf Sicherheitsschwachstellen getestet werden.",
|
||||||
|
"keywords": ["testing", "test", "sast", "dast", "penetration"],
|
||||||
|
"action_hint": "test",
|
||||||
|
"object_hint": "Anwendungssicherheitstests",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AIS-04",
|
||||||
|
"title": "Secure Development Practices",
|
||||||
|
"statement": "Sichere Entwicklungspraktiken (Code Review, Pair Programming, SAST) muessen fuer alle Entwicklungsprojekte gelten.",
|
||||||
|
"keywords": ["development", "entwicklung", "code review", "sast", "praktiken"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Sichere Entwicklungspraktiken",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AIS-05",
|
||||||
|
"title": "API Security",
|
||||||
|
"statement": "APIs muessen authentifiziert, autorisiert und gegen Missbrauch geschuetzt werden.",
|
||||||
|
"keywords": ["api", "schnittstelle", "authentifizierung", "rate limiting"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "API-Sicherheitskontrollen",
|
||||||
|
"object_class": "interface"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AIS-06",
|
||||||
|
"title": "Automated Application Security Testing",
|
||||||
|
"statement": "Automatisierte Sicherheitstests muessen in die CI/CD-Pipeline integriert werden.",
|
||||||
|
"keywords": ["automatisiert", "ci/cd", "pipeline", "sast", "dast"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Automatisierte Sicherheitstests in CI/CD",
|
||||||
|
"object_class": "configuration"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "BCR",
|
||||||
|
"title": "Business Continuity and Resilience",
|
||||||
|
"aliases": ["bcr", "business continuity", "resilience", "geschaeftskontinuitaet", "resilienz"],
|
||||||
|
"keywords": ["continuity", "kontinuitaet", "resilience", "resilienz", "disaster", "recovery", "backup"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "BCR-01",
|
||||||
|
"title": "Business Continuity Planning",
|
||||||
|
"statement": "Ein Geschaeftskontinuitaetsplan muss erstellt, dokumentiert und regelmaessig getestet werden.",
|
||||||
|
"keywords": ["plan", "kontinuitaet", "geschaeft"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Geschaeftskontinuitaetsplan",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "BCR-02",
|
||||||
|
"title": "Risk Assessment for BCM",
|
||||||
|
"statement": "Risikobewertungen muessen fuer geschaeftskritische Prozesse durchgefuehrt werden.",
|
||||||
|
"keywords": ["risiko", "bewertung", "kritisch"],
|
||||||
|
"action_hint": "assess",
|
||||||
|
"object_hint": "BCM-Risikobewertung",
|
||||||
|
"object_class": "risk_artifact"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "BCR-03",
|
||||||
|
"title": "Backup and Recovery",
|
||||||
|
"statement": "Datensicherungen muessen regelmaessig erstellt und Wiederherstellungstests durchgefuehrt werden.",
|
||||||
|
"keywords": ["backup", "sicherung", "wiederherstellung", "recovery"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Datensicherung und Wiederherstellung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "BCR-04",
|
||||||
|
"title": "Disaster Recovery Planning",
|
||||||
|
"statement": "Ein Disaster-Recovery-Plan muss dokumentiert und jaehrlich getestet werden.",
|
||||||
|
"keywords": ["disaster", "recovery", "katastrophe"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Disaster-Recovery-Plan",
|
||||||
|
"object_class": "policy"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "CCC",
|
||||||
|
"title": "Change Control and Configuration Management",
|
||||||
|
"aliases": ["ccc", "change control", "configuration management", "aenderungsmanagement", "konfigurationsmanagement"],
|
||||||
|
"keywords": ["change", "aenderung", "konfiguration", "configuration", "release", "deployment"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CCC-01",
|
||||||
|
"title": "Change Management Policy",
|
||||||
|
"statement": "Ein Aenderungsmanagement-Prozess muss definiert und fuer alle Aenderungen angewendet werden.",
|
||||||
|
"keywords": ["policy", "richtlinie", "aenderung"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Aenderungsmanagement-Richtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CCC-02",
|
||||||
|
"title": "Change Testing",
|
||||||
|
"statement": "Aenderungen muessen vor der Produktivsetzung getestet und genehmigt werden.",
|
||||||
|
"keywords": ["test", "genehmigung", "approval"],
|
||||||
|
"action_hint": "test",
|
||||||
|
"object_hint": "Aenderungstests",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CCC-03",
|
||||||
|
"title": "Configuration Baseline",
|
||||||
|
"statement": "Basiskonfigurationen fuer alle Systeme muessen definiert und dokumentiert werden.",
|
||||||
|
"keywords": ["baseline", "basis", "standard"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Konfigurationsbaseline",
|
||||||
|
"object_class": "configuration"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "CEK",
|
||||||
|
"title": "Cryptography, Encryption and Key Management",
|
||||||
|
"aliases": ["cek", "cryptography", "encryption", "key management", "kryptographie", "verschluesselung", "schluesselverwaltung"],
|
||||||
|
"keywords": ["kryptographie", "verschluesselung", "schluessel", "key", "encryption", "certificate", "zertifikat"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CEK-01",
|
||||||
|
"title": "Encryption Policy",
|
||||||
|
"statement": "Verschluesselungsrichtlinien muessen definiert werden, die Algorithmen, Schluessellaengen und Einsatzbereiche festlegen.",
|
||||||
|
"keywords": ["policy", "richtlinie", "algorithmus"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Verschluesselungsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CEK-02",
|
||||||
|
"title": "Key Management",
|
||||||
|
"statement": "Kryptographische Schluessel muessen ueber ihren Lebenszyklus sicher verwaltet werden.",
|
||||||
|
"keywords": ["key", "schluessel", "management", "lebenszyklus"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Schluesselverwaltung",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CEK-03",
|
||||||
|
"title": "Data Encryption",
|
||||||
|
"statement": "Sensible Daten muessen bei Speicherung und Uebertragung verschluesselt werden.",
|
||||||
|
"keywords": ["data", "daten", "speicherung", "uebertragung"],
|
||||||
|
"action_hint": "encrypt",
|
||||||
|
"object_hint": "Datenverschluesselung",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "DSP",
|
||||||
|
"title": "Data Security and Privacy",
|
||||||
|
"aliases": ["dsp", "data security", "privacy", "datensicherheit", "datenschutz"],
|
||||||
|
"keywords": ["datenschutz", "datensicherheit", "privacy", "data security", "pii", "personenbezogen", "dsgvo"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "DSP-01",
|
||||||
|
"title": "Data Classification",
|
||||||
|
"statement": "Daten muessen nach Sensibilitaet klassifiziert und entsprechend geschuetzt werden.",
|
||||||
|
"keywords": ["klassifizierung", "sensibilitaet", "classification"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Datenklassifizierung",
|
||||||
|
"object_class": "data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "DSP-02",
|
||||||
|
"title": "Data Inventory",
|
||||||
|
"statement": "Ein Dateninventar muss gefuehrt werden, das alle Verarbeitungen personenbezogener Daten dokumentiert.",
|
||||||
|
"keywords": ["inventar", "verzeichnis", "verarbeitung", "vvt"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Dateninventar",
|
||||||
|
"object_class": "register"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "DSP-03",
|
||||||
|
"title": "Data Retention and Deletion",
|
||||||
|
"statement": "Aufbewahrungsfristen muessen definiert und Daten nach Ablauf sicher geloescht werden.",
|
||||||
|
"keywords": ["retention", "aufbewahrung", "loeschung", "frist"],
|
||||||
|
"action_hint": "delete",
|
||||||
|
"object_hint": "Datenloeschung nach Frist",
|
||||||
|
"object_class": "data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "DSP-04",
|
||||||
|
"title": "Privacy Impact Assessment",
|
||||||
|
"statement": "Datenschutz-Folgenabschaetzungen muessen fuer risikoreiche Verarbeitungen durchgefuehrt werden.",
|
||||||
|
"keywords": ["dsfa", "pia", "folgenabschaetzung", "impact"],
|
||||||
|
"action_hint": "assess",
|
||||||
|
"object_hint": "Datenschutz-Folgenabschaetzung",
|
||||||
|
"object_class": "risk_artifact"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "DSP-05",
|
||||||
|
"title": "Data Subject Rights",
|
||||||
|
"statement": "Verfahren zur Bearbeitung von Betroffenenrechten muessen implementiert werden.",
|
||||||
|
"keywords": ["betroffenenrechte", "auskunft", "loeschung", "data subject"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Betroffenenrechte-Verfahren",
|
||||||
|
"object_class": "process"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "GRC",
|
||||||
|
"title": "Governance, Risk and Compliance",
|
||||||
|
"aliases": ["grc", "governance", "risk", "compliance", "risikomanagement"],
|
||||||
|
"keywords": ["governance", "risiko", "compliance", "management", "policy", "richtlinie"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "GRC-01",
|
||||||
|
"title": "Information Security Program",
|
||||||
|
"statement": "Ein umfassendes Informationssicherheitsprogramm muss etabliert und aufrechterhalten werden.",
|
||||||
|
"keywords": ["programm", "sicherheit", "information"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Informationssicherheitsprogramm",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "GRC-02",
|
||||||
|
"title": "Risk Management Program",
|
||||||
|
"statement": "Ein Risikomanagement-Programm muss implementiert werden, das Identifikation, Bewertung und Behandlung umfasst.",
|
||||||
|
"keywords": ["risiko", "management", "bewertung", "behandlung"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Risikomanagement-Programm",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "GRC-03",
|
||||||
|
"title": "Compliance Monitoring",
|
||||||
|
"statement": "Die Einhaltung regulatorischer und vertraglicher Anforderungen muss ueberwacht werden.",
|
||||||
|
"keywords": ["compliance", "einhaltung", "regulatorisch", "ueberwachung"],
|
||||||
|
"action_hint": "monitor",
|
||||||
|
"object_hint": "Compliance-Ueberwachung",
|
||||||
|
"object_class": "process"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "IAM",
|
||||||
|
"title": "Identity and Access Management",
|
||||||
|
"aliases": ["iam", "identity", "access management", "identitaetsmanagement", "zugriffsverwaltung"],
|
||||||
|
"keywords": ["identitaet", "zugriff", "identity", "access", "authentifizierung", "autorisierung", "sso"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IAM-01",
|
||||||
|
"title": "Identity and Access Policy",
|
||||||
|
"statement": "Identitaets- und Zugriffsmanagement-Richtlinien muessen definiert werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "IAM-Richtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IAM-02",
|
||||||
|
"title": "Strong Authentication",
|
||||||
|
"statement": "Starke Authentifizierung (MFA) muss fuer administrative und sicherheitskritische Zugriffe gefordert werden.",
|
||||||
|
"keywords": ["mfa", "stark", "authentifizierung", "admin"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Starke Authentifizierung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IAM-03",
|
||||||
|
"title": "Identity Lifecycle Management",
|
||||||
|
"statement": "Identitaeten muessen ueber ihren gesamten Lebenszyklus verwaltet werden.",
|
||||||
|
"keywords": ["lifecycle", "lebenszyklus", "onboarding", "offboarding"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Identitaets-Lebenszyklus",
|
||||||
|
"object_class": "account"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IAM-04",
|
||||||
|
"title": "Access Review",
|
||||||
|
"statement": "Zugriffsrechte muessen regelmaessig ueberprueft und ueberschuessige Rechte entzogen werden.",
|
||||||
|
"keywords": ["review", "ueberpruefen", "rechte", "rezertifizierung"],
|
||||||
|
"action_hint": "review",
|
||||||
|
"object_hint": "Zugriffsrechte-Review",
|
||||||
|
"object_class": "access_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "LOG",
|
||||||
|
"title": "Logging and Monitoring",
|
||||||
|
"aliases": ["log", "logging", "monitoring", "protokollierung", "ueberwachung"],
|
||||||
|
"keywords": ["logging", "monitoring", "protokollierung", "ueberwachung", "siem", "alarm"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "LOG-01",
|
||||||
|
"title": "Logging Policy",
|
||||||
|
"statement": "Protokollierungs-Richtlinien muessen definiert werden, die Umfang und Aufbewahrung festlegen.",
|
||||||
|
"keywords": ["policy", "richtlinie", "umfang", "aufbewahrung"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Protokollierungsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "LOG-02",
|
||||||
|
"title": "Security Event Logging",
|
||||||
|
"statement": "Sicherheitsrelevante Ereignisse muessen erfasst und zentral gespeichert werden.",
|
||||||
|
"keywords": ["event", "ereignis", "sicherheit", "zentral"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Sicherheits-Event-Logging",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "LOG-03",
|
||||||
|
"title": "Monitoring and Alerting",
|
||||||
|
"statement": "Sicherheitsrelevante Logs muessen ueberwacht und bei Anomalien Alarme ausgeloest werden.",
|
||||||
|
"keywords": ["monitoring", "alerting", "alarm", "anomalie"],
|
||||||
|
"action_hint": "monitor",
|
||||||
|
"object_hint": "Log-Ueberwachung und Alarmierung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "SEF",
|
||||||
|
"title": "Security Incident Management",
|
||||||
|
"aliases": ["sef", "security incident", "incident management", "vorfallmanagement", "sicherheitsvorfall"],
|
||||||
|
"keywords": ["vorfall", "incident", "sicherheitsvorfall", "reaktion", "response", "meldung"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SEF-01",
|
||||||
|
"title": "Incident Management Policy",
|
||||||
|
"statement": "Ein Vorfallmanagement-Prozess muss definiert, dokumentiert und getestet werden.",
|
||||||
|
"keywords": ["policy", "richtlinie", "prozess"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Vorfallmanagement-Richtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SEF-02",
|
||||||
|
"title": "Incident Response Team",
|
||||||
|
"statement": "Ein Incident-Response-Team muss benannt und geschult werden.",
|
||||||
|
"keywords": ["team", "response", "schulung"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Incident-Response-Team",
|
||||||
|
"object_class": "role"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SEF-03",
|
||||||
|
"title": "Incident Reporting",
|
||||||
|
"statement": "Sicherheitsvorfaelle muessen innerhalb definierter Fristen an zustaendige Stellen gemeldet werden.",
|
||||||
|
"keywords": ["reporting", "meldung", "frist", "behoerde"],
|
||||||
|
"action_hint": "report",
|
||||||
|
"object_hint": "Vorfallmeldung",
|
||||||
|
"object_class": "incident"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SEF-04",
|
||||||
|
"title": "Incident Lessons Learned",
|
||||||
|
"statement": "Nach jedem Vorfall muss eine Nachbereitung mit Lessons Learned durchgefuehrt werden.",
|
||||||
|
"keywords": ["lessons learned", "nachbereitung", "verbesserung"],
|
||||||
|
"action_hint": "review",
|
||||||
|
"object_hint": "Vorfall-Nachbereitung",
|
||||||
|
"object_class": "record"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "TVM",
|
||||||
|
"title": "Threat and Vulnerability Management",
|
||||||
|
"aliases": ["tvm", "threat", "vulnerability", "schwachstelle", "bedrohung", "schwachstellenmanagement"],
|
||||||
|
"keywords": ["schwachstelle", "vulnerability", "threat", "bedrohung", "patch", "scan"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "TVM-01",
|
||||||
|
"title": "Vulnerability Management Policy",
|
||||||
|
"statement": "Schwachstellenmanagement-Richtlinien muessen definiert und umgesetzt werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Schwachstellenmanagement-Richtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "TVM-02",
|
||||||
|
"title": "Vulnerability Scanning",
|
||||||
|
"statement": "Systeme muessen regelmaessig auf Schwachstellen gescannt werden.",
|
||||||
|
"keywords": ["scan", "scanning", "regelmaessig"],
|
||||||
|
"action_hint": "test",
|
||||||
|
"object_hint": "Schwachstellenscan",
|
||||||
|
"object_class": "system"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "TVM-03",
|
||||||
|
"title": "Vulnerability Remediation",
|
||||||
|
"statement": "Erkannte Schwachstellen muessen priorisiert und innerhalb definierter Fristen behoben werden.",
|
||||||
|
"keywords": ["remediation", "behebung", "frist", "priorisierung"],
|
||||||
|
"action_hint": "remediate",
|
||||||
|
"object_hint": "Schwachstellenbehebung",
|
||||||
|
"object_class": "system"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "TVM-04",
|
||||||
|
"title": "Penetration Testing",
|
||||||
|
"statement": "Regelmaessige Penetrationstests muessen durchgefuehrt werden.",
|
||||||
|
"keywords": ["penetration", "pentest", "test"],
|
||||||
|
"action_hint": "test",
|
||||||
|
"object_hint": "Penetrationstest",
|
||||||
|
"object_class": "system"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
514
backend-compliance/compliance/data/frameworks/nist_sp800_53.json
Normal file
514
backend-compliance/compliance/data/frameworks/nist_sp800_53.json
Normal file
@@ -0,0 +1,514 @@
|
|||||||
|
{
|
||||||
|
"framework_id": "NIST_SP800_53",
|
||||||
|
"display_name": "NIST SP 800-53 Rev. 5",
|
||||||
|
"license": {
|
||||||
|
"type": "public_domain",
|
||||||
|
"rag_allowed": true,
|
||||||
|
"use_as_metadata": true
|
||||||
|
},
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"domain_id": "AC",
|
||||||
|
"title": "Access Control",
|
||||||
|
"aliases": ["access control", "zugriffskontrolle", "zugriffssteuerung"],
|
||||||
|
"keywords": ["access", "zugriff", "berechtigung", "authorization", "autorisierung"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AC-1",
|
||||||
|
"title": "Access Control Policy and Procedures",
|
||||||
|
"statement": "Zugriffskontrollrichtlinien und -verfahren muessen definiert, dokumentiert und regelmaessig ueberprueft werden.",
|
||||||
|
"keywords": ["policy", "richtlinie", "verfahren", "procedures"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Zugriffskontrollrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AC-2",
|
||||||
|
"title": "Account Management",
|
||||||
|
"statement": "Benutzerkonten muessen ueber ihren gesamten Lebenszyklus verwaltet werden: Erstellung, Aktivierung, Aenderung, Deaktivierung und Loeschung.",
|
||||||
|
"keywords": ["account", "konto", "benutzer", "lifecycle", "lebenszyklus"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Benutzerkontenverwaltung",
|
||||||
|
"object_class": "account"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AC-3",
|
||||||
|
"title": "Access Enforcement",
|
||||||
|
"statement": "Der Zugriff auf Systemressourcen muss gemaess der definierten Zugriffskontrollrichtlinie durchgesetzt werden.",
|
||||||
|
"keywords": ["enforcement", "durchsetzung", "ressourcen", "system"],
|
||||||
|
"action_hint": "restrict_access",
|
||||||
|
"object_hint": "Zugriffsdurchsetzung",
|
||||||
|
"object_class": "access_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AC-5",
|
||||||
|
"title": "Separation of Duties",
|
||||||
|
"statement": "Aufgabentrennung muss definiert und durchgesetzt werden, um Interessenkonflikte und Missbrauch zu verhindern.",
|
||||||
|
"keywords": ["separation", "trennung", "duties", "aufgaben", "funktionstrennung"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Aufgabentrennung",
|
||||||
|
"object_class": "role"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AC-6",
|
||||||
|
"title": "Least Privilege",
|
||||||
|
"statement": "Zugriffsrechte muessen nach dem Prinzip der minimalen Rechte vergeben werden.",
|
||||||
|
"keywords": ["least privilege", "minimal", "rechte", "privileg"],
|
||||||
|
"action_hint": "restrict_access",
|
||||||
|
"object_hint": "Minimale Rechtevergabe",
|
||||||
|
"object_class": "access_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AC-7",
|
||||||
|
"title": "Unsuccessful Logon Attempts",
|
||||||
|
"statement": "Fehlgeschlagene Anmeldeversuche muessen begrenzt und ueberwacht werden.",
|
||||||
|
"keywords": ["logon", "anmeldung", "fehlgeschlagen", "sperre", "lockout"],
|
||||||
|
"action_hint": "monitor",
|
||||||
|
"object_hint": "Anmeldeversuchsueberwachung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AC-17",
|
||||||
|
"title": "Remote Access",
|
||||||
|
"statement": "Fernzugriff muss autorisiert, ueberwacht und verschluesselt werden.",
|
||||||
|
"keywords": ["remote", "fern", "vpn", "fernzugriff"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Fernzugriffskonfiguration",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "AU",
|
||||||
|
"title": "Audit and Accountability",
|
||||||
|
"aliases": ["audit", "protokollierung", "accountability", "rechenschaftspflicht"],
|
||||||
|
"keywords": ["audit", "log", "protokoll", "nachvollziehbarkeit", "logging"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AU-1",
|
||||||
|
"title": "Audit Policy and Procedures",
|
||||||
|
"statement": "Audit- und Protokollierungsrichtlinien muessen definiert und regelmaessig ueberprueft werden.",
|
||||||
|
"keywords": ["policy", "richtlinie", "audit"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Auditrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AU-2",
|
||||||
|
"title": "Event Logging",
|
||||||
|
"statement": "Sicherheitsrelevante Ereignisse muessen identifiziert und protokolliert werden.",
|
||||||
|
"keywords": ["event", "ereignis", "logging", "protokollierung"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Ereignisprotokollierung",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AU-3",
|
||||||
|
"title": "Content of Audit Records",
|
||||||
|
"statement": "Audit-Eintraege muessen ausreichende Informationen enthalten: Zeitstempel, Quelle, Ergebnis, Identitaet.",
|
||||||
|
"keywords": ["content", "inhalt", "record", "eintrag"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Audit-Eintragsformat",
|
||||||
|
"object_class": "record"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AU-6",
|
||||||
|
"title": "Audit Record Review and Reporting",
|
||||||
|
"statement": "Audit-Eintraege muessen regelmaessig ueberprueft und bei Anomalien berichtet werden.",
|
||||||
|
"keywords": ["review", "ueberpruefen", "reporting", "anomalie"],
|
||||||
|
"action_hint": "review",
|
||||||
|
"object_hint": "Audit-Ueberpruefung",
|
||||||
|
"object_class": "record"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AU-9",
|
||||||
|
"title": "Protection of Audit Information",
|
||||||
|
"statement": "Audit-Daten muessen vor unbefugtem Zugriff, Aenderung und Loeschung geschuetzt werden.",
|
||||||
|
"keywords": ["schutz", "protection", "integritaet", "integrity"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Audit-Datenschutz",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "AT",
|
||||||
|
"title": "Awareness and Training",
|
||||||
|
"aliases": ["awareness", "training", "schulung", "sensibilisierung"],
|
||||||
|
"keywords": ["training", "schulung", "awareness", "sensibilisierung", "weiterbildung"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AT-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "Schulungs- und Sensibilisierungsrichtlinien muessen definiert und regelmaessig aktualisiert werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Schulungsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AT-2",
|
||||||
|
"title": "Literacy Training and Awareness",
|
||||||
|
"statement": "Alle Mitarbeiter muessen regelmaessig Sicherheitsschulungen erhalten.",
|
||||||
|
"keywords": ["mitarbeiter", "schulung", "sicherheit"],
|
||||||
|
"action_hint": "train",
|
||||||
|
"object_hint": "Sicherheitsschulung",
|
||||||
|
"object_class": "training"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "AT-3",
|
||||||
|
"title": "Role-Based Training",
|
||||||
|
"statement": "Rollenbasierte Sicherheitsschulungen muessen fuer Mitarbeiter mit besonderen Sicherheitsaufgaben durchgefuehrt werden.",
|
||||||
|
"keywords": ["rollenbasiert", "role-based", "speziell"],
|
||||||
|
"action_hint": "train",
|
||||||
|
"object_hint": "Rollenbasierte Sicherheitsschulung",
|
||||||
|
"object_class": "training"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "CM",
|
||||||
|
"title": "Configuration Management",
|
||||||
|
"aliases": ["configuration management", "konfigurationsmanagement", "konfiguration"],
|
||||||
|
"keywords": ["konfiguration", "configuration", "baseline", "haertung", "hardening"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CM-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "Konfigurationsmanagement-Richtlinien muessen dokumentiert und gepflegt werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Konfigurationsmanagement-Richtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CM-2",
|
||||||
|
"title": "Baseline Configuration",
|
||||||
|
"statement": "Basiskonfigurationen fuer Systeme muessen definiert, dokumentiert und gepflegt werden.",
|
||||||
|
"keywords": ["baseline", "basis", "standard"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Basiskonfiguration",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CM-6",
|
||||||
|
"title": "Configuration Settings",
|
||||||
|
"statement": "Sicherheitsrelevante Konfigurationseinstellungen muessen definiert und durchgesetzt werden.",
|
||||||
|
"keywords": ["settings", "einstellungen", "sicherheit"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Sicherheitskonfiguration",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CM-7",
|
||||||
|
"title": "Least Functionality",
|
||||||
|
"statement": "Systeme muessen so konfiguriert werden, dass nur notwendige Funktionen aktiv sind.",
|
||||||
|
"keywords": ["least functionality", "minimal", "dienste", "ports"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Minimalkonfiguration",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "CM-8",
|
||||||
|
"title": "System Component Inventory",
|
||||||
|
"statement": "Ein Inventar aller Systemkomponenten muss gefuehrt und aktuell gehalten werden.",
|
||||||
|
"keywords": ["inventar", "inventory", "komponenten", "assets"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Systemkomponenten-Inventar",
|
||||||
|
"object_class": "register"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "IA",
|
||||||
|
"title": "Identification and Authentication",
|
||||||
|
"aliases": ["identification", "authentication", "identifikation", "authentifizierung"],
|
||||||
|
"keywords": ["authentifizierung", "identifikation", "identity", "passwort", "mfa", "credential"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IA-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "Identifikations- und Authentifizierungsrichtlinien muessen dokumentiert und regelmaessig ueberprueft werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Authentifizierungsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IA-2",
|
||||||
|
"title": "Identification and Authentication",
|
||||||
|
"statement": "Benutzer und Geraete muessen eindeutig identifiziert und authentifiziert werden.",
|
||||||
|
"keywords": ["benutzer", "geraete", "identifizierung"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Benutzerauthentifizierung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IA-2(1)",
|
||||||
|
"title": "Multi-Factor Authentication",
|
||||||
|
"statement": "Multi-Faktor-Authentifizierung muss fuer privilegierte Konten implementiert werden.",
|
||||||
|
"keywords": ["mfa", "multi-faktor", "zwei-faktor", "2fa"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Multi-Faktor-Authentifizierung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IA-5",
|
||||||
|
"title": "Authenticator Management",
|
||||||
|
"statement": "Authentifizierungsmittel (Passwoerter, Token, Zertifikate) muessen sicher verwaltet werden.",
|
||||||
|
"keywords": ["passwort", "token", "zertifikat", "credential"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Authentifizierungsmittel-Verwaltung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "IR",
|
||||||
|
"title": "Incident Response",
|
||||||
|
"aliases": ["incident response", "vorfallbehandlung", "vorfallreaktion", "incident management"],
|
||||||
|
"keywords": ["vorfall", "incident", "reaktion", "response", "breach", "sicherheitsvorfall"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IR-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "Vorfallreaktionsrichtlinien und -verfahren muessen definiert und regelmaessig aktualisiert werden.",
|
||||||
|
"keywords": ["policy", "richtlinie", "verfahren"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Vorfallreaktionsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IR-2",
|
||||||
|
"title": "Incident Response Training",
|
||||||
|
"statement": "Mitarbeiter muessen regelmaessig in der Vorfallreaktion geschult werden.",
|
||||||
|
"keywords": ["training", "schulung"],
|
||||||
|
"action_hint": "train",
|
||||||
|
"object_hint": "Vorfallreaktionsschulung",
|
||||||
|
"object_class": "training"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IR-4",
|
||||||
|
"title": "Incident Handling",
|
||||||
|
"statement": "Ein strukturierter Prozess fuer die Vorfallbehandlung muss implementiert werden: Erkennung, Analyse, Eindaemmung, Behebung.",
|
||||||
|
"keywords": ["handling", "behandlung", "erkennung", "eindaemmung"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Vorfallbehandlungsprozess",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IR-5",
|
||||||
|
"title": "Incident Monitoring",
|
||||||
|
"statement": "Sicherheitsvorfaelle muessen kontinuierlich ueberwacht und verfolgt werden.",
|
||||||
|
"keywords": ["monitoring", "ueberwachung", "tracking"],
|
||||||
|
"action_hint": "monitor",
|
||||||
|
"object_hint": "Vorfallsueberwachung",
|
||||||
|
"object_class": "incident"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IR-6",
|
||||||
|
"title": "Incident Reporting",
|
||||||
|
"statement": "Sicherheitsvorfaelle muessen innerhalb definierter Fristen an die zustaendigen Stellen gemeldet werden.",
|
||||||
|
"keywords": ["reporting", "meldung", "melden", "frist"],
|
||||||
|
"action_hint": "report",
|
||||||
|
"object_hint": "Vorfallmeldung",
|
||||||
|
"object_class": "incident"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "IR-8",
|
||||||
|
"title": "Incident Response Plan",
|
||||||
|
"statement": "Ein Vorfallreaktionsplan muss dokumentiert und regelmaessig getestet werden.",
|
||||||
|
"keywords": ["plan", "dokumentation", "test"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Vorfallreaktionsplan",
|
||||||
|
"object_class": "policy"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "RA",
|
||||||
|
"title": "Risk Assessment",
|
||||||
|
"aliases": ["risk assessment", "risikobewertung", "risikoanalyse"],
|
||||||
|
"keywords": ["risiko", "risk", "bewertung", "assessment", "analyse", "bedrohung", "threat"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "RA-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "Risikobewertungsrichtlinien muessen dokumentiert und regelmaessig aktualisiert werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Risikobewertungsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "RA-3",
|
||||||
|
"title": "Risk Assessment",
|
||||||
|
"statement": "Regelmaessige Risikobewertungen muessen durchgefuehrt und dokumentiert werden.",
|
||||||
|
"keywords": ["bewertung", "assessment", "regelmaessig"],
|
||||||
|
"action_hint": "assess",
|
||||||
|
"object_hint": "Risikobewertung",
|
||||||
|
"object_class": "risk_artifact"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "RA-5",
|
||||||
|
"title": "Vulnerability Monitoring and Scanning",
|
||||||
|
"statement": "Systeme muessen regelmaessig auf Schwachstellen gescannt und ueberwacht werden.",
|
||||||
|
"keywords": ["vulnerability", "schwachstelle", "scan", "monitoring"],
|
||||||
|
"action_hint": "monitor",
|
||||||
|
"object_hint": "Schwachstellenueberwachung",
|
||||||
|
"object_class": "system"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "SC",
|
||||||
|
"title": "System and Communications Protection",
|
||||||
|
"aliases": ["system protection", "communications protection", "kommunikationsschutz", "systemschutz"],
|
||||||
|
"keywords": ["verschluesselung", "encryption", "tls", "netzwerk", "network", "kommunikation", "firewall"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SC-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "System- und Kommunikationsschutzrichtlinien muessen dokumentiert und aktuell gehalten werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Kommunikationsschutzrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SC-7",
|
||||||
|
"title": "Boundary Protection",
|
||||||
|
"statement": "Netzwerkgrenzen muessen durch Firewall-Regeln und Zugangskontrollen geschuetzt werden.",
|
||||||
|
"keywords": ["boundary", "grenze", "firewall", "netzwerk"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Netzwerkgrenzschutz",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SC-8",
|
||||||
|
"title": "Transmission Confidentiality and Integrity",
|
||||||
|
"statement": "Daten muessen bei der Uebertragung durch Verschluesselung geschuetzt werden.",
|
||||||
|
"keywords": ["transmission", "uebertragung", "verschluesselung", "tls"],
|
||||||
|
"action_hint": "encrypt",
|
||||||
|
"object_hint": "Uebertragungsverschluesselung",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SC-12",
|
||||||
|
"title": "Cryptographic Key Establishment and Management",
|
||||||
|
"statement": "Kryptographische Schluessel muessen sicher erzeugt, verteilt, gespeichert und widerrufen werden.",
|
||||||
|
"keywords": ["key", "schluessel", "kryptographie", "management"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Schluesselverwaltung",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SC-13",
|
||||||
|
"title": "Cryptographic Protection",
|
||||||
|
"statement": "Kryptographische Mechanismen muessen gemaess anerkannten Standards implementiert werden.",
|
||||||
|
"keywords": ["kryptographie", "verschluesselung", "standard"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Kryptographischer Schutz",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "SI",
|
||||||
|
"title": "System and Information Integrity",
|
||||||
|
"aliases": ["system integrity", "information integrity", "systemintegritaet", "informationsintegritaet"],
|
||||||
|
"keywords": ["integritaet", "integrity", "malware", "patch", "flaw", "schwachstelle"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SI-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "System- und Informationsintegritaetsrichtlinien muessen dokumentiert und regelmaessig ueberprueft werden.",
|
||||||
|
"keywords": ["policy", "richtlinie"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Integritaetsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SI-2",
|
||||||
|
"title": "Flaw Remediation",
|
||||||
|
"statement": "Bekannte Schwachstellen muessen innerhalb definierter Fristen behoben werden.",
|
||||||
|
"keywords": ["flaw", "schwachstelle", "patch", "behebung", "remediation"],
|
||||||
|
"action_hint": "remediate",
|
||||||
|
"object_hint": "Schwachstellenbehebung",
|
||||||
|
"object_class": "system"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SI-3",
|
||||||
|
"title": "Malicious Code Protection",
|
||||||
|
"statement": "Systeme muessen vor Schadsoftware geschuetzt werden durch Erkennung und Abwehrmechanismen.",
|
||||||
|
"keywords": ["malware", "schadsoftware", "antivirus", "erkennung"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Schadsoftwareschutz",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SI-4",
|
||||||
|
"title": "System Monitoring",
|
||||||
|
"statement": "Systeme muessen kontinuierlich auf Sicherheitsereignisse und Anomalien ueberwacht werden.",
|
||||||
|
"keywords": ["monitoring", "ueberwachung", "anomalie", "siem"],
|
||||||
|
"action_hint": "monitor",
|
||||||
|
"object_hint": "Systemueberwachung",
|
||||||
|
"object_class": "system"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SI-5",
|
||||||
|
"title": "Security Alerts and Advisories",
|
||||||
|
"statement": "Sicherheitswarnungen muessen empfangen, bewertet und darauf reagiert werden.",
|
||||||
|
"keywords": ["alert", "warnung", "advisory", "cve"],
|
||||||
|
"action_hint": "monitor",
|
||||||
|
"object_hint": "Sicherheitswarnungen",
|
||||||
|
"object_class": "incident"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "SA",
|
||||||
|
"title": "System and Services Acquisition",
|
||||||
|
"aliases": ["system acquisition", "services acquisition", "systembeschaffung", "secure development"],
|
||||||
|
"keywords": ["beschaffung", "acquisition", "entwicklung", "development", "lieferkette", "supply chain"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SA-1",
|
||||||
|
"title": "Policy and Procedures",
|
||||||
|
"statement": "Beschaffungsrichtlinien mit Sicherheitsanforderungen muessen dokumentiert werden.",
|
||||||
|
"keywords": ["policy", "richtlinie", "beschaffung"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Beschaffungsrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SA-8",
|
||||||
|
"title": "Security and Privacy Engineering Principles",
|
||||||
|
"statement": "Sicherheits- und Datenschutzprinzipien muessen in die Systementwicklung integriert werden.",
|
||||||
|
"keywords": ["engineering", "development", "prinzipien", "design"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Security-by-Design-Prinzipien",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SA-11",
|
||||||
|
"title": "Developer Testing and Evaluation",
|
||||||
|
"statement": "Entwickler muessen Sicherheitstests und Code-Reviews durchfuehren.",
|
||||||
|
"keywords": ["testing", "test", "code review", "evaluation"],
|
||||||
|
"action_hint": "test",
|
||||||
|
"object_hint": "Entwickler-Sicherheitstests",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "SA-12",
|
||||||
|
"title": "Supply Chain Protection",
|
||||||
|
"statement": "Lieferkettenrisiken muessen bewertet und Schutzmassnahmen implementiert werden.",
|
||||||
|
"keywords": ["supply chain", "lieferkette", "third party", "drittanbieter"],
|
||||||
|
"action_hint": "assess",
|
||||||
|
"object_hint": "Lieferkettenrisikobewertung",
|
||||||
|
"object_class": "risk_artifact"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
353
backend-compliance/compliance/data/frameworks/owasp_asvs.json
Normal file
353
backend-compliance/compliance/data/frameworks/owasp_asvs.json
Normal file
@@ -0,0 +1,353 @@
|
|||||||
|
{
|
||||||
|
"framework_id": "OWASP_ASVS",
|
||||||
|
"display_name": "OWASP Application Security Verification Standard 4.0",
|
||||||
|
"license": {
|
||||||
|
"type": "cc_by_sa_4",
|
||||||
|
"rag_allowed": true,
|
||||||
|
"use_as_metadata": true
|
||||||
|
},
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"domain_id": "V1",
|
||||||
|
"title": "Architecture, Design and Threat Modeling",
|
||||||
|
"aliases": ["architecture", "architektur", "design", "threat modeling", "bedrohungsmodellierung"],
|
||||||
|
"keywords": ["architektur", "design", "threat model", "bedrohung", "modellierung"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V1.1",
|
||||||
|
"title": "Secure Software Development Lifecycle",
|
||||||
|
"statement": "Ein sicherer Softwareentwicklungs-Lebenszyklus (SSDLC) muss definiert und angewendet werden.",
|
||||||
|
"keywords": ["sdlc", "lifecycle", "lebenszyklus", "entwicklung"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Sicherer Entwicklungs-Lebenszyklus",
|
||||||
|
"object_class": "process"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V1.2",
|
||||||
|
"title": "Authentication Architecture",
|
||||||
|
"statement": "Die Authentifizierungsarchitektur muss dokumentiert und regelmaessig ueberprueft werden.",
|
||||||
|
"keywords": ["authentication", "authentifizierung", "architektur"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Authentifizierungsarchitektur",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V1.4",
|
||||||
|
"title": "Access Control Architecture",
|
||||||
|
"statement": "Die Zugriffskontrollarchitektur muss dokumentiert und zentral durchgesetzt werden.",
|
||||||
|
"keywords": ["access control", "zugriffskontrolle", "architektur"],
|
||||||
|
"action_hint": "document",
|
||||||
|
"object_hint": "Zugriffskontrollarchitektur",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V1.5",
|
||||||
|
"title": "Input and Output Architecture",
|
||||||
|
"statement": "Eingabe- und Ausgabevalidierung muss architektonisch verankert und durchgaengig angewendet werden.",
|
||||||
|
"keywords": ["input", "output", "eingabe", "ausgabe", "validierung"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Ein-/Ausgabevalidierung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V1.6",
|
||||||
|
"title": "Cryptographic Architecture",
|
||||||
|
"statement": "Kryptographische Mechanismen muessen architektonisch definiert und standardisiert sein.",
|
||||||
|
"keywords": ["crypto", "kryptographie", "verschluesselung"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Kryptographie-Architektur",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V2",
|
||||||
|
"title": "Authentication",
|
||||||
|
"aliases": ["authentication", "authentifizierung", "anmeldung", "login"],
|
||||||
|
"keywords": ["authentication", "authentifizierung", "passwort", "login", "anmeldung", "credential"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V2.1",
|
||||||
|
"title": "Password Security",
|
||||||
|
"statement": "Passwortrichtlinien muessen Mindestlaenge, Komplexitaet und Sperrmechanismen definieren.",
|
||||||
|
"keywords": ["passwort", "password", "laenge", "komplexitaet"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Passwortrichtlinie",
|
||||||
|
"object_class": "policy"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V2.2",
|
||||||
|
"title": "General Authenticator Security",
|
||||||
|
"statement": "Authentifizierungsmittel muessen sicher gespeichert und uebertragen werden.",
|
||||||
|
"keywords": ["authenticator", "credential", "speicherung"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Sichere Credential-Verwaltung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V2.7",
|
||||||
|
"title": "Out-of-Band Verification",
|
||||||
|
"statement": "Out-of-Band-Verifikationsmechanismen muessen sicher implementiert werden.",
|
||||||
|
"keywords": ["oob", "out-of-band", "sms", "push"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Out-of-Band-Verifikation",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V2.8",
|
||||||
|
"title": "Multi-Factor Authentication",
|
||||||
|
"statement": "Multi-Faktor-Authentifizierung muss fuer sicherheitskritische Funktionen verfuegbar sein.",
|
||||||
|
"keywords": ["mfa", "multi-faktor", "totp", "fido"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Multi-Faktor-Authentifizierung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V3",
|
||||||
|
"title": "Session Management",
|
||||||
|
"aliases": ["session", "sitzung", "session management", "sitzungsverwaltung"],
|
||||||
|
"keywords": ["session", "sitzung", "token", "cookie", "timeout"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V3.1",
|
||||||
|
"title": "Session Management Security",
|
||||||
|
"statement": "Sitzungstoken muessen sicher erzeugt, uebertragen und invalidiert werden.",
|
||||||
|
"keywords": ["token", "sitzung", "sicherheit"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Sichere Sitzungsverwaltung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V3.3",
|
||||||
|
"title": "Session Termination",
|
||||||
|
"statement": "Sitzungen muessen nach Inaktivitaet und bei Abmeldung zuverlaessig beendet werden.",
|
||||||
|
"keywords": ["termination", "timeout", "abmeldung", "beenden"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Sitzungstimeout",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V3.5",
|
||||||
|
"title": "Token-Based Session Management",
|
||||||
|
"statement": "Tokenbasierte Sitzungsmechanismen muessen gegen Diebstahl und Replay geschuetzt sein.",
|
||||||
|
"keywords": ["jwt", "token", "replay", "diebstahl"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Token-Schutz",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V5",
|
||||||
|
"title": "Validation, Sanitization and Encoding",
|
||||||
|
"aliases": ["validation", "validierung", "sanitization", "encoding", "eingabevalidierung"],
|
||||||
|
"keywords": ["validierung", "sanitization", "encoding", "xss", "injection", "eingabe"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V5.1",
|
||||||
|
"title": "Input Validation",
|
||||||
|
"statement": "Alle Eingabedaten muessen serverseitig validiert werden.",
|
||||||
|
"keywords": ["input", "eingabe", "validierung", "serverseitig"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Eingabevalidierung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V5.2",
|
||||||
|
"title": "Sanitization and Sandboxing",
|
||||||
|
"statement": "Eingaben muessen bereinigt und in sicherer Umgebung verarbeitet werden.",
|
||||||
|
"keywords": ["sanitization", "bereinigung", "sandbox"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Eingabebereinigung",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V5.3",
|
||||||
|
"title": "Output Encoding and Injection Prevention",
|
||||||
|
"statement": "Ausgaben muessen kontextabhaengig kodiert werden, um Injection-Angriffe zu verhindern.",
|
||||||
|
"keywords": ["output", "encoding", "injection", "xss", "sql"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Ausgabe-Encoding",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V6",
|
||||||
|
"title": "Stored Cryptography",
|
||||||
|
"aliases": ["cryptography", "kryptographie", "verschluesselung", "stored cryptography"],
|
||||||
|
"keywords": ["kryptographie", "verschluesselung", "hashing", "schluessel", "key management"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V6.1",
|
||||||
|
"title": "Data Classification",
|
||||||
|
"statement": "Daten muessen klassifiziert und entsprechend ihrer Schutzklasse behandelt werden.",
|
||||||
|
"keywords": ["klassifizierung", "classification", "schutzklasse"],
|
||||||
|
"action_hint": "define",
|
||||||
|
"object_hint": "Datenklassifizierung",
|
||||||
|
"object_class": "data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V6.2",
|
||||||
|
"title": "Algorithms",
|
||||||
|
"statement": "Nur zugelassene und aktuelle kryptographische Algorithmen duerfen verwendet werden.",
|
||||||
|
"keywords": ["algorithmus", "algorithm", "aes", "rsa"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Kryptographische Algorithmen",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V6.4",
|
||||||
|
"title": "Secret Management",
|
||||||
|
"statement": "Geheimnisse (Schluessel, Passwoerter, Tokens) muessen in einem Secret-Management-System verwaltet werden.",
|
||||||
|
"keywords": ["secret", "geheimnis", "vault", "key management"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Secret-Management",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V8",
|
||||||
|
"title": "Data Protection",
|
||||||
|
"aliases": ["data protection", "datenschutz", "datenverarbeitung"],
|
||||||
|
"keywords": ["datenschutz", "data protection", "pii", "personenbezogen", "privacy"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V8.1",
|
||||||
|
"title": "General Data Protection",
|
||||||
|
"statement": "Personenbezogene Daten muessen gemaess Datenschutzanforderungen geschuetzt werden.",
|
||||||
|
"keywords": ["personenbezogen", "pii", "datenschutz"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Datenschutzmassnahmen",
|
||||||
|
"object_class": "data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V8.2",
|
||||||
|
"title": "Client-Side Data Protection",
|
||||||
|
"statement": "Clientseitig gespeicherte sensible Daten muessen geschuetzt und minimiert werden.",
|
||||||
|
"keywords": ["client", "browser", "localstorage", "cookie"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Clientseitiger Datenschutz",
|
||||||
|
"object_class": "technical_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V8.3",
|
||||||
|
"title": "Sensitive Private Data",
|
||||||
|
"statement": "Sensible Daten muessen bei Speicherung und Verarbeitung besonders geschuetzt werden.",
|
||||||
|
"keywords": ["sensibel", "vertraulich", "speicherung"],
|
||||||
|
"action_hint": "encrypt",
|
||||||
|
"object_hint": "Verschluesselung sensibler Daten",
|
||||||
|
"object_class": "data"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V9",
|
||||||
|
"title": "Communication",
|
||||||
|
"aliases": ["communication", "kommunikation", "tls", "transport"],
|
||||||
|
"keywords": ["tls", "ssl", "https", "transport", "kommunikation", "verschluesselung"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V9.1",
|
||||||
|
"title": "Client Communication Security",
|
||||||
|
"statement": "Alle Client-Server-Kommunikation muss ueber TLS verschluesselt werden.",
|
||||||
|
"keywords": ["tls", "https", "client", "server"],
|
||||||
|
"action_hint": "encrypt",
|
||||||
|
"object_hint": "TLS-Transportverschluesselung",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V9.2",
|
||||||
|
"title": "Server Communication Security",
|
||||||
|
"statement": "Server-zu-Server-Kommunikation muss authentifiziert und verschluesselt erfolgen.",
|
||||||
|
"keywords": ["server", "mtls", "backend"],
|
||||||
|
"action_hint": "encrypt",
|
||||||
|
"object_hint": "Server-Kommunikationsverschluesselung",
|
||||||
|
"object_class": "cryptographic_control"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V13",
|
||||||
|
"title": "API and Web Service",
|
||||||
|
"aliases": ["api", "web service", "rest", "graphql", "webservice"],
|
||||||
|
"keywords": ["api", "rest", "graphql", "webservice", "endpoint", "schnittstelle"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V13.1",
|
||||||
|
"title": "Generic Web Service Security",
|
||||||
|
"statement": "Web-Services muessen gegen gaengige Angriffe abgesichert werden.",
|
||||||
|
"keywords": ["web service", "sicherheit", "angriff"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "Web-Service-Absicherung",
|
||||||
|
"object_class": "interface"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V13.2",
|
||||||
|
"title": "RESTful Web Service",
|
||||||
|
"statement": "REST-APIs muessen Input-Validierung, Rate Limiting und sichere Authentifizierung implementieren.",
|
||||||
|
"keywords": ["rest", "api", "rate limiting", "input"],
|
||||||
|
"action_hint": "implement",
|
||||||
|
"object_hint": "REST-API-Absicherung",
|
||||||
|
"object_class": "interface"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V13.4",
|
||||||
|
"title": "GraphQL and Web Services",
|
||||||
|
"statement": "GraphQL-Endpoints muessen gegen Query-Complexity-Angriffe und Introspection geschuetzt werden.",
|
||||||
|
"keywords": ["graphql", "query", "complexity", "introspection"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "GraphQL-Absicherung",
|
||||||
|
"object_class": "interface"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"domain_id": "V14",
|
||||||
|
"title": "Configuration",
|
||||||
|
"aliases": ["configuration", "konfiguration", "hardening", "haertung"],
|
||||||
|
"keywords": ["konfiguration", "hardening", "haertung", "header", "deployment"],
|
||||||
|
"subcontrols": [
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V14.1",
|
||||||
|
"title": "Build and Deploy",
|
||||||
|
"statement": "Build- und Deployment-Prozesse muessen sicher konfiguriert und reproduzierbar sein.",
|
||||||
|
"keywords": ["build", "deploy", "ci/cd", "pipeline"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Sichere Build-Pipeline",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V14.2",
|
||||||
|
"title": "Dependency Management",
|
||||||
|
"statement": "Abhaengigkeiten muessen auf Schwachstellen geprueft und aktuell gehalten werden.",
|
||||||
|
"keywords": ["dependency", "abhaengigkeit", "sca", "sbom"],
|
||||||
|
"action_hint": "maintain",
|
||||||
|
"object_hint": "Abhaengigkeitsverwaltung",
|
||||||
|
"object_class": "system"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V14.3",
|
||||||
|
"title": "Unintended Security Disclosure",
|
||||||
|
"statement": "Fehlermeldungen und Debug-Informationen duerfen keine sicherheitsrelevanten Details preisgeben.",
|
||||||
|
"keywords": ["disclosure", "fehlermeldung", "debug", "information leakage"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "Fehlerbehandlung",
|
||||||
|
"object_class": "configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subcontrol_id": "V14.4",
|
||||||
|
"title": "HTTP Security Headers",
|
||||||
|
"statement": "HTTP-Sicherheitsheader muessen korrekt konfiguriert sein.",
|
||||||
|
"keywords": ["header", "csp", "hsts", "x-frame"],
|
||||||
|
"action_hint": "configure",
|
||||||
|
"object_hint": "HTTP-Sicherheitsheader",
|
||||||
|
"object_class": "configuration"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -8,12 +8,16 @@ from .models import (
|
|||||||
EvidenceDB,
|
EvidenceDB,
|
||||||
RiskDB,
|
RiskDB,
|
||||||
AuditExportDB,
|
AuditExportDB,
|
||||||
|
LLMGenerationAuditDB,
|
||||||
|
AssertionDB,
|
||||||
RegulationTypeEnum,
|
RegulationTypeEnum,
|
||||||
ControlTypeEnum,
|
ControlTypeEnum,
|
||||||
ControlDomainEnum,
|
ControlDomainEnum,
|
||||||
RiskLevelEnum,
|
RiskLevelEnum,
|
||||||
EvidenceStatusEnum,
|
EvidenceStatusEnum,
|
||||||
ControlStatusEnum,
|
ControlStatusEnum,
|
||||||
|
EvidenceConfidenceEnum,
|
||||||
|
EvidenceTruthStatusEnum,
|
||||||
)
|
)
|
||||||
from .repository import (
|
from .repository import (
|
||||||
RegulationRepository,
|
RegulationRepository,
|
||||||
@@ -33,6 +37,8 @@ __all__ = [
|
|||||||
"EvidenceDB",
|
"EvidenceDB",
|
||||||
"RiskDB",
|
"RiskDB",
|
||||||
"AuditExportDB",
|
"AuditExportDB",
|
||||||
|
"LLMGenerationAuditDB",
|
||||||
|
"AssertionDB",
|
||||||
# Enums
|
# Enums
|
||||||
"RegulationTypeEnum",
|
"RegulationTypeEnum",
|
||||||
"ControlTypeEnum",
|
"ControlTypeEnum",
|
||||||
@@ -40,6 +46,8 @@ __all__ = [
|
|||||||
"RiskLevelEnum",
|
"RiskLevelEnum",
|
||||||
"EvidenceStatusEnum",
|
"EvidenceStatusEnum",
|
||||||
"ControlStatusEnum",
|
"ControlStatusEnum",
|
||||||
|
"EvidenceConfidenceEnum",
|
||||||
|
"EvidenceTruthStatusEnum",
|
||||||
# Repositories
|
# Repositories
|
||||||
"RegulationRepository",
|
"RegulationRepository",
|
||||||
"RequirementRepository",
|
"RequirementRepository",
|
||||||
|
|||||||
@@ -65,6 +65,7 @@ class ControlStatusEnum(str, enum.Enum):
|
|||||||
FAIL = "fail" # Not passing
|
FAIL = "fail" # Not passing
|
||||||
NOT_APPLICABLE = "n/a" # Not applicable
|
NOT_APPLICABLE = "n/a" # Not applicable
|
||||||
PLANNED = "planned" # Planned for implementation
|
PLANNED = "planned" # Planned for implementation
|
||||||
|
IN_PROGRESS = "in_progress" # Implementation in progress
|
||||||
|
|
||||||
|
|
||||||
class RiskLevelEnum(str, enum.Enum):
|
class RiskLevelEnum(str, enum.Enum):
|
||||||
@@ -83,6 +84,26 @@ class EvidenceStatusEnum(str, enum.Enum):
|
|||||||
FAILED = "failed" # Failed validation
|
FAILED = "failed" # Failed validation
|
||||||
|
|
||||||
|
|
||||||
|
class EvidenceConfidenceEnum(str, enum.Enum):
|
||||||
|
"""Confidence level of evidence (Anti-Fake-Evidence)."""
|
||||||
|
E0 = "E0" # Generated / no real evidence (LLM output, placeholder)
|
||||||
|
E1 = "E1" # Uploaded but unreviewed (manual upload, no hash, no reviewer)
|
||||||
|
E2 = "E2" # Reviewed internally (human reviewed, hash verified)
|
||||||
|
E3 = "E3" # Observed by system (CI/CD pipeline, API with hash)
|
||||||
|
E4 = "E4" # Validated by external auditor
|
||||||
|
|
||||||
|
|
||||||
|
class EvidenceTruthStatusEnum(str, enum.Enum):
|
||||||
|
"""Truth status lifecycle for evidence (Anti-Fake-Evidence)."""
|
||||||
|
GENERATED = "generated"
|
||||||
|
UPLOADED = "uploaded"
|
||||||
|
OBSERVED = "observed"
|
||||||
|
VALIDATED_INTERNAL = "validated_internal"
|
||||||
|
REJECTED = "rejected"
|
||||||
|
PROVIDED_TO_AUDITOR = "provided_to_auditor"
|
||||||
|
ACCEPTED_BY_AUDITOR = "accepted_by_auditor"
|
||||||
|
|
||||||
|
|
||||||
class ExportStatusEnum(str, enum.Enum):
|
class ExportStatusEnum(str, enum.Enum):
|
||||||
"""Status of audit export."""
|
"""Status of audit export."""
|
||||||
PENDING = "pending"
|
PENDING = "pending"
|
||||||
@@ -239,6 +260,7 @@ class ControlDB(Base):
|
|||||||
# Status
|
# Status
|
||||||
status = Column(Enum(ControlStatusEnum), default=ControlStatusEnum.PLANNED)
|
status = Column(Enum(ControlStatusEnum), default=ControlStatusEnum.PLANNED)
|
||||||
status_notes = Column(Text)
|
status_notes = Column(Text)
|
||||||
|
status_justification = Column(Text) # Required for n/a transitions
|
||||||
|
|
||||||
# Ownership & Review
|
# Ownership & Review
|
||||||
owner = Column(String(100)) # Responsible person/team
|
owner = Column(String(100)) # Responsible person/team
|
||||||
@@ -321,6 +343,22 @@ class EvidenceDB(Base):
|
|||||||
ci_job_id = Column(String(100)) # CI/CD job reference
|
ci_job_id = Column(String(100)) # CI/CD job reference
|
||||||
uploaded_by = Column(String(100)) # User who uploaded
|
uploaded_by = Column(String(100)) # User who uploaded
|
||||||
|
|
||||||
|
# Anti-Fake-Evidence: Confidence & Truth tracking
|
||||||
|
confidence_level = Column(Enum(EvidenceConfidenceEnum), default=EvidenceConfidenceEnum.E1)
|
||||||
|
truth_status = Column(Enum(EvidenceTruthStatusEnum), default=EvidenceTruthStatusEnum.UPLOADED)
|
||||||
|
generation_mode = Column(String(100)) # e.g. "draft_assistance", "auto_generation"
|
||||||
|
may_be_used_as_evidence = Column(Boolean, default=True)
|
||||||
|
reviewed_by = Column(String(200))
|
||||||
|
reviewed_at = Column(DateTime)
|
||||||
|
|
||||||
|
# Anti-Fake-Evidence Phase 2: Four-Eyes review
|
||||||
|
approval_status = Column(String(30), default="none")
|
||||||
|
first_reviewer = Column(String(200))
|
||||||
|
first_reviewed_at = Column(DateTime)
|
||||||
|
second_reviewer = Column(String(200))
|
||||||
|
second_reviewed_at = Column(DateTime)
|
||||||
|
requires_four_eyes = Column(Boolean, default=False)
|
||||||
|
|
||||||
# Timestamps
|
# Timestamps
|
||||||
collected_at = Column(DateTime, default=datetime.utcnow)
|
collected_at = Column(DateTime, default=datetime.utcnow)
|
||||||
created_at = Column(DateTime, default=datetime.utcnow)
|
created_at = Column(DateTime, default=datetime.utcnow)
|
||||||
@@ -332,6 +370,7 @@ class EvidenceDB(Base):
|
|||||||
__table_args__ = (
|
__table_args__ = (
|
||||||
Index('ix_evidence_control_type', 'control_id', 'evidence_type'),
|
Index('ix_evidence_control_type', 'control_id', 'evidence_type'),
|
||||||
Index('ix_evidence_status', 'status'),
|
Index('ix_evidence_status', 'status'),
|
||||||
|
Index('ix_evidence_approval_status', 'approval_status'),
|
||||||
)
|
)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
@@ -1464,3 +1503,77 @@ class ISMSReadinessCheckDB(Base):
|
|||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return f"<ISMSReadiness {self.check_date}: {self.overall_status}>"
|
return f"<ISMSReadiness {self.check_date}: {self.overall_status}>"
|
||||||
|
|
||||||
|
|
||||||
|
class LLMGenerationAuditDB(Base):
|
||||||
|
"""
|
||||||
|
Audit trail for LLM-generated content.
|
||||||
|
|
||||||
|
Every piece of content generated by an LLM is recorded here with its
|
||||||
|
truth_status and may_be_used_as_evidence flag, ensuring transparency
|
||||||
|
about what is real evidence vs. generated assistance.
|
||||||
|
"""
|
||||||
|
__tablename__ = 'compliance_llm_generation_audit'
|
||||||
|
|
||||||
|
id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
|
||||||
|
tenant_id = Column(String(36), index=True)
|
||||||
|
|
||||||
|
entity_type = Column(String(50), nullable=False) # 'evidence', 'control', 'document'
|
||||||
|
entity_id = Column(String(36)) # FK to generated entity
|
||||||
|
generation_mode = Column(String(100), nullable=False) # 'draft_assistance', 'auto_generation'
|
||||||
|
truth_status = Column(Enum(EvidenceTruthStatusEnum), nullable=False, default=EvidenceTruthStatusEnum.GENERATED)
|
||||||
|
may_be_used_as_evidence = Column(Boolean, nullable=False, default=False)
|
||||||
|
|
||||||
|
llm_model = Column(String(100))
|
||||||
|
llm_provider = Column(String(50)) # 'ollama', 'anthropic'
|
||||||
|
prompt_hash = Column(String(64)) # SHA-256 of prompt
|
||||||
|
input_summary = Column(Text)
|
||||||
|
output_summary = Column(Text)
|
||||||
|
extra_metadata = Column("metadata", JSON, default=dict)
|
||||||
|
|
||||||
|
created_at = Column(DateTime, default=datetime.utcnow)
|
||||||
|
updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
|
||||||
|
|
||||||
|
__table_args__ = (
|
||||||
|
Index('ix_llm_audit_entity', 'entity_type', 'entity_id'),
|
||||||
|
)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return f"<LLMGenerationAudit {self.entity_type}:{self.entity_id} mode={self.generation_mode}>"
|
||||||
|
|
||||||
|
|
||||||
|
class AssertionDB(Base):
|
||||||
|
"""
|
||||||
|
Assertion tracking — separates claims from verified facts.
|
||||||
|
|
||||||
|
Each sentence from a control/evidence/document is stored here with its
|
||||||
|
classification (assertion vs. fact vs. rationale) and optional evidence linkage.
|
||||||
|
"""
|
||||||
|
__tablename__ = 'compliance_assertions'
|
||||||
|
|
||||||
|
id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
|
||||||
|
tenant_id = Column(String(36), index=True)
|
||||||
|
|
||||||
|
entity_type = Column(String(50), nullable=False) # 'control', 'evidence', 'document', 'obligation'
|
||||||
|
entity_id = Column(String(36), nullable=False)
|
||||||
|
sentence_text = Column(Text, nullable=False)
|
||||||
|
sentence_index = Column(Integer, nullable=False, default=0)
|
||||||
|
|
||||||
|
assertion_type = Column(String(20), nullable=False, default='assertion') # 'assertion' | 'fact' | 'rationale'
|
||||||
|
evidence_ids = Column(JSON, default=list)
|
||||||
|
confidence = Column(Float, default=0.0)
|
||||||
|
normative_tier = Column(String(20)) # 'pflicht' | 'empfehlung' | 'kann'
|
||||||
|
|
||||||
|
verified_by = Column(String(200))
|
||||||
|
verified_at = Column(DateTime)
|
||||||
|
|
||||||
|
created_at = Column(DateTime, default=datetime.utcnow)
|
||||||
|
updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
|
||||||
|
|
||||||
|
__table_args__ = (
|
||||||
|
Index('ix_assertion_entity', 'entity_type', 'entity_id'),
|
||||||
|
Index('ix_assertion_type', 'assertion_type'),
|
||||||
|
)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return f"<Assertion {self.assertion_type}: {self.sentence_text[:50]}>"
|
||||||
|
|||||||
@@ -487,6 +487,137 @@ class ControlRepository:
|
|||||||
"compliance_score": round(score, 1),
|
"compliance_score": round(score, 1),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def get_multi_dimensional_score(self) -> Dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Calculate multi-dimensional compliance score (Anti-Fake-Evidence).
|
||||||
|
|
||||||
|
Returns 6 dimensions + hard_blocks + overall_readiness.
|
||||||
|
"""
|
||||||
|
from .models import (
|
||||||
|
EvidenceDB, RequirementDB, ControlMappingDB,
|
||||||
|
EvidenceConfidenceEnum, EvidenceTruthStatusEnum,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Weight map for confidence levels
|
||||||
|
conf_weights = {"E0": 0.0, "E1": 0.25, "E2": 0.5, "E3": 0.75, "E4": 1.0}
|
||||||
|
validated_statuses = {"validated_internal", "accepted_by_auditor", "provided_to_auditor"}
|
||||||
|
|
||||||
|
controls = self.get_all()
|
||||||
|
total_controls = len(controls)
|
||||||
|
|
||||||
|
if total_controls == 0:
|
||||||
|
return {
|
||||||
|
"requirement_coverage": 0.0,
|
||||||
|
"evidence_strength": 0.0,
|
||||||
|
"validation_quality": 0.0,
|
||||||
|
"evidence_freshness": 0.0,
|
||||||
|
"control_effectiveness": 0.0,
|
||||||
|
"overall_readiness": 0.0,
|
||||||
|
"hard_blocks": ["Keine Controls vorhanden"],
|
||||||
|
}
|
||||||
|
|
||||||
|
# 1. requirement_coverage: % requirements linked to at least one control
|
||||||
|
total_reqs = self.db.query(func.count(RequirementDB.id)).scalar() or 0
|
||||||
|
linked_reqs = (
|
||||||
|
self.db.query(func.count(func.distinct(ControlMappingDB.requirement_id)))
|
||||||
|
.scalar() or 0
|
||||||
|
)
|
||||||
|
requirement_coverage = (linked_reqs / total_reqs * 100) if total_reqs > 0 else 0.0
|
||||||
|
|
||||||
|
# 2. evidence_strength: weighted average of evidence confidence
|
||||||
|
all_evidence = self.db.query(EvidenceDB).all()
|
||||||
|
if all_evidence:
|
||||||
|
total_weight = 0.0
|
||||||
|
for e in all_evidence:
|
||||||
|
conf_val = e.confidence_level.value if e.confidence_level else "E1"
|
||||||
|
total_weight += conf_weights.get(conf_val, 0.25)
|
||||||
|
evidence_strength = (total_weight / len(all_evidence)) * 100
|
||||||
|
else:
|
||||||
|
evidence_strength = 0.0
|
||||||
|
|
||||||
|
# 3. validation_quality: % evidence with truth_status >= validated_internal
|
||||||
|
if all_evidence:
|
||||||
|
validated_count = sum(
|
||||||
|
1 for e in all_evidence
|
||||||
|
if (e.truth_status.value if e.truth_status else "uploaded") in validated_statuses
|
||||||
|
)
|
||||||
|
validation_quality = (validated_count / len(all_evidence)) * 100
|
||||||
|
else:
|
||||||
|
validation_quality = 0.0
|
||||||
|
|
||||||
|
# 4. evidence_freshness: % evidence not expired and reviewed < 90 days
|
||||||
|
now = datetime.now()
|
||||||
|
if all_evidence:
|
||||||
|
fresh_count = 0
|
||||||
|
for e in all_evidence:
|
||||||
|
is_expired = e.valid_until and e.valid_until < now
|
||||||
|
is_stale = e.reviewed_at and (now - e.reviewed_at).days > 90 if hasattr(e, 'reviewed_at') and e.reviewed_at else False
|
||||||
|
if not is_expired and not is_stale:
|
||||||
|
fresh_count += 1
|
||||||
|
evidence_freshness = (fresh_count / len(all_evidence)) * 100
|
||||||
|
else:
|
||||||
|
evidence_freshness = 0.0
|
||||||
|
|
||||||
|
# 5. control_effectiveness: existing formula
|
||||||
|
passed = sum(1 for c in controls if c.status == ControlStatusEnum.PASS)
|
||||||
|
partial = sum(1 for c in controls if c.status == ControlStatusEnum.PARTIAL)
|
||||||
|
control_effectiveness = ((passed + partial * 0.5) / total_controls) * 100
|
||||||
|
|
||||||
|
# 6. overall_readiness: weighted composite
|
||||||
|
overall_readiness = (
|
||||||
|
0.20 * requirement_coverage +
|
||||||
|
0.25 * evidence_strength +
|
||||||
|
0.20 * validation_quality +
|
||||||
|
0.10 * evidence_freshness +
|
||||||
|
0.25 * control_effectiveness
|
||||||
|
)
|
||||||
|
|
||||||
|
# Hard blocks
|
||||||
|
hard_blocks = []
|
||||||
|
|
||||||
|
# Critical controls without any evidence
|
||||||
|
critical_no_evidence = []
|
||||||
|
for c in controls:
|
||||||
|
if c.status in (ControlStatusEnum.PASS, ControlStatusEnum.PARTIAL):
|
||||||
|
evidence_for_ctrl = [e for e in all_evidence if e.control_id == c.id]
|
||||||
|
if not evidence_for_ctrl:
|
||||||
|
critical_no_evidence.append(c.control_id)
|
||||||
|
if critical_no_evidence:
|
||||||
|
hard_blocks.append(
|
||||||
|
f"{len(critical_no_evidence)} Controls mit Status pass/partial haben keine Evidence: "
|
||||||
|
f"{', '.join(critical_no_evidence[:5])}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Controls with only E0/E1 evidence claiming pass
|
||||||
|
weak_evidence_pass = []
|
||||||
|
for c in controls:
|
||||||
|
if c.status == ControlStatusEnum.PASS:
|
||||||
|
evidence_for_ctrl = [e for e in all_evidence if e.control_id == c.id]
|
||||||
|
if evidence_for_ctrl:
|
||||||
|
max_conf = max(
|
||||||
|
conf_weights.get(
|
||||||
|
e.confidence_level.value if e.confidence_level else "E1", 0.25
|
||||||
|
)
|
||||||
|
for e in evidence_for_ctrl
|
||||||
|
)
|
||||||
|
if max_conf < 0.5: # Only E0 or E1
|
||||||
|
weak_evidence_pass.append(c.control_id)
|
||||||
|
if weak_evidence_pass:
|
||||||
|
hard_blocks.append(
|
||||||
|
f"{len(weak_evidence_pass)} Controls auf 'pass' haben nur E0/E1-Evidence: "
|
||||||
|
f"{', '.join(weak_evidence_pass[:5])}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"requirement_coverage": round(requirement_coverage, 1),
|
||||||
|
"evidence_strength": round(evidence_strength, 1),
|
||||||
|
"validation_quality": round(validation_quality, 1),
|
||||||
|
"evidence_freshness": round(evidence_freshness, 1),
|
||||||
|
"control_effectiveness": round(control_effectiveness, 1),
|
||||||
|
"overall_readiness": round(overall_readiness, 1),
|
||||||
|
"hard_blocks": hard_blocks,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class ControlMappingRepository:
|
class ControlMappingRepository:
|
||||||
"""Repository for requirement-control mappings."""
|
"""Repository for requirement-control mappings."""
|
||||||
|
|||||||
80
backend-compliance/compliance/services/assertion_engine.py
Normal file
80
backend-compliance/compliance/services/assertion_engine.py
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
"""Assertion Engine — splits text into sentences and classifies each.
|
||||||
|
|
||||||
|
Each sentence is tagged as:
|
||||||
|
- assertion: normative statement (pflicht / empfehlung / kann)
|
||||||
|
- fact: references concrete evidence artifacts
|
||||||
|
- rationale: explains why something is required
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from .normative_patterns import (
|
||||||
|
PFLICHT_RE, EMPFEHLUNG_RE, KANN_RE, RATIONALE_RE, EVIDENCE_RE,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Sentence splitter: period/excl/question followed by space+uppercase, or newlines
|
||||||
|
_SENTENCE_SPLIT = re.compile(r'(?<=[.!?])\s+(?=[A-ZÄÖÜ])|(?:\n\s*\n)')
|
||||||
|
|
||||||
|
|
||||||
|
def extract_assertions(
|
||||||
|
text: str,
|
||||||
|
entity_type: str,
|
||||||
|
entity_id: str,
|
||||||
|
tenant_id: Optional[str] = None,
|
||||||
|
) -> list[dict]:
|
||||||
|
"""Split *text* into sentences and classify each one.
|
||||||
|
|
||||||
|
Returns a list of dicts ready for AssertionDB creation.
|
||||||
|
"""
|
||||||
|
if not text or not text.strip():
|
||||||
|
return []
|
||||||
|
|
||||||
|
sentences = _SENTENCE_SPLIT.split(text.strip())
|
||||||
|
results: list[dict] = []
|
||||||
|
|
||||||
|
for idx, raw in enumerate(sentences):
|
||||||
|
sentence = raw.strip()
|
||||||
|
if not sentence or len(sentence) < 5:
|
||||||
|
continue
|
||||||
|
|
||||||
|
assertion_type, normative_tier = _classify_sentence(sentence)
|
||||||
|
|
||||||
|
results.append({
|
||||||
|
"tenant_id": tenant_id,
|
||||||
|
"entity_type": entity_type,
|
||||||
|
"entity_id": entity_id,
|
||||||
|
"sentence_text": sentence,
|
||||||
|
"sentence_index": idx,
|
||||||
|
"assertion_type": assertion_type,
|
||||||
|
"normative_tier": normative_tier,
|
||||||
|
"evidence_ids": [],
|
||||||
|
"confidence": 0.0,
|
||||||
|
})
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def _classify_sentence(sentence: str) -> tuple[str, Optional[str]]:
|
||||||
|
"""Return (assertion_type, normative_tier) for a single sentence."""
|
||||||
|
|
||||||
|
# 1. Check for evidence/fact keywords first
|
||||||
|
if EVIDENCE_RE.search(sentence):
|
||||||
|
return ("fact", None)
|
||||||
|
|
||||||
|
# 2. Check for rationale
|
||||||
|
normative_count = len(PFLICHT_RE.findall(sentence)) + len(EMPFEHLUNG_RE.findall(sentence)) + len(KANN_RE.findall(sentence))
|
||||||
|
rationale_count = len(RATIONALE_RE.findall(sentence))
|
||||||
|
if rationale_count > 0 and rationale_count >= normative_count:
|
||||||
|
return ("rationale", None)
|
||||||
|
|
||||||
|
# 3. Normative classification
|
||||||
|
if PFLICHT_RE.search(sentence):
|
||||||
|
return ("assertion", "pflicht")
|
||||||
|
if EMPFEHLUNG_RE.search(sentence):
|
||||||
|
return ("assertion", "empfehlung")
|
||||||
|
if KANN_RE.search(sentence):
|
||||||
|
return ("assertion", "kann")
|
||||||
|
|
||||||
|
# 4. Default: unclassified assertion
|
||||||
|
return ("assertion", None)
|
||||||
@@ -493,6 +493,9 @@ class GeneratedControl:
|
|||||||
applicable_industries: Optional[list] = None # e.g. ["all"] or ["Telekommunikation", "Energie"]
|
applicable_industries: Optional[list] = None # e.g. ["all"] or ["Telekommunikation", "Energie"]
|
||||||
applicable_company_size: Optional[list] = None # e.g. ["all"] or ["medium", "large", "enterprise"]
|
applicable_company_size: Optional[list] = None # e.g. ["all"] or ["medium", "large", "enterprise"]
|
||||||
scope_conditions: Optional[dict] = None # e.g. {"requires_any": ["uses_ai"], "description": "..."}
|
scope_conditions: Optional[dict] = None # e.g. {"requires_any": ["uses_ai"], "description": "..."}
|
||||||
|
# Anti-Fake-Evidence: truth tracking for generated controls
|
||||||
|
truth_status: str = "generated"
|
||||||
|
may_be_used_as_evidence: bool = False
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@@ -781,10 +784,23 @@ REFORM_SYSTEM_PROMPT = """Du bist ein Security-Compliance-Experte. Deine Aufgabe
|
|||||||
Security Controls zu formulieren. Du formulierst IMMER in eigenen Worten.
|
Security Controls zu formulieren. Du formulierst IMMER in eigenen Worten.
|
||||||
KOPIERE KEINE Sätze aus dem Quelltext. Verwende eigene Begriffe und Struktur.
|
KOPIERE KEINE Sätze aus dem Quelltext. Verwende eigene Begriffe und Struktur.
|
||||||
NENNE NICHT die Quelle. Keine proprietären Bezeichner.
|
NENNE NICHT die Quelle. Keine proprietären Bezeichner.
|
||||||
|
|
||||||
|
WICHTIG — Truthfulness-Guardrail:
|
||||||
|
Deine Ausgabe ist ein ENTWURF. Formuliere NIEMALS Behauptungen über bereits erfolgte Umsetzung.
|
||||||
|
Verwende NICHT: "ist compliant", "erfüllt vollständig", "wurde geprüft", "wurde umgesetzt",
|
||||||
|
"ist auditiert", "vollständig implementiert", "nachweislich konform".
|
||||||
|
Verwende stattdessen: "soll umsetzen", "ist vorgesehen", "muss implementiert werden".
|
||||||
|
|
||||||
Antworte NUR mit validem JSON. Bei mehreren Controls antworte mit einem JSON-Array."""
|
Antworte NUR mit validem JSON. Bei mehreren Controls antworte mit einem JSON-Array."""
|
||||||
|
|
||||||
STRUCTURE_SYSTEM_PROMPT = """Du bist ein Security-Compliance-Experte. Strukturiere den gegebenen Text
|
STRUCTURE_SYSTEM_PROMPT = """Du bist ein Security-Compliance-Experte. Strukturiere den gegebenen Text
|
||||||
als praxisorientiertes Security Control. Erstelle eine verständliche, umsetzbare Formulierung.
|
als praxisorientiertes Security Control. Erstelle eine verständliche, umsetzbare Formulierung.
|
||||||
|
|
||||||
|
WICHTIG — Truthfulness-Guardrail:
|
||||||
|
Deine Ausgabe ist ein ENTWURF. Formuliere NIEMALS Behauptungen über bereits erfolgte Umsetzung.
|
||||||
|
Verwende NICHT: "ist compliant", "erfüllt vollständig", "wurde geprüft", "wurde umgesetzt".
|
||||||
|
Verwende stattdessen: "soll umsetzen", "ist vorgesehen", "muss implementiert werden".
|
||||||
|
|
||||||
Antworte NUR mit validem JSON. Bei mehreren Controls antworte mit einem JSON-Array."""
|
Antworte NUR mit validem JSON. Bei mehreren Controls antworte mit einem JSON-Array."""
|
||||||
|
|
||||||
# Shared applicability prompt block — appended to all generation prompts (v3)
|
# Shared applicability prompt block — appended to all generation prompts (v3)
|
||||||
@@ -1877,7 +1893,38 @@ Kategorien: {CATEGORY_LIST_STR}"""
|
|||||||
)
|
)
|
||||||
self.db.commit()
|
self.db.commit()
|
||||||
row = result.fetchone()
|
row = result.fetchone()
|
||||||
return str(row[0]) if row else None
|
control_uuid = str(row[0]) if row else None
|
||||||
|
|
||||||
|
# Anti-Fake-Evidence: Record LLM audit trail for generated control
|
||||||
|
if control_uuid:
|
||||||
|
try:
|
||||||
|
self.db.execute(
|
||||||
|
text("""
|
||||||
|
INSERT INTO compliance_llm_generation_audit (
|
||||||
|
entity_type, entity_id, generation_mode,
|
||||||
|
truth_status, may_be_used_as_evidence,
|
||||||
|
llm_model, llm_provider,
|
||||||
|
input_summary, output_summary
|
||||||
|
) VALUES (
|
||||||
|
'control', :entity_id, 'auto_generation',
|
||||||
|
'generated', FALSE,
|
||||||
|
:llm_model, :llm_provider,
|
||||||
|
:input_summary, :output_summary
|
||||||
|
)
|
||||||
|
"""),
|
||||||
|
{
|
||||||
|
"entity_id": control_uuid,
|
||||||
|
"llm_model": ANTHROPIC_MODEL if ANTHROPIC_API_KEY else OLLAMA_MODEL,
|
||||||
|
"llm_provider": "anthropic" if ANTHROPIC_API_KEY else "ollama",
|
||||||
|
"input_summary": f"Control generation for {control.control_id}",
|
||||||
|
"output_summary": control.title[:500] if control.title else None,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
self.db.commit()
|
||||||
|
except Exception as audit_err:
|
||||||
|
logger.warning("Failed to create LLM audit record: %s", audit_err)
|
||||||
|
|
||||||
|
return control_uuid
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error("Failed to store control %s: %s", control.control_id, e)
|
logger.error("Failed to store control %s: %s", control.control_id, e)
|
||||||
self.db.rollback()
|
self.db.rollback()
|
||||||
|
|||||||
152
backend-compliance/compliance/services/control_status_machine.py
Normal file
152
backend-compliance/compliance/services/control_status_machine.py
Normal file
@@ -0,0 +1,152 @@
|
|||||||
|
"""
|
||||||
|
Control Status Transition State Machine.
|
||||||
|
|
||||||
|
Enforces that controls cannot be set to "pass" without sufficient evidence.
|
||||||
|
Prevents Compliance-Theater where controls claim compliance without real proof.
|
||||||
|
|
||||||
|
Transition rules:
|
||||||
|
planned → in_progress : always allowed
|
||||||
|
in_progress → pass : requires ≥1 evidence with confidence ≥ E2 and
|
||||||
|
truth_status in (uploaded, observed, validated_internal)
|
||||||
|
in_progress → partial : requires ≥1 evidence (any level)
|
||||||
|
pass → fail : always allowed (degradation)
|
||||||
|
any → n/a : requires status_justification
|
||||||
|
any → planned : always allowed (reset)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import List, Optional, Tuple
|
||||||
|
|
||||||
|
from ..db.models import EvidenceDB
|
||||||
|
|
||||||
|
|
||||||
|
# Confidence level ordering for comparisons
|
||||||
|
CONFIDENCE_ORDER = {"E0": 0, "E1": 1, "E2": 2, "E3": 3, "E4": 4}
|
||||||
|
|
||||||
|
# Truth statuses that qualify as "real" evidence for pass transitions
|
||||||
|
VALID_TRUTH_STATUSES = {"uploaded", "observed", "validated_internal", "accepted_by_auditor", "provided_to_auditor"}
|
||||||
|
|
||||||
|
|
||||||
|
def validate_transition(
|
||||||
|
current_status: str,
|
||||||
|
new_status: str,
|
||||||
|
evidence_list: Optional[List[EvidenceDB]] = None,
|
||||||
|
status_justification: Optional[str] = None,
|
||||||
|
bypass_for_auto_updater: bool = False,
|
||||||
|
) -> Tuple[bool, List[str]]:
|
||||||
|
"""
|
||||||
|
Validate whether a control status transition is allowed.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
current_status: Current control status value (e.g. "planned", "pass")
|
||||||
|
new_status: Requested new status
|
||||||
|
evidence_list: List of EvidenceDB objects linked to this control
|
||||||
|
status_justification: Text justification (required for n/a transitions)
|
||||||
|
bypass_for_auto_updater: If True, skip evidence checks (used by CI/CD auto-updater
|
||||||
|
which creates evidence atomically with status change)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (allowed: bool, violations: list[str])
|
||||||
|
"""
|
||||||
|
violations: List[str] = []
|
||||||
|
evidence_list = evidence_list or []
|
||||||
|
|
||||||
|
# Same status → no-op, always allowed
|
||||||
|
if current_status == new_status:
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
# Reset to planned is always allowed
|
||||||
|
if new_status == "planned":
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
# n/a requires justification
|
||||||
|
if new_status == "n/a":
|
||||||
|
if not status_justification or not status_justification.strip():
|
||||||
|
violations.append("Transition to 'n/a' requires a status_justification explaining why this control is not applicable.")
|
||||||
|
return len(violations) == 0, violations
|
||||||
|
|
||||||
|
# Degradation: pass → fail is always allowed
|
||||||
|
if current_status == "pass" and new_status == "fail":
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
# planned → in_progress: always allowed
|
||||||
|
if current_status == "planned" and new_status == "in_progress":
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
# in_progress → partial: needs at least 1 evidence
|
||||||
|
if new_status == "partial":
|
||||||
|
if not bypass_for_auto_updater and len(evidence_list) == 0:
|
||||||
|
violations.append("Transition to 'partial' requires at least 1 evidence record.")
|
||||||
|
return len(violations) == 0, violations
|
||||||
|
|
||||||
|
# in_progress → pass: strict requirements
|
||||||
|
if new_status == "pass":
|
||||||
|
if bypass_for_auto_updater:
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
if len(evidence_list) == 0:
|
||||||
|
violations.append("Transition to 'pass' requires at least 1 evidence record.")
|
||||||
|
return False, violations
|
||||||
|
|
||||||
|
# Check for at least one qualifying evidence
|
||||||
|
has_qualifying = False
|
||||||
|
for e in evidence_list:
|
||||||
|
conf = getattr(e, "confidence_level", None)
|
||||||
|
truth = getattr(e, "truth_status", None)
|
||||||
|
|
||||||
|
# Get string values from enum or string
|
||||||
|
conf_val = conf.value if hasattr(conf, "value") else str(conf) if conf else "E1"
|
||||||
|
truth_val = truth.value if hasattr(truth, "value") else str(truth) if truth else "uploaded"
|
||||||
|
|
||||||
|
if CONFIDENCE_ORDER.get(conf_val, 1) >= CONFIDENCE_ORDER["E2"] and truth_val in VALID_TRUTH_STATUSES:
|
||||||
|
has_qualifying = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if not has_qualifying:
|
||||||
|
violations.append(
|
||||||
|
"Transition to 'pass' requires at least 1 evidence with confidence >= E2 "
|
||||||
|
"and truth_status in (uploaded, observed, validated_internal, accepted_by_auditor). "
|
||||||
|
"Current evidence does not meet this threshold."
|
||||||
|
)
|
||||||
|
|
||||||
|
return len(violations) == 0, violations
|
||||||
|
|
||||||
|
# in_progress → fail: always allowed
|
||||||
|
if new_status == "fail":
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
# Any other transition from planned/fail to pass requires going through in_progress
|
||||||
|
if current_status in ("planned", "fail") and new_status == "pass":
|
||||||
|
if bypass_for_auto_updater:
|
||||||
|
return True, []
|
||||||
|
violations.append(
|
||||||
|
f"Direct transition from '{current_status}' to 'pass' is not allowed. "
|
||||||
|
f"Move to 'in_progress' first, then to 'pass' with qualifying evidence."
|
||||||
|
)
|
||||||
|
return False, violations
|
||||||
|
|
||||||
|
# Default: allow other transitions (e.g. fail → partial, partial → pass)
|
||||||
|
# For partial → pass, apply the same evidence checks
|
||||||
|
if current_status == "partial" and new_status == "pass":
|
||||||
|
if bypass_for_auto_updater:
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
has_qualifying = False
|
||||||
|
for e in evidence_list:
|
||||||
|
conf = getattr(e, "confidence_level", None)
|
||||||
|
truth = getattr(e, "truth_status", None)
|
||||||
|
conf_val = conf.value if hasattr(conf, "value") else str(conf) if conf else "E1"
|
||||||
|
truth_val = truth.value if hasattr(truth, "value") else str(truth) if truth else "uploaded"
|
||||||
|
|
||||||
|
if CONFIDENCE_ORDER.get(conf_val, 1) >= CONFIDENCE_ORDER["E2"] and truth_val in VALID_TRUTH_STATUSES:
|
||||||
|
has_qualifying = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if not has_qualifying:
|
||||||
|
violations.append(
|
||||||
|
"Transition from 'partial' to 'pass' requires at least 1 evidence with confidence >= E2 "
|
||||||
|
"and truth_status in (uploaded, observed, validated_internal, accepted_by_auditor)."
|
||||||
|
)
|
||||||
|
return len(violations) == 0, violations
|
||||||
|
|
||||||
|
# All other transitions allowed
|
||||||
|
return True, []
|
||||||
@@ -52,64 +52,18 @@ ANTHROPIC_API_URL = "https://api.anthropic.com/v1"
|
|||||||
# Tier 2: Empfehlung (recommendation) — weaker normative signals
|
# Tier 2: Empfehlung (recommendation) — weaker normative signals
|
||||||
# Tier 3: Kann (optional/permissive) — permissive signals
|
# Tier 3: Kann (optional/permissive) — permissive signals
|
||||||
# Nothing is rejected — everything is classified.
|
# Nothing is rejected — everything is classified.
|
||||||
|
#
|
||||||
|
# Patterns are defined in normative_patterns.py and imported here
|
||||||
|
# with local aliases for backward compatibility.
|
||||||
|
|
||||||
_PFLICHT_SIGNALS = [
|
from .normative_patterns import (
|
||||||
# Deutsche modale Pflichtformulierungen
|
PFLICHT_RE as _PFLICHT_RE,
|
||||||
r"\bmüssen\b", r"\bmuss\b", r"\bhat\s+sicherzustellen\b",
|
EMPFEHLUNG_RE as _EMPFEHLUNG_RE,
|
||||||
r"\bhaben\s+sicherzustellen\b", r"\bsind\s+verpflichtet\b",
|
KANN_RE as _KANN_RE,
|
||||||
r"\bist\s+verpflichtet\b",
|
NORMATIVE_RE as _NORMATIVE_RE,
|
||||||
# "ist zu prüfen", "sind zu dokumentieren" (direkt)
|
RATIONALE_RE as _RATIONALE_RE,
|
||||||
r"\bist\s+zu\s+\w+en\b", r"\bsind\s+zu\s+\w+en\b",
|
|
||||||
r"\bhat\s+zu\s+\w+en\b", r"\bhaben\s+zu\s+\w+en\b",
|
|
||||||
# "ist festzustellen", "sind vorzunehmen" (Compound-Verben, eingebettetes zu)
|
|
||||||
r"\bist\s+\w+zu\w+en\b", r"\bsind\s+\w+zu\w+en\b",
|
|
||||||
# "ist zusätzlich zu prüfen", "sind regelmäßig zu überwachen" (Adverb dazwischen)
|
|
||||||
r"\bist\s+\w+\s+zu\s+\w+en\b", r"\bsind\s+\w+\s+zu\s+\w+en\b",
|
|
||||||
r"\bhat\s+\w+\s+zu\s+\w+en\b", r"\bhaben\s+\w+\s+zu\s+\w+en\b",
|
|
||||||
# Englische Pflicht-Signale
|
|
||||||
r"\bshall\b", r"\bmust\b", r"\brequired\b",
|
|
||||||
# Compound-Infinitive (Gerundivum): mitzuteilen, anzuwenden, bereitzustellen
|
|
||||||
r"\b\w+zuteilen\b", r"\b\w+zuwenden\b", r"\b\w+zustellen\b", r"\b\w+zulegen\b",
|
|
||||||
r"\b\w+zunehmen\b", r"\b\w+zuführen\b", r"\b\w+zuhalten\b", r"\b\w+zusetzen\b",
|
|
||||||
r"\b\w+zuweisen\b", r"\b\w+zuordnen\b", r"\b\w+zufügen\b", r"\b\w+zugeben\b",
|
|
||||||
# Breites Pattern: "ist ... [bis 80 Zeichen] ... zu + Infinitiv"
|
|
||||||
r"\bist\b.{1,80}\bzu\s+\w+en\b", r"\bsind\b.{1,80}\bzu\s+\w+en\b",
|
|
||||||
]
|
|
||||||
_PFLICHT_RE = re.compile("|".join(_PFLICHT_SIGNALS), re.IGNORECASE)
|
|
||||||
|
|
||||||
_EMPFEHLUNG_SIGNALS = [
|
|
||||||
# Modale Verben (schwaecher als "muss")
|
|
||||||
r"\bsoll\b", r"\bsollen\b", r"\bsollte\b", r"\bsollten\b",
|
|
||||||
r"\bgewährleisten\b", r"\bsicherstellen\b",
|
|
||||||
# Englische Empfehlungs-Signale
|
|
||||||
r"\bshould\b", r"\bensure\b", r"\brecommend\w*\b",
|
|
||||||
# Haeufige normative Infinitive (ohne Hilfsverb, als Empfehlung)
|
|
||||||
r"\bnachweisen\b", r"\beinhalten\b", r"\bunterlassen\b", r"\bwahren\b",
|
|
||||||
r"\bdokumentieren\b", r"\bimplementieren\b", r"\büberprüfen\b", r"\büberwachen\b",
|
|
||||||
# Pruefanweisungen als normative Aussage
|
|
||||||
r"\bprüfen,\s+ob\b", r"\bkontrollieren,\s+ob\b",
|
|
||||||
]
|
|
||||||
_EMPFEHLUNG_RE = re.compile("|".join(_EMPFEHLUNG_SIGNALS), re.IGNORECASE)
|
|
||||||
|
|
||||||
_KANN_SIGNALS = [
|
|
||||||
r"\bkann\b", r"\bkönnen\b", r"\bdarf\b", r"\bdürfen\b",
|
|
||||||
r"\bmay\b", r"\boptional\b",
|
|
||||||
]
|
|
||||||
_KANN_RE = re.compile("|".join(_KANN_SIGNALS), re.IGNORECASE)
|
|
||||||
|
|
||||||
# Union of all normative signals (for backward-compatible has_normative_signal flag)
|
|
||||||
_NORMATIVE_RE = re.compile(
|
|
||||||
"|".join(_PFLICHT_SIGNALS + _EMPFEHLUNG_SIGNALS + _KANN_SIGNALS),
|
|
||||||
re.IGNORECASE,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
_RATIONALE_SIGNALS = [
|
|
||||||
r"\bda\s+", r"\bweil\b", r"\bgrund\b", r"\berwägung",
|
|
||||||
r"\bbecause\b", r"\breason\b", r"\brationale\b",
|
|
||||||
r"\bkönnen\s+.*\s+verursachen\b", r"\bführt\s+zu\b",
|
|
||||||
]
|
|
||||||
_RATIONALE_RE = re.compile("|".join(_RATIONALE_SIGNALS), re.IGNORECASE)
|
|
||||||
|
|
||||||
_TEST_SIGNALS = [
|
_TEST_SIGNALS = [
|
||||||
r"\btesten\b", r"\btest\b", r"\bprüfung\b", r"\bprüfen\b",
|
r"\btesten\b", r"\btest\b", r"\bprüfung\b", r"\bprüfen\b",
|
||||||
r"\bgetestet\b", r"\bwirksamkeit\b", r"\baudit\b",
|
r"\bgetestet\b", r"\bwirksamkeit\b", r"\baudit\b",
|
||||||
@@ -1493,7 +1447,37 @@ def _normalize_object(object_raw: str) -> str:
|
|||||||
return obj[:80] or "unknown"
|
return obj[:80] or "unknown"
|
||||||
|
|
||||||
|
|
||||||
# ── 7b. Output Validator (Negativregeln) ─────────────────────────────────
|
# ── 7b. Framework / Composite Detection ──────────────────────────────────
|
||||||
|
|
||||||
|
_FRAMEWORK_KEYWORDS: list[str] = [
|
||||||
|
"praktiken", "kontrollen gemäß", "maßnahmen gemäß", "anforderungen aus",
|
||||||
|
"anforderungen gemäß", "gemäß .+ umzusetzen", "framework", "standard",
|
||||||
|
"controls for", "practices for", "requirements from",
|
||||||
|
]
|
||||||
|
|
||||||
|
_COMPOSITE_OBJECT_KEYWORDS: list[str] = [
|
||||||
|
"ccm", "nist", "iso 27001", "iso 27002", "owasp", "bsi",
|
||||||
|
"cis controls", "cobit", "sox", "pci dss", "hitrust",
|
||||||
|
"soc 2", "soc2", "enisa", "kritis",
|
||||||
|
]
|
||||||
|
|
||||||
|
_COMPOSITE_RE = re.compile(
|
||||||
|
"|".join(_FRAMEWORK_KEYWORDS + _COMPOSITE_OBJECT_KEYWORDS),
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _is_composite_obligation(obligation_text: str, object_: str) -> bool:
|
||||||
|
"""Detect framework-level / composite obligations that are NOT atomic.
|
||||||
|
|
||||||
|
Returns True if the obligation references a framework domain, standard,
|
||||||
|
or set of practices rather than a single auditable requirement.
|
||||||
|
"""
|
||||||
|
combined = f"{obligation_text} {object_}"
|
||||||
|
return bool(_COMPOSITE_RE.search(combined))
|
||||||
|
|
||||||
|
|
||||||
|
# ── 7c. Output Validator (Negativregeln) ─────────────────────────────────
|
||||||
|
|
||||||
def _validate_atomic_control(
|
def _validate_atomic_control(
|
||||||
atomic: "AtomicControlCandidate",
|
atomic: "AtomicControlCandidate",
|
||||||
@@ -1544,6 +1528,9 @@ def _validate_atomic_control(
|
|||||||
if object_class == "general":
|
if object_class == "general":
|
||||||
issues.append("WARN: object_class is 'general' (unclassified)")
|
issues.append("WARN: object_class is 'general' (unclassified)")
|
||||||
|
|
||||||
|
if getattr(atomic, "_is_composite", False):
|
||||||
|
issues.append("WARN: composite/framework obligation — requires further decomposition")
|
||||||
|
|
||||||
for issue in issues:
|
for issue in issues:
|
||||||
if issue.startswith("ERROR:"):
|
if issue.startswith("ERROR:"):
|
||||||
logger.warning("Validation: %s — title=%s", issue, atomic.title[:60])
|
logger.warning("Validation: %s — title=%s", issue, atomic.title[:60])
|
||||||
@@ -1703,6 +1690,12 @@ def _compose_deterministic(
|
|||||||
atomic._deadline_hours = deadline_hours # type: ignore[attr-defined]
|
atomic._deadline_hours = deadline_hours # type: ignore[attr-defined]
|
||||||
atomic._frequency = frequency # type: ignore[attr-defined]
|
atomic._frequency = frequency # type: ignore[attr-defined]
|
||||||
|
|
||||||
|
# ── Composite / Framework detection ───────────────────────
|
||||||
|
is_composite = _is_composite_obligation(obligation_text, object_)
|
||||||
|
atomic._is_composite = is_composite # type: ignore[attr-defined]
|
||||||
|
atomic._atomicity = "composite" if is_composite else "atomic" # type: ignore[attr-defined]
|
||||||
|
atomic._requires_decomposition = is_composite # type: ignore[attr-defined]
|
||||||
|
|
||||||
# ── Validate (log issues, never reject) ───────────────────
|
# ── Validate (log issues, never reject) ───────────────────
|
||||||
validation_issues = _validate_atomic_control(atomic, action_type, object_class)
|
validation_issues = _validate_atomic_control(atomic, action_type, object_class)
|
||||||
atomic._validation_issues = validation_issues # type: ignore[attr-defined]
|
atomic._validation_issues = validation_issues # type: ignore[attr-defined]
|
||||||
@@ -2403,23 +2396,7 @@ class DecompositionPass:
|
|||||||
else:
|
else:
|
||||||
# Deterministic engine — no LLM required
|
# Deterministic engine — no LLM required
|
||||||
for obl in batch:
|
for obl in batch:
|
||||||
sub_actions = _split_compound_action(obl["action"])
|
await self._route_and_compose(obl, stats)
|
||||||
for sub_action in sub_actions:
|
|
||||||
atomic = _compose_deterministic(
|
|
||||||
obligation_text=obl["obligation_text"],
|
|
||||||
action=sub_action,
|
|
||||||
object_=obl["object"],
|
|
||||||
parent_title=obl["parent_title"],
|
|
||||||
parent_severity=obl["parent_severity"],
|
|
||||||
parent_category=obl["parent_category"],
|
|
||||||
is_test=obl["is_test"],
|
|
||||||
is_reporting=obl["is_reporting"],
|
|
||||||
trigger_type=obl.get("trigger_type"),
|
|
||||||
condition=obl.get("condition"),
|
|
||||||
)
|
|
||||||
await self._process_pass0b_control(
|
|
||||||
obl, {}, stats, atomic=atomic,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Commit after each successful sub-batch
|
# Commit after each successful sub-batch
|
||||||
self.db.commit()
|
self.db.commit()
|
||||||
@@ -2435,6 +2412,107 @@ class DecompositionPass:
|
|||||||
logger.info("Pass 0b: %s", stats)
|
logger.info("Pass 0b: %s", stats)
|
||||||
return stats
|
return stats
|
||||||
|
|
||||||
|
async def _route_and_compose(
|
||||||
|
self, obl: dict, stats: dict,
|
||||||
|
) -> None:
|
||||||
|
"""Route an obligation through the framework detection layer,
|
||||||
|
then compose atomic controls.
|
||||||
|
|
||||||
|
Routing types:
|
||||||
|
- atomic: compose directly via _compose_deterministic
|
||||||
|
- compound: split compound verbs, compose each
|
||||||
|
- framework_container: decompose via framework registry,
|
||||||
|
then compose each sub-obligation
|
||||||
|
"""
|
||||||
|
from compliance.services.framework_decomposition import (
|
||||||
|
classify_routing,
|
||||||
|
decompose_framework_container,
|
||||||
|
)
|
||||||
|
|
||||||
|
routing = classify_routing(
|
||||||
|
obligation_text=obl["obligation_text"],
|
||||||
|
action_raw=obl["action"],
|
||||||
|
object_raw=obl["object"],
|
||||||
|
condition_raw=obl.get("condition"),
|
||||||
|
)
|
||||||
|
|
||||||
|
if routing.routing_type == "framework_container" and routing.framework_ref:
|
||||||
|
# Decompose framework container into sub-obligations
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id=obl["candidate_id"],
|
||||||
|
parent_control_id=obl["parent_control_id"],
|
||||||
|
obligation_text=obl["obligation_text"],
|
||||||
|
framework_ref=routing.framework_ref,
|
||||||
|
framework_domain=routing.framework_domain,
|
||||||
|
)
|
||||||
|
stats.setdefault("framework_decomposed", 0)
|
||||||
|
stats.setdefault("framework_sub_obligations", 0)
|
||||||
|
|
||||||
|
if result.release_state == "decomposed" and result.decomposed_obligations:
|
||||||
|
stats["framework_decomposed"] += 1
|
||||||
|
stats["framework_sub_obligations"] += len(result.decomposed_obligations)
|
||||||
|
logger.info(
|
||||||
|
"Framework decomposition: %s → %s/%s → %d sub-obligations",
|
||||||
|
obl["candidate_id"], routing.framework_ref,
|
||||||
|
routing.framework_domain, len(result.decomposed_obligations),
|
||||||
|
)
|
||||||
|
# Compose each sub-obligation
|
||||||
|
for d_obl in result.decomposed_obligations:
|
||||||
|
sub_obl = {
|
||||||
|
**obl,
|
||||||
|
"obligation_text": d_obl.obligation_text,
|
||||||
|
"action": d_obl.action_raw,
|
||||||
|
"object": d_obl.object_raw,
|
||||||
|
}
|
||||||
|
sub_actions = _split_compound_action(sub_obl["action"])
|
||||||
|
for sub_action in sub_actions:
|
||||||
|
atomic = _compose_deterministic(
|
||||||
|
obligation_text=sub_obl["obligation_text"],
|
||||||
|
action=sub_action,
|
||||||
|
object_=sub_obl["object"],
|
||||||
|
parent_title=obl["parent_title"],
|
||||||
|
parent_severity=obl["parent_severity"],
|
||||||
|
parent_category=obl["parent_category"],
|
||||||
|
is_test=obl["is_test"],
|
||||||
|
is_reporting=obl["is_reporting"],
|
||||||
|
trigger_type=obl.get("trigger_type"),
|
||||||
|
condition=obl.get("condition"),
|
||||||
|
)
|
||||||
|
# Enrich gen_meta with framework info
|
||||||
|
atomic._framework_ref = routing.framework_ref # type: ignore[attr-defined]
|
||||||
|
atomic._framework_domain = routing.framework_domain # type: ignore[attr-defined]
|
||||||
|
atomic._framework_subcontrol_id = d_obl.subcontrol_id # type: ignore[attr-defined]
|
||||||
|
atomic._decomposition_source = "framework_decomposition" # type: ignore[attr-defined]
|
||||||
|
await self._process_pass0b_control(
|
||||||
|
obl, {}, stats, atomic=atomic,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
# Unmatched framework — fall through to normal composition
|
||||||
|
logger.warning(
|
||||||
|
"Framework decomposition unmatched: %s — %s",
|
||||||
|
obl["candidate_id"], result.issues,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Atomic or compound or unmatched framework: normal composition
|
||||||
|
sub_actions = _split_compound_action(obl["action"])
|
||||||
|
for sub_action in sub_actions:
|
||||||
|
atomic = _compose_deterministic(
|
||||||
|
obligation_text=obl["obligation_text"],
|
||||||
|
action=sub_action,
|
||||||
|
object_=obl["object"],
|
||||||
|
parent_title=obl["parent_title"],
|
||||||
|
parent_severity=obl["parent_severity"],
|
||||||
|
parent_category=obl["parent_category"],
|
||||||
|
is_test=obl["is_test"],
|
||||||
|
is_reporting=obl["is_reporting"],
|
||||||
|
trigger_type=obl.get("trigger_type"),
|
||||||
|
condition=obl.get("condition"),
|
||||||
|
)
|
||||||
|
await self._process_pass0b_control(
|
||||||
|
obl, {}, stats, atomic=atomic,
|
||||||
|
)
|
||||||
|
|
||||||
async def _process_pass0b_control(
|
async def _process_pass0b_control(
|
||||||
self, obl: dict, parsed: dict, stats: dict,
|
self, obl: dict, parsed: dict, stats: dict,
|
||||||
atomic: Optional[AtomicControlCandidate] = None,
|
atomic: Optional[AtomicControlCandidate] = None,
|
||||||
@@ -2855,6 +2933,13 @@ class DecompositionPass:
|
|||||||
"deadline_hours": getattr(atomic, "_deadline_hours", None),
|
"deadline_hours": getattr(atomic, "_deadline_hours", None),
|
||||||
"frequency": getattr(atomic, "_frequency", None),
|
"frequency": getattr(atomic, "_frequency", None),
|
||||||
"validation_issues": getattr(atomic, "_validation_issues", []),
|
"validation_issues": getattr(atomic, "_validation_issues", []),
|
||||||
|
"is_composite": getattr(atomic, "_is_composite", False),
|
||||||
|
"atomicity": getattr(atomic, "_atomicity", "atomic"),
|
||||||
|
"requires_decomposition": getattr(atomic, "_requires_decomposition", False),
|
||||||
|
"framework_ref": getattr(atomic, "_framework_ref", None),
|
||||||
|
"framework_domain": getattr(atomic, "_framework_domain", None),
|
||||||
|
"framework_subcontrol_id": getattr(atomic, "_framework_subcontrol_id", None),
|
||||||
|
"decomposition_source": getattr(atomic, "_decomposition_source", "direct"),
|
||||||
}),
|
}),
|
||||||
"framework_id": "14b1bdd2-abc7-4a43-adae-14471ee5c7cf",
|
"framework_id": "14b1bdd2-abc7-4a43-adae-14471ee5c7cf",
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -0,0 +1,714 @@
|
|||||||
|
"""Framework Decomposition Engine — decomposes framework-container obligations.
|
||||||
|
|
||||||
|
Sits between Pass 0a (obligation extraction) and Pass 0b (atomic control
|
||||||
|
composition). Detects obligations that reference a framework domain (e.g.
|
||||||
|
"CCM-Praktiken fuer AIS") and decomposes them into concrete sub-obligations
|
||||||
|
using an internal framework registry.
|
||||||
|
|
||||||
|
Three routing types:
|
||||||
|
atomic → pass through to Pass 0b unchanged
|
||||||
|
compound → split compound verbs, then Pass 0b
|
||||||
|
framework_container → decompose via registry, then Pass 0b
|
||||||
|
|
||||||
|
The registry is a set of JSON files under compliance/data/frameworks/.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import uuid
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Registry loading
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
_REGISTRY_DIR = Path(__file__).resolve().parent.parent / "data" / "frameworks"
|
||||||
|
_REGISTRY: dict[str, dict] = {} # framework_id → framework dict
|
||||||
|
|
||||||
|
|
||||||
|
def _load_registry() -> dict[str, dict]:
|
||||||
|
"""Load all framework JSON files from the registry directory."""
|
||||||
|
registry: dict[str, dict] = {}
|
||||||
|
if not _REGISTRY_DIR.is_dir():
|
||||||
|
logger.warning("Framework registry dir not found: %s", _REGISTRY_DIR)
|
||||||
|
return registry
|
||||||
|
|
||||||
|
for fpath in sorted(_REGISTRY_DIR.glob("*.json")):
|
||||||
|
try:
|
||||||
|
with open(fpath, encoding="utf-8") as f:
|
||||||
|
fw = json.load(f)
|
||||||
|
fw_id = fw.get("framework_id", fpath.stem)
|
||||||
|
registry[fw_id] = fw
|
||||||
|
logger.info(
|
||||||
|
"Loaded framework: %s (%d domains)",
|
||||||
|
fw_id,
|
||||||
|
len(fw.get("domains", [])),
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
|
logger.exception("Failed to load framework file: %s", fpath)
|
||||||
|
return registry
|
||||||
|
|
||||||
|
|
||||||
|
def get_registry() -> dict[str, dict]:
|
||||||
|
"""Return the global framework registry (lazy-loaded)."""
|
||||||
|
global _REGISTRY
|
||||||
|
if not _REGISTRY:
|
||||||
|
_REGISTRY = _load_registry()
|
||||||
|
return _REGISTRY
|
||||||
|
|
||||||
|
|
||||||
|
def reload_registry() -> dict[str, dict]:
|
||||||
|
"""Force-reload the framework registry from disk."""
|
||||||
|
global _REGISTRY
|
||||||
|
_REGISTRY = _load_registry()
|
||||||
|
return _REGISTRY
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Framework alias index (built from registry)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _build_alias_index(registry: dict[str, dict]) -> dict[str, str]:
|
||||||
|
"""Build a lowercase alias → framework_id lookup."""
|
||||||
|
idx: dict[str, str] = {}
|
||||||
|
for fw_id, fw in registry.items():
|
||||||
|
# Framework-level aliases
|
||||||
|
idx[fw_id.lower()] = fw_id
|
||||||
|
name = fw.get("display_name", "")
|
||||||
|
if name:
|
||||||
|
idx[name.lower()] = fw_id
|
||||||
|
# Common short forms
|
||||||
|
for part in fw_id.lower().replace("_", " ").split():
|
||||||
|
if len(part) >= 3:
|
||||||
|
idx[part] = fw_id
|
||||||
|
return idx
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Routing — classify obligation type
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Extended patterns for framework detection (beyond the simple _COMPOSITE_RE
|
||||||
|
# in decomposition_pass.py — here we also capture the framework name)
|
||||||
|
_FRAMEWORK_PATTERN = re.compile(
|
||||||
|
r"(?:praktiken|kontrollen|ma(?:ss|ß)nahmen|anforderungen|vorgaben|controls|practices|measures|requirements)"
|
||||||
|
r"\s+(?:f(?:ue|ü)r|aus|gem(?:ae|ä)(?:ss|ß)|nach|from|of|for|per)\s+"
|
||||||
|
r"(.+?)(?:\s+(?:m(?:ue|ü)ssen|sollen|sind|werden|implementieren|umsetzen|einf(?:ue|ü)hren)|\.|,|$)",
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Direct framework name references
|
||||||
|
_DIRECT_FRAMEWORK_RE = re.compile(
|
||||||
|
r"\b(?:CSA\s*CCM|NIST\s*(?:SP\s*)?800-53|OWASP\s*(?:ASVS|SAMM|Top\s*10)"
|
||||||
|
r"|CIS\s*Controls|BSI\s*(?:IT-)?Grundschutz|ENISA|ISO\s*2700[12]"
|
||||||
|
r"|COBIT|SOX|PCI\s*DSS|HITRUST|SOC\s*2|KRITIS)\b",
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Compound verb patterns (multiple main verbs)
|
||||||
|
_COMPOUND_VERB_RE = re.compile(
|
||||||
|
r"\b(?:und|sowie|als\s+auch|or|and)\b",
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
|
||||||
|
# No-split phrases that look compound but aren't
|
||||||
|
_NO_SPLIT_PHRASES = [
|
||||||
|
"pflegen und aufrechterhalten",
|
||||||
|
"dokumentieren und pflegen",
|
||||||
|
"definieren und dokumentieren",
|
||||||
|
"erstellen und freigeben",
|
||||||
|
"pruefen und genehmigen",
|
||||||
|
"identifizieren und bewerten",
|
||||||
|
"erkennen und melden",
|
||||||
|
"define and maintain",
|
||||||
|
"create and maintain",
|
||||||
|
"establish and maintain",
|
||||||
|
"monitor and review",
|
||||||
|
"detect and respond",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class RoutingResult:
|
||||||
|
"""Result of obligation routing classification."""
|
||||||
|
routing_type: str # atomic | compound | framework_container | unknown_review
|
||||||
|
framework_ref: Optional[str] = None
|
||||||
|
framework_domain: Optional[str] = None
|
||||||
|
domain_title: Optional[str] = None
|
||||||
|
confidence: float = 0.0
|
||||||
|
reason: str = ""
|
||||||
|
|
||||||
|
|
||||||
|
def classify_routing(
|
||||||
|
obligation_text: str,
|
||||||
|
action_raw: str,
|
||||||
|
object_raw: str,
|
||||||
|
condition_raw: Optional[str] = None,
|
||||||
|
) -> RoutingResult:
|
||||||
|
"""Classify an obligation into atomic / compound / framework_container."""
|
||||||
|
combined = f"{obligation_text} {object_raw}".lower()
|
||||||
|
|
||||||
|
# --- Step 1: Framework container detection ---
|
||||||
|
fw_result = _detect_framework(obligation_text, object_raw)
|
||||||
|
if fw_result.routing_type == "framework_container":
|
||||||
|
return fw_result
|
||||||
|
|
||||||
|
# --- Step 2: Compound verb detection ---
|
||||||
|
if _is_compound_obligation(action_raw, obligation_text):
|
||||||
|
return RoutingResult(
|
||||||
|
routing_type="compound",
|
||||||
|
confidence=0.7,
|
||||||
|
reason="multiple_main_verbs",
|
||||||
|
)
|
||||||
|
|
||||||
|
# --- Step 3: Default = atomic ---
|
||||||
|
return RoutingResult(
|
||||||
|
routing_type="atomic",
|
||||||
|
confidence=0.9,
|
||||||
|
reason="single_action_single_object",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _detect_framework(
|
||||||
|
obligation_text: str, object_raw: str,
|
||||||
|
) -> RoutingResult:
|
||||||
|
"""Detect if obligation references a framework domain."""
|
||||||
|
combined = f"{obligation_text} {object_raw}"
|
||||||
|
registry = get_registry()
|
||||||
|
alias_idx = _build_alias_index(registry)
|
||||||
|
|
||||||
|
# Strategy 1: direct framework name match
|
||||||
|
m = _DIRECT_FRAMEWORK_RE.search(combined)
|
||||||
|
if m:
|
||||||
|
fw_name = m.group(0).strip()
|
||||||
|
fw_id = _resolve_framework_id(fw_name, alias_idx, registry)
|
||||||
|
if fw_id:
|
||||||
|
domain_id, domain_title = _match_domain(
|
||||||
|
combined, registry[fw_id],
|
||||||
|
)
|
||||||
|
return RoutingResult(
|
||||||
|
routing_type="framework_container",
|
||||||
|
framework_ref=fw_id,
|
||||||
|
framework_domain=domain_id,
|
||||||
|
domain_title=domain_title,
|
||||||
|
confidence=0.95 if domain_id else 0.75,
|
||||||
|
reason=f"direct_framework_match:{fw_name}",
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Framework name recognized but not in registry
|
||||||
|
return RoutingResult(
|
||||||
|
routing_type="framework_container",
|
||||||
|
framework_ref=None,
|
||||||
|
framework_domain=None,
|
||||||
|
confidence=0.6,
|
||||||
|
reason=f"direct_framework_match_no_registry:{fw_name}",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Strategy 2: pattern match ("Praktiken fuer X")
|
||||||
|
m2 = _FRAMEWORK_PATTERN.search(combined)
|
||||||
|
if m2:
|
||||||
|
ref_text = m2.group(1).strip()
|
||||||
|
fw_id, domain_id, domain_title = _resolve_from_ref_text(
|
||||||
|
ref_text, registry, alias_idx,
|
||||||
|
)
|
||||||
|
if fw_id:
|
||||||
|
return RoutingResult(
|
||||||
|
routing_type="framework_container",
|
||||||
|
framework_ref=fw_id,
|
||||||
|
framework_domain=domain_id,
|
||||||
|
domain_title=domain_title,
|
||||||
|
confidence=0.85 if domain_id else 0.65,
|
||||||
|
reason=f"pattern_match:{ref_text}",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Strategy 3: keyword-heavy object
|
||||||
|
if _has_framework_keywords(object_raw):
|
||||||
|
return RoutingResult(
|
||||||
|
routing_type="framework_container",
|
||||||
|
framework_ref=None,
|
||||||
|
framework_domain=None,
|
||||||
|
confidence=0.5,
|
||||||
|
reason="framework_keywords_in_object",
|
||||||
|
)
|
||||||
|
|
||||||
|
return RoutingResult(routing_type="atomic", confidence=0.0)
|
||||||
|
|
||||||
|
|
||||||
|
def _resolve_framework_id(
|
||||||
|
name: str,
|
||||||
|
alias_idx: dict[str, str],
|
||||||
|
registry: dict[str, dict],
|
||||||
|
) -> Optional[str]:
|
||||||
|
"""Resolve a framework name to its registry ID."""
|
||||||
|
normalized = re.sub(r"\s+", " ", name.strip().lower())
|
||||||
|
# Direct alias match
|
||||||
|
if normalized in alias_idx:
|
||||||
|
return alias_idx[normalized]
|
||||||
|
# Try compact form (strip spaces, hyphens, underscores)
|
||||||
|
compact = re.sub(r"[\s_\-]+", "", normalized)
|
||||||
|
for alias, fw_id in alias_idx.items():
|
||||||
|
if re.sub(r"[\s_\-]+", "", alias) == compact:
|
||||||
|
return fw_id
|
||||||
|
# Substring match in display names
|
||||||
|
for fw_id, fw in registry.items():
|
||||||
|
display = fw.get("display_name", "").lower()
|
||||||
|
if normalized in display or display in normalized:
|
||||||
|
return fw_id
|
||||||
|
# Partial match: check if normalized contains any alias (for multi-word refs)
|
||||||
|
for alias, fw_id in alias_idx.items():
|
||||||
|
if len(alias) >= 4 and alias in normalized:
|
||||||
|
return fw_id
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _match_domain(
|
||||||
|
text: str, framework: dict,
|
||||||
|
) -> tuple[Optional[str], Optional[str]]:
|
||||||
|
"""Match a domain within a framework from text references."""
|
||||||
|
text_lower = text.lower()
|
||||||
|
best_id: Optional[str] = None
|
||||||
|
best_title: Optional[str] = None
|
||||||
|
best_score = 0
|
||||||
|
|
||||||
|
for domain in framework.get("domains", []):
|
||||||
|
score = 0
|
||||||
|
domain_id = domain["domain_id"]
|
||||||
|
title = domain.get("title", "")
|
||||||
|
|
||||||
|
# Exact domain ID match (e.g. "AIS")
|
||||||
|
if re.search(rf"\b{re.escape(domain_id)}\b", text, re.IGNORECASE):
|
||||||
|
score += 10
|
||||||
|
|
||||||
|
# Full title match
|
||||||
|
if title.lower() in text_lower:
|
||||||
|
score += 8
|
||||||
|
|
||||||
|
# Alias match
|
||||||
|
for alias in domain.get("aliases", []):
|
||||||
|
if alias.lower() in text_lower:
|
||||||
|
score += 6
|
||||||
|
break
|
||||||
|
|
||||||
|
# Keyword overlap
|
||||||
|
kw_hits = sum(
|
||||||
|
1 for kw in domain.get("keywords", [])
|
||||||
|
if kw.lower() in text_lower
|
||||||
|
)
|
||||||
|
score += kw_hits
|
||||||
|
|
||||||
|
if score > best_score:
|
||||||
|
best_score = score
|
||||||
|
best_id = domain_id
|
||||||
|
best_title = title
|
||||||
|
|
||||||
|
if best_score >= 3:
|
||||||
|
return best_id, best_title
|
||||||
|
return None, None
|
||||||
|
|
||||||
|
|
||||||
|
def _resolve_from_ref_text(
|
||||||
|
ref_text: str,
|
||||||
|
registry: dict[str, dict],
|
||||||
|
alias_idx: dict[str, str],
|
||||||
|
) -> tuple[Optional[str], Optional[str], Optional[str]]:
|
||||||
|
"""Resolve framework + domain from a reference text like 'AIS' or 'Application Security'."""
|
||||||
|
ref_lower = ref_text.lower()
|
||||||
|
|
||||||
|
for fw_id, fw in registry.items():
|
||||||
|
for domain in fw.get("domains", []):
|
||||||
|
# Check domain ID
|
||||||
|
if domain["domain_id"].lower() in ref_lower:
|
||||||
|
return fw_id, domain["domain_id"], domain.get("title")
|
||||||
|
# Check title
|
||||||
|
if domain.get("title", "").lower() in ref_lower:
|
||||||
|
return fw_id, domain["domain_id"], domain.get("title")
|
||||||
|
# Check aliases
|
||||||
|
for alias in domain.get("aliases", []):
|
||||||
|
if alias.lower() in ref_lower or ref_lower in alias.lower():
|
||||||
|
return fw_id, domain["domain_id"], domain.get("title")
|
||||||
|
|
||||||
|
return None, None, None
|
||||||
|
|
||||||
|
|
||||||
|
_FRAMEWORK_KW_SET = {
|
||||||
|
"praktiken", "kontrollen", "massnahmen", "maßnahmen",
|
||||||
|
"anforderungen", "vorgaben", "framework", "standard",
|
||||||
|
"baseline", "katalog", "domain", "family", "category",
|
||||||
|
"practices", "controls", "measures", "requirements",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _has_framework_keywords(text: str) -> bool:
|
||||||
|
"""Check if text contains framework-indicator keywords."""
|
||||||
|
words = set(re.findall(r"[a-zäöüß]+", text.lower()))
|
||||||
|
return len(words & _FRAMEWORK_KW_SET) >= 2
|
||||||
|
|
||||||
|
|
||||||
|
def _is_compound_obligation(action_raw: str, obligation_text: str) -> bool:
|
||||||
|
"""Detect if the obligation has multiple competing main verbs."""
|
||||||
|
if not action_raw:
|
||||||
|
return False
|
||||||
|
|
||||||
|
action_lower = action_raw.lower().strip()
|
||||||
|
|
||||||
|
# Check no-split phrases first
|
||||||
|
for phrase in _NO_SPLIT_PHRASES:
|
||||||
|
if phrase in action_lower:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Must have a conjunction
|
||||||
|
if not _COMPOUND_VERB_RE.search(action_lower):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Split by conjunctions and check if we get 2+ meaningful verbs
|
||||||
|
parts = re.split(r"\b(?:und|sowie|als\s+auch|or|and)\b", action_lower)
|
||||||
|
meaningful = [p.strip() for p in parts if len(p.strip()) >= 3]
|
||||||
|
return len(meaningful) >= 2
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Framework Decomposition
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class DecomposedObligation:
|
||||||
|
"""A concrete obligation derived from a framework container."""
|
||||||
|
obligation_candidate_id: str
|
||||||
|
parent_control_id: str
|
||||||
|
parent_framework_container_id: str
|
||||||
|
source_ref_law: str
|
||||||
|
source_ref_article: str
|
||||||
|
obligation_text: str
|
||||||
|
actor: str
|
||||||
|
action_raw: str
|
||||||
|
object_raw: str
|
||||||
|
condition_raw: Optional[str] = None
|
||||||
|
trigger_raw: Optional[str] = None
|
||||||
|
routing_type: str = "atomic"
|
||||||
|
release_state: str = "decomposed"
|
||||||
|
subcontrol_id: str = ""
|
||||||
|
# Metadata
|
||||||
|
action_hint: str = ""
|
||||||
|
object_hint: str = ""
|
||||||
|
object_class: str = ""
|
||||||
|
keywords: list[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class FrameworkDecompositionResult:
|
||||||
|
"""Result of framework decomposition."""
|
||||||
|
framework_container_id: str
|
||||||
|
source_obligation_candidate_id: str
|
||||||
|
framework_ref: Optional[str]
|
||||||
|
framework_domain: Optional[str]
|
||||||
|
domain_title: Optional[str]
|
||||||
|
matched_subcontrols: list[str]
|
||||||
|
decomposition_confidence: float
|
||||||
|
release_state: str # decomposed | unmatched | error
|
||||||
|
decomposed_obligations: list[DecomposedObligation]
|
||||||
|
issues: list[str]
|
||||||
|
|
||||||
|
|
||||||
|
def decompose_framework_container(
    obligation_candidate_id: str,
    parent_control_id: str,
    obligation_text: str,
    framework_ref: Optional[str],
    framework_domain: Optional[str],
    actor: str = "organization",
) -> FrameworkDecompositionResult:
    """Decompose a framework-container obligation into concrete sub-obligations.

    Steps:
    1. Resolve framework from registry
    2. Resolve domain within framework
    3. Select relevant subcontrols (keyword filter or full domain)
    4. Generate decomposed obligations

    Args:
        obligation_candidate_id: Id of the container obligation candidate.
        parent_control_id: Control the container belongs to.
        obligation_text: Raw text of the container obligation.
        framework_ref: Known framework id, if already resolved upstream.
        framework_domain: Known domain id within the framework, if any.
        actor: Responsible actor copied onto every decomposed obligation.

    Returns:
        FrameworkDecompositionResult with release_state "decomposed" on
        success, or "unmatched" when no framework / no subcontrols match.
    """
    container_id = f"FWC-{uuid.uuid4().hex[:8]}"
    registry = get_registry()
    issues: list[str] = []

    def _unmatched() -> FrameworkDecompositionResult:
        # Shared constructor for both "no framework" and "no subcontrols"
        # early exits (previously duplicated inline). Reads the current
        # values of framework_ref / framework_domain / issues.
        return FrameworkDecompositionResult(
            framework_container_id=container_id,
            source_obligation_candidate_id=obligation_candidate_id,
            framework_ref=framework_ref,
            framework_domain=framework_domain,
            domain_title=None,
            matched_subcontrols=[],
            decomposition_confidence=0.0,
            release_state="unmatched",
            decomposed_obligations=[],
            issues=issues,
        )

    # Step 1: Resolve framework
    fw = None
    if framework_ref and framework_ref in registry:
        fw = registry[framework_ref]
    else:
        # Try to find by name in text
        fw, framework_ref = _find_framework_in_text(obligation_text, registry)

    if not fw:
        issues.append("ERROR: framework_not_matched")
        return _unmatched()

    # Step 2: Resolve domain
    domain_data = None
    domain_title = None
    if framework_domain:
        for d in fw.get("domains", []):
            if d["domain_id"].lower() == framework_domain.lower():
                domain_data = d
                domain_title = d.get("title")
                break
    if not domain_data:
        # Try matching from text
        domain_id, domain_title = _match_domain(obligation_text, fw)
        if domain_id:
            for d in fw.get("domains", []):
                if d["domain_id"] == domain_id:
                    domain_data = d
                    framework_domain = domain_id
                    break

    if not domain_data:
        issues.append("WARN: domain_not_matched — using all domains")
        # Fall back to all subcontrols across all domains.
        # BUGFIX: tag a *copy* of each subcontrol dict with "_domain_id"
        # instead of mutating the registry's dicts in place — the previous
        # in-place write leaked "_domain_id" keys into the shared registry
        # across calls.
        all_subcontrols = [
            {**sc, "_domain_id": d["domain_id"]}
            for d in fw.get("domains", [])
            for sc in d.get("subcontrols", [])
        ]
        subcontrols = _select_subcontrols(obligation_text, all_subcontrols)
        if not subcontrols:
            issues.append("ERROR: no_subcontrols_matched")
            return _unmatched()
    else:
        # Step 3: Select subcontrols from domain
        raw_subcontrols = domain_data.get("subcontrols", [])
        subcontrols = _select_subcontrols(obligation_text, raw_subcontrols)
        if not subcontrols:
            # No targeted keyword match: decompose the full domain.
            subcontrols = raw_subcontrols

    # Quality check: too many subcontrols
    if len(subcontrols) > 25:
        issues.append(f"WARN: {len(subcontrols)} subcontrols — may be too broad")

    # Step 4: Generate decomposed obligations
    display_name = fw.get("display_name", framework_ref or "Unknown")
    decomposed: list[DecomposedObligation] = []
    matched_ids: list[str] = []

    for sc in subcontrols:
        sc_id = sc.get("subcontrol_id", "")
        matched_ids.append(sc_id)

        action_hint = sc.get("action_hint", "")
        object_hint = sc.get("object_hint", "")

        # Quality warnings — hints are optional in the registry data.
        if not action_hint:
            issues.append(f"WARN: {sc_id} missing action_hint")
        if not object_hint:
            issues.append(f"WARN: {sc_id} missing object_hint")

        obl_id = f"{obligation_candidate_id}-{sc_id}"

        decomposed.append(DecomposedObligation(
            obligation_candidate_id=obl_id,
            parent_control_id=parent_control_id,
            parent_framework_container_id=container_id,
            source_ref_law=display_name,
            source_ref_article=sc_id,
            obligation_text=sc.get("statement", ""),
            actor=actor,
            # Fall back to heuristic inference when the registry gives no hint.
            action_raw=action_hint or _infer_action(sc.get("statement", "")),
            object_raw=object_hint or _infer_object(sc.get("statement", "")),
            routing_type="atomic",
            release_state="decomposed",
            subcontrol_id=sc_id,
            action_hint=action_hint,
            object_hint=object_hint,
            object_class=sc.get("object_class", ""),
            keywords=sc.get("keywords", []),
        ))

    # Warn when a decomposed obligation merely repeats the container text.
    for d in decomposed:
        if d.obligation_text.strip() == obligation_text.strip():
            issues.append(f"WARN: {d.subcontrol_id} identical to container text")

    confidence = _compute_decomposition_confidence(
        framework_ref, framework_domain, domain_data, len(subcontrols), issues,
    )

    return FrameworkDecompositionResult(
        framework_container_id=container_id,
        source_obligation_candidate_id=obligation_candidate_id,
        framework_ref=framework_ref,
        framework_domain=framework_domain,
        domain_title=domain_title,
        matched_subcontrols=matched_ids,
        decomposition_confidence=confidence,
        release_state="decomposed",
        decomposed_obligations=decomposed,
        issues=issues,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _find_framework_in_text(
    text: str, registry: dict[str, dict],
) -> tuple[Optional[dict], Optional[str]]:
    """Try to find a framework by searching text for known names.

    Returns (framework_dict, framework_id) when a direct mention resolves
    to a registered framework; (None, None) otherwise.
    """
    alias_index = _build_alias_index(registry)
    direct_mention = _DIRECT_FRAMEWORK_RE.search(text)
    if not direct_mention:
        return None, None
    framework_id = _resolve_framework_id(direct_mention.group(0), alias_index, registry)
    if framework_id and framework_id in registry:
        return registry[framework_id], framework_id
    return None, None
|
||||||
|
|
||||||
|
|
||||||
|
def _select_subcontrols(
|
||||||
|
obligation_text: str, subcontrols: list[dict],
|
||||||
|
) -> list[dict]:
|
||||||
|
"""Select relevant subcontrols based on keyword matching.
|
||||||
|
|
||||||
|
Returns empty list if no targeted match found (caller falls back to
|
||||||
|
full domain).
|
||||||
|
"""
|
||||||
|
text_lower = obligation_text.lower()
|
||||||
|
scored: list[tuple[int, dict]] = []
|
||||||
|
|
||||||
|
for sc in subcontrols:
|
||||||
|
score = 0
|
||||||
|
for kw in sc.get("keywords", []):
|
||||||
|
if kw.lower() in text_lower:
|
||||||
|
score += 1
|
||||||
|
# Title match
|
||||||
|
title = sc.get("title", "").lower()
|
||||||
|
if title and title in text_lower:
|
||||||
|
score += 3
|
||||||
|
# Object hint in text
|
||||||
|
obj = sc.get("object_hint", "").lower()
|
||||||
|
if obj and obj in text_lower:
|
||||||
|
score += 2
|
||||||
|
|
||||||
|
if score > 0:
|
||||||
|
scored.append((score, sc))
|
||||||
|
|
||||||
|
if not scored:
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Only return those with meaningful overlap (score >= 2)
|
||||||
|
scored.sort(key=lambda x: x[0], reverse=True)
|
||||||
|
return [sc for score, sc in scored if score >= 2]
|
||||||
|
|
||||||
|
|
||||||
|
def _infer_action(statement: str) -> str:
|
||||||
|
"""Infer a basic action verb from a statement."""
|
||||||
|
s = statement.lower()
|
||||||
|
if any(w in s for w in ["definiert", "definieren", "define"]):
|
||||||
|
return "definieren"
|
||||||
|
if any(w in s for w in ["implementiert", "implementieren", "implement"]):
|
||||||
|
return "implementieren"
|
||||||
|
if any(w in s for w in ["dokumentiert", "dokumentieren", "document"]):
|
||||||
|
return "dokumentieren"
|
||||||
|
if any(w in s for w in ["ueberwacht", "ueberwachen", "monitor"]):
|
||||||
|
return "ueberwachen"
|
||||||
|
if any(w in s for w in ["getestet", "testen", "test"]):
|
||||||
|
return "testen"
|
||||||
|
if any(w in s for w in ["geschuetzt", "schuetzen", "protect"]):
|
||||||
|
return "implementieren"
|
||||||
|
if any(w in s for w in ["verwaltet", "verwalten", "manage"]):
|
||||||
|
return "pflegen"
|
||||||
|
if any(w in s for w in ["gemeldet", "melden", "report"]):
|
||||||
|
return "melden"
|
||||||
|
return "implementieren"
|
||||||
|
|
||||||
|
|
||||||
|
def _infer_object(statement: str) -> str:
|
||||||
|
"""Infer the primary object from a statement (first noun phrase)."""
|
||||||
|
# Simple heuristic: take the text after "muessen"/"muss" up to the verb
|
||||||
|
m = re.search(
|
||||||
|
r"(?:muessen|muss|m(?:ü|ue)ssen)\s+(.+?)(?:\s+werden|\s+sein|\.|,|$)",
|
||||||
|
statement,
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
if m:
|
||||||
|
return m.group(1).strip()[:80]
|
||||||
|
# Fallback: first 80 chars
|
||||||
|
return statement[:80] if statement else ""
|
||||||
|
|
||||||
|
|
||||||
|
def _compute_decomposition_confidence(
|
||||||
|
framework_ref: Optional[str],
|
||||||
|
domain: Optional[str],
|
||||||
|
domain_data: Optional[dict],
|
||||||
|
num_subcontrols: int,
|
||||||
|
issues: list[str],
|
||||||
|
) -> float:
|
||||||
|
"""Compute confidence score for the decomposition."""
|
||||||
|
score = 0.3
|
||||||
|
if framework_ref:
|
||||||
|
score += 0.25
|
||||||
|
if domain:
|
||||||
|
score += 0.20
|
||||||
|
if domain_data:
|
||||||
|
score += 0.10
|
||||||
|
if 1 <= num_subcontrols <= 15:
|
||||||
|
score += 0.10
|
||||||
|
elif num_subcontrols > 15:
|
||||||
|
score += 0.05 # less confident with too many
|
||||||
|
|
||||||
|
# Penalize errors
|
||||||
|
errors = sum(1 for i in issues if i.startswith("ERROR:"))
|
||||||
|
score -= errors * 0.15
|
||||||
|
return round(max(min(score, 1.0), 0.0), 2)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Registry statistics (for admin/debugging)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def registry_stats() -> dict:
    """Return summary statistics about the loaded registry.

    Includes a per-framework breakdown (domain / subcontrol counts) plus
    grand totals across all frameworks.
    """
    reg = get_registry()
    details = []
    domain_total = 0
    subcontrol_total = 0
    for framework_id, framework in reg.items():
        framework_domains = framework.get("domains", [])
        subcontrol_count = sum(
            len(domain.get("subcontrols", [])) for domain in framework_domains
        )
        domain_total += len(framework_domains)
        subcontrol_total += subcontrol_count
        details.append({
            "framework_id": framework_id,
            "display_name": framework.get("display_name", ""),
            "domains": len(framework_domains),
            "subcontrols": subcontrol_count,
        })
    return {
        "frameworks": len(reg),
        "details": details,
        "total_domains": domain_total,
        "total_subcontrols": subcontrol_total,
    }
|
||||||
59
backend-compliance/compliance/services/normative_patterns.py
Normal file
59
backend-compliance/compliance/services/normative_patterns.py
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
"""Shared normative language patterns for assertion classification.

Extracted from decomposition_pass.py for reuse in the assertion engine.

Tiers (strongest first): PFLICHT (binding obligations), EMPFEHLUNG
(recommendations), KANN (permissions/options). NORMATIVE_RE matches any
of the three; RATIONALE_RE and EVIDENCE_RE are auxiliary detectors.
"""

import re

# Binding-obligation signals: German "müssen" modal forms, passive
# "ist/sind zu <verb>en" constructions (including forms with an inserted
# word and fused "zu"-infinitives such as "zuzuordnen"), and English
# "shall"/"must"/"required".
_PFLICHT_SIGNALS = [
    r"\bmüssen\b", r"\bmuss\b", r"\bhat\s+sicherzustellen\b",
    r"\bhaben\s+sicherzustellen\b", r"\bsind\s+verpflichtet\b",
    r"\bist\s+verpflichtet\b",
    r"\bist\s+zu\s+\w+en\b", r"\bsind\s+zu\s+\w+en\b",
    r"\bhat\s+zu\s+\w+en\b", r"\bhaben\s+zu\s+\w+en\b",
    r"\bist\s+\w+zu\w+en\b", r"\bsind\s+\w+zu\w+en\b",
    r"\bist\s+\w+\s+zu\s+\w+en\b", r"\bsind\s+\w+\s+zu\s+\w+en\b",
    r"\bhat\s+\w+\s+zu\s+\w+en\b", r"\bhaben\s+\w+\s+zu\s+\w+en\b",
    r"\bshall\b", r"\bmust\b", r"\brequired\b",
    r"\b\w+zuteilen\b", r"\b\w+zuwenden\b", r"\b\w+zustellen\b", r"\b\w+zulegen\b",
    r"\b\w+zunehmen\b", r"\b\w+zuführen\b", r"\b\w+zuhalten\b", r"\b\w+zusetzen\b",
    r"\b\w+zuweisen\b", r"\b\w+zuordnen\b", r"\b\w+zufügen\b", r"\b\w+zugeben\b",
    # Long-range "ist ... zu <verb>en" with up to 80 chars in between.
    r"\bist\b.{1,80}\bzu\s+\w+en\b", r"\bsind\b.{1,80}\bzu\s+\w+en\b",
]
PFLICHT_RE = re.compile("|".join(_PFLICHT_SIGNALS), re.IGNORECASE)

# Recommendation signals: German "soll(te)(n)" plus common compliance
# verbs, and English "should"/"ensure"/"recommend...".
_EMPFEHLUNG_SIGNALS = [
    r"\bsoll\b", r"\bsollen\b", r"\bsollte\b", r"\bsollten\b",
    r"\bgewährleisten\b", r"\bsicherstellen\b",
    r"\bshould\b", r"\bensure\b", r"\brecommend\w*\b",
    r"\bnachweisen\b", r"\beinhalten\b", r"\bunterlassen\b", r"\bwahren\b",
    r"\bdokumentieren\b", r"\bimplementieren\b", r"\büberprüfen\b", r"\büberwachen\b",
    r"\bprüfen,\s+ob\b", r"\bkontrollieren,\s+ob\b",
]
EMPFEHLUNG_RE = re.compile("|".join(_EMPFEHLUNG_SIGNALS), re.IGNORECASE)

# Permission/option signals: German "kann/darf" forms, English "may"/"optional".
_KANN_SIGNALS = [
    r"\bkann\b", r"\bkönnen\b", r"\bdarf\b", r"\bdürfen\b",
    r"\bmay\b", r"\boptional\b",
]
KANN_RE = re.compile("|".join(_KANN_SIGNALS), re.IGNORECASE)

# Union of all three tiers: "is this sentence normative at all?".
NORMATIVE_RE = re.compile(
    "|".join(_PFLICHT_SIGNALS + _EMPFEHLUNG_SIGNALS + _KANN_SIGNALS),
    re.IGNORECASE,
)

# Justification/explanation signals ("because", "weil", causal phrasing).
_RATIONALE_SIGNALS = [
    r"\bda\s+", r"\bweil\b", r"\bgrund\b", r"\berwägung",
    r"\bbecause\b", r"\breason\b", r"\brationale\b",
    r"\bkönnen\s+.*\s+verursachen\b", r"\bführt\s+zu\b",
]
RATIONALE_RE = re.compile("|".join(_RATIONALE_SIGNALS), re.IGNORECASE)

# Evidence-related keywords (for fact detection)
_EVIDENCE_KEYWORDS = [
    r"\bnachweis\b", r"\bzertifikat\b", r"\baudit.report\b",
    r"\bprotokoll\b", r"\bdokumentation\b", r"\bbericht\b",
    r"\bcertificate\b", r"\bevidence\b", r"\bproof\b",
]
EVIDENCE_RE = re.compile("|".join(_EVIDENCE_KEYWORDS), re.IGNORECASE)
|
||||||
125
backend-compliance/migrations/076_anti_fake_evidence.sql
Normal file
125
backend-compliance/migrations/076_anti_fake_evidence.sql
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
-- Migration 076: Anti-Fake-Evidence Guardrails (Phase 1)
--
-- Prevents "Compliance-Theater": generated content passed off as real evidence,
-- controls without evidence marked as "pass", unvalidated 100% compliance claims.
--
-- Changes:
-- 1. New ENUM types for evidence confidence + truth status
-- 2. New columns on compliance_evidence (confidence, truth, review tracking)
-- 3. New value 'in_progress' for controlstatusenum
-- 4. status_justification column on compliance_controls
-- 5. New table compliance_llm_generation_audit
-- 6. Backfill existing evidence based on source
-- 7. Indexes on new columns
--
-- Re-runnability: DDL uses IF NOT EXISTS and the backfill UPDATEs are
-- guarded by "confidence_level = 'E1'", so rows already reclassified
-- are not touched again. NOTE(review): CREATE TYPE itself has no
-- IF NOT EXISTS guard here — a second run would fail on it; confirm the
-- migration runner tracks applied migrations.

-- ============================================================================
-- 1. New ENUM types
-- ============================================================================

-- NOTE: CREATE TYPE cannot run inside a transaction block when combined with
-- ALTER TYPE ... ADD VALUE. Each statement here is auto-committed separately
-- when executed outside a transaction (which is the default for psql scripts).

CREATE TYPE evidence_confidence_level AS ENUM (
    'E0', -- Generated / no real evidence (LLM output, placeholder)
    'E1', -- Uploaded but unreviewed (manual upload, no hash, no reviewer)
    'E2', -- Reviewed internally (human reviewed, hash verified)
    'E3', -- Observed by system (CI/CD pipeline, API with hash)
    'E4'  -- Validated by external auditor
);

CREATE TYPE evidence_truth_status AS ENUM (
    'generated',            -- Created by LLM / system generation
    'uploaded',             -- Manually uploaded by user
    'observed',             -- Automatically observed (CI/CD, monitoring)
    'validated_internal',   -- Reviewed + approved by internal reviewer
    'rejected',             -- Reviewed and rejected
    'provided_to_auditor',  -- Shared with external auditor
    'accepted_by_auditor'   -- Accepted by external auditor
);

-- ============================================================================
-- 2. Add 'in_progress' to controlstatusenum
-- ============================================================================
-- ALTER TYPE ... ADD VALUE cannot run inside a transaction.

ALTER TYPE controlstatusenum ADD VALUE IF NOT EXISTS 'in_progress';

-- ============================================================================
-- 3. New columns on compliance_evidence
-- ============================================================================
-- Defaults (E1 / uploaded) represent the conservative classification for
-- pre-existing rows; section 6 upgrades/downgrades them by source.

ALTER TABLE compliance_evidence
    ADD COLUMN IF NOT EXISTS confidence_level evidence_confidence_level DEFAULT 'E1',
    ADD COLUMN IF NOT EXISTS truth_status evidence_truth_status DEFAULT 'uploaded',
    ADD COLUMN IF NOT EXISTS generation_mode VARCHAR(100),
    ADD COLUMN IF NOT EXISTS may_be_used_as_evidence BOOLEAN DEFAULT TRUE,
    ADD COLUMN IF NOT EXISTS reviewed_by VARCHAR(200),
    ADD COLUMN IF NOT EXISTS reviewed_at TIMESTAMPTZ;

-- ============================================================================
-- 4. status_justification on compliance_controls
-- ============================================================================

ALTER TABLE compliance_controls
    ADD COLUMN IF NOT EXISTS status_justification TEXT;

-- ============================================================================
-- 5. LLM Generation Audit table
-- ============================================================================

CREATE TABLE IF NOT EXISTS compliance_llm_generation_audit (
    id VARCHAR(36) PRIMARY KEY DEFAULT gen_random_uuid()::text,
    tenant_id VARCHAR(36),
    entity_type VARCHAR(50) NOT NULL,       -- 'evidence', 'control', 'document', ...
    entity_id VARCHAR(36),                  -- FK to the generated entity
    generation_mode VARCHAR(100) NOT NULL,  -- 'draft_assistance', 'auto_generation', ...
    truth_status evidence_truth_status NOT NULL DEFAULT 'generated',
    may_be_used_as_evidence BOOLEAN NOT NULL DEFAULT FALSE,
    llm_model VARCHAR(100),
    llm_provider VARCHAR(50),               -- 'ollama', 'anthropic', ...
    prompt_hash VARCHAR(64),                -- SHA-256 of the prompt
    input_summary TEXT,                     -- Truncated input for auditability
    output_summary TEXT,                    -- Truncated output for auditability
    metadata JSONB DEFAULT '{}'::jsonb,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- ============================================================================
-- 6. Backfill existing evidence based on source
-- ============================================================================
-- Each UPDATE only touches rows still at the E1 column default, so the
-- backfill never overwrites an already-classified row.

-- CI pipeline evidence → E3 + observed
UPDATE compliance_evidence
SET confidence_level = 'E3',
    truth_status = 'observed'
WHERE source = 'ci_pipeline'
  AND confidence_level = 'E1';

-- API evidence → E3 + observed
UPDATE compliance_evidence
SET confidence_level = 'E3',
    truth_status = 'observed'
WHERE source = 'api'
  AND confidence_level = 'E1';

-- Manual/upload evidence stays at E1 + uploaded (default)

-- Generated evidence → E0 + generated (and barred from use as evidence)
UPDATE compliance_evidence
SET confidence_level = 'E0',
    truth_status = 'generated',
    may_be_used_as_evidence = FALSE
WHERE source = 'generated'
  AND confidence_level = 'E1';

-- ============================================================================
-- 7. Indexes
-- ============================================================================

CREATE INDEX IF NOT EXISTS ix_evidence_confidence ON compliance_evidence (confidence_level);
CREATE INDEX IF NOT EXISTS ix_evidence_truth_status ON compliance_evidence (truth_status);
CREATE INDEX IF NOT EXISTS ix_evidence_may_be_used ON compliance_evidence (may_be_used_as_evidence);
CREATE INDEX IF NOT EXISTS ix_llm_audit_entity ON compliance_llm_generation_audit (entity_type, entity_id);
CREATE INDEX IF NOT EXISTS ix_llm_audit_tenant ON compliance_llm_generation_audit (tenant_id);
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
-- Migration 077: Anti-Fake-Evidence Phase 2
-- Assertions table, Four-Eyes columns on Evidence, Audit-Trail performance index
-- All DDL is guarded with IF NOT EXISTS, so the migration is safe to re-run.

-- 1A. Assertions table
-- One row per classified sentence of an entity's text; evidence_ids links
-- each assertion to the evidence rows backing it.
CREATE TABLE IF NOT EXISTS compliance_assertions (
    id VARCHAR(36) PRIMARY KEY DEFAULT gen_random_uuid()::text,
    tenant_id VARCHAR(36),
    entity_type VARCHAR(50) NOT NULL,
    entity_id VARCHAR(36) NOT NULL,
    sentence_text TEXT NOT NULL,
    sentence_index INTEGER NOT NULL DEFAULT 0,       -- position within the entity's text
    assertion_type VARCHAR(20) NOT NULL DEFAULT 'assertion',
    evidence_ids JSONB DEFAULT '[]'::jsonb,          -- list of supporting evidence ids
    confidence FLOAT DEFAULT 0.0,
    normative_tier VARCHAR(20),
    verified_by VARCHAR(200),
    verified_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS ix_assertion_entity ON compliance_assertions (entity_type, entity_id);
CREATE INDEX IF NOT EXISTS ix_assertion_type ON compliance_assertions (assertion_type);
CREATE INDEX IF NOT EXISTS ix_assertion_tenant ON compliance_assertions (tenant_id);

-- 1B. Four-Eyes columns on Evidence
-- Two independent reviewer slots; requires_four_eyes flags rows that need both.
ALTER TABLE compliance_evidence
    ADD COLUMN IF NOT EXISTS approval_status VARCHAR(30) DEFAULT 'none',
    ADD COLUMN IF NOT EXISTS first_reviewer VARCHAR(200),
    ADD COLUMN IF NOT EXISTS first_reviewed_at TIMESTAMPTZ,
    ADD COLUMN IF NOT EXISTS second_reviewer VARCHAR(200),
    ADD COLUMN IF NOT EXISTS second_reviewed_at TIMESTAMPTZ,
    ADD COLUMN IF NOT EXISTS requires_four_eyes BOOLEAN DEFAULT FALSE;
CREATE INDEX IF NOT EXISTS ix_evidence_approval_status ON compliance_evidence (approval_status);

-- 1C. Audit-Trail performance index
CREATE INDEX IF NOT EXISTS ix_audit_trail_entity_action
    ON compliance_audit_trail (entity_type, action, performed_at);
|
||||||
562
backend-compliance/tests/test_anti_fake_evidence.py
Normal file
562
backend-compliance/tests/test_anti_fake_evidence.py
Normal file
@@ -0,0 +1,562 @@
|
|||||||
|
"""Tests for Anti-Fake-Evidence Phase 1 guardrails.
|
||||||
|
|
||||||
|
~45 tests covering:
|
||||||
|
- Evidence confidence classification
|
||||||
|
- Evidence truth status classification
|
||||||
|
- Control status transition state machine
|
||||||
|
- Multi-dimensional compliance score
|
||||||
|
- LLM generation audit
|
||||||
|
- Evidence review endpoint
|
||||||
|
"""
|
||||||
|
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from unittest.mock import MagicMock, patch
|
||||||
|
from fastapi import FastAPI
|
||||||
|
from fastapi.testclient import TestClient
|
||||||
|
|
||||||
|
from compliance.api.evidence_routes import router as evidence_router
|
||||||
|
from compliance.api.llm_audit_routes import router as llm_audit_router
|
||||||
|
from compliance.api.evidence_routes import _classify_confidence, _classify_truth_status
|
||||||
|
from compliance.services.control_status_machine import validate_transition
|
||||||
|
from compliance.db.models import (
|
||||||
|
EvidenceConfidenceEnum,
|
||||||
|
EvidenceTruthStatusEnum,
|
||||||
|
ControlStatusEnum,
|
||||||
|
)
|
||||||
|
from classroom_engine.database import get_db
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# App setup with mocked DB dependency
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Minimal FastAPI app hosting only the routers under test; the real DB
# session dependency is replaced by a shared MagicMock.
app = FastAPI()
app.include_router(evidence_router)
app.include_router(llm_audit_router, prefix="/compliance")

mock_db = MagicMock()


def override_get_db():
    # Dependency override: every request receives the same mock session.
    yield mock_db


app.dependency_overrides[get_db] = override_get_db
client = TestClient(app)

# Stable fixture identifiers and a fixed timestamp shared across tests.
EVIDENCE_UUID = "eeeeeeee-aaaa-bbbb-cccc-ffffffffffff"
CONTROL_UUID = "cccccccc-aaaa-bbbb-cccc-dddddddddddd"
NOW = datetime(2026, 3, 23, 12, 0, 0)
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def make_evidence(overrides=None):
    """Build a MagicMock evidence row with CI-pipeline defaults.

    Defaults model an E3/observed CI artifact; any attribute can be
    replaced via the ``overrides`` mapping.
    """
    status = MagicMock()
    status.value = "valid"

    attributes = {
        "id": EVIDENCE_UUID,
        "control_id": CONTROL_UUID,
        "evidence_type": "test_results",
        "title": "Pytest Test Report",
        "description": "All tests passing",
        "artifact_url": "https://ci.example.com/job/123/artifact",
        "artifact_path": None,
        "artifact_hash": "abc123def456",
        "file_size_bytes": None,
        "mime_type": None,
        "status": status,
        "uploaded_by": None,
        "source": "ci_pipeline",
        "ci_job_id": "job-123",
        "valid_from": NOW,
        "valid_until": NOW + timedelta(days=90),
        "collected_at": NOW,
        "created_at": NOW,
        # Anti-fake-evidence fields
        "confidence_level": EvidenceConfidenceEnum.E3,
        "truth_status": EvidenceTruthStatusEnum.OBSERVED,
        "generation_mode": None,
        "may_be_used_as_evidence": True,
        "reviewed_by": None,
        "reviewed_at": None,
        # Phase 2 fields
        "approval_status": "none",
        "first_reviewer": None,
        "first_reviewed_at": None,
        "second_reviewer": None,
        "second_reviewed_at": None,
        "requires_four_eyes": False,
    }
    attributes.update(overrides or {})

    evidence = MagicMock()
    for name, value in attributes.items():
        setattr(evidence, name, value)
    return evidence
|
||||||
|
|
||||||
|
|
||||||
|
def make_control(overrides=None):
    """Build a MagicMock control row (PLANNED by default); ``overrides``
    replaces any attribute."""
    attributes = {
        "id": CONTROL_UUID,
        "control_id": "GOV-001",
        "title": "Access Control",
        "status": ControlStatusEnum.PLANNED,
    }
    attributes.update(overrides or {})

    control = MagicMock()
    for name, value in attributes.items():
        setattr(control, name, value)
    return control
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 1. TestEvidenceConfidenceClassification
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestEvidenceConfidenceClassification:
    """Test automatic confidence level classification.

    Expected mapping: ci_pipeline/api -> E3 (system-observed),
    manual/upload/unknown/None -> E1 (uploaded, unreviewed),
    generated -> E0 (no real evidence).
    """

    def test_ci_pipeline_returns_e3(self):
        assert _classify_confidence("ci_pipeline") == EvidenceConfidenceEnum.E3

    def test_api_with_hash_returns_e3(self):
        assert _classify_confidence("api", artifact_hash="sha256:abc") == EvidenceConfidenceEnum.E3

    def test_api_without_hash_returns_e3(self):
        # Hash presence does not change the classification for API sources.
        assert _classify_confidence("api") == EvidenceConfidenceEnum.E3

    def test_manual_returns_e1(self):
        assert _classify_confidence("manual") == EvidenceConfidenceEnum.E1

    def test_upload_returns_e1(self):
        assert _classify_confidence("upload") == EvidenceConfidenceEnum.E1

    def test_generated_returns_e0(self):
        assert _classify_confidence("generated") == EvidenceConfidenceEnum.E0

    def test_unknown_source_returns_e1(self):
        # E1 is the conservative default for unrecognized sources.
        assert _classify_confidence("some_random_source") == EvidenceConfidenceEnum.E1

    def test_none_source_returns_e1(self):
        assert _classify_confidence(None) == EvidenceConfidenceEnum.E1
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 2. TestEvidenceTruthStatus
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestEvidenceTruthStatus:
    """Test automatic truth status classification.

    Expected mapping: ci_pipeline/api -> OBSERVED,
    manual/upload/None -> UPLOADED, generated -> GENERATED.
    """

    def test_ci_pipeline_returns_observed(self):
        assert _classify_truth_status("ci_pipeline") == EvidenceTruthStatusEnum.OBSERVED

    def test_manual_returns_uploaded(self):
        assert _classify_truth_status("manual") == EvidenceTruthStatusEnum.UPLOADED

    def test_upload_returns_uploaded(self):
        assert _classify_truth_status("upload") == EvidenceTruthStatusEnum.UPLOADED

    def test_generated_returns_generated(self):
        assert _classify_truth_status("generated") == EvidenceTruthStatusEnum.GENERATED

    def test_api_returns_observed(self):
        assert _classify_truth_status("api") == EvidenceTruthStatusEnum.OBSERVED

    def test_none_returns_uploaded(self):
        # UPLOADED is the default when no source is recorded.
        assert _classify_truth_status(None) == EvidenceTruthStatusEnum.UPLOADED
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 3. TestControlStatusTransitions
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestControlStatusTransitions:
    """Exercise the control status transition state machine (validate_transition)."""

    def test_planned_to_in_progress_allowed(self):
        """Starting work on a planned control is always permitted."""
        ok, problems = validate_transition("planned", "in_progress")
        assert ok is True
        assert problems == []

    def test_in_progress_to_pass_without_evidence_blocked(self):
        """'pass' without any evidence must be refused with a message naming 'pass'."""
        ok, problems = validate_transition("in_progress", "pass", evidence_list=[])
        assert ok is False
        assert len(problems) > 0
        assert "pass" in problems[0].lower()

    def test_in_progress_to_pass_with_e2_evidence_allowed(self):
        """Internally validated E2 evidence is sufficient for 'pass'."""
        validated = make_evidence({
            "confidence_level": EvidenceConfidenceEnum.E2,
            "truth_status": EvidenceTruthStatusEnum.VALIDATED_INTERNAL,
        })
        ok, problems = validate_transition("in_progress", "pass", evidence_list=[validated])
        assert ok is True
        assert problems == []

    def test_in_progress_to_pass_with_e1_evidence_blocked(self):
        """Merely uploaded E1 evidence is too weak; the violation mentions E2."""
        weak = make_evidence({
            "confidence_level": EvidenceConfidenceEnum.E1,
            "truth_status": EvidenceTruthStatusEnum.UPLOADED,
        })
        ok, problems = validate_transition("in_progress", "pass", evidence_list=[weak])
        assert ok is False
        assert "E2" in problems[0]

    def test_in_progress_to_partial_with_evidence_allowed(self):
        """'partial' accepts even low-confidence (E0) evidence."""
        some = make_evidence({"confidence_level": EvidenceConfidenceEnum.E0})
        ok, _problems = validate_transition("in_progress", "partial", evidence_list=[some])
        assert ok is True

    def test_in_progress_to_partial_without_evidence_blocked(self):
        """'partial' still requires at least one piece of evidence."""
        ok, _problems = validate_transition("in_progress", "partial", evidence_list=[])
        assert ok is False

    def test_pass_to_fail_always_allowed(self):
        """Downgrading pass -> fail needs no preconditions."""
        ok, _problems = validate_transition("pass", "fail")
        assert ok is True

    def test_any_to_na_requires_justification(self):
        """'n/a' without a justification is refused and says so."""
        ok, problems = validate_transition("in_progress", "n/a", status_justification=None)
        assert ok is False
        assert "justification" in problems[0].lower()

    def test_any_to_na_with_justification_allowed(self):
        """'n/a' is fine once a justification text is supplied."""
        ok, _problems = validate_transition("in_progress", "n/a", status_justification="Not applicable for this project")
        assert ok is True

    def test_any_to_planned_always_allowed(self):
        """Resetting any status back to 'planned' is unconditional."""
        ok, _problems = validate_transition("pass", "planned")
        assert ok is True

    def test_same_status_noop_allowed(self):
        """A no-op transition (same status) is allowed."""
        ok, _problems = validate_transition("pass", "pass")
        assert ok is True

    def test_bypass_for_auto_updater(self):
        """The auto-updater bypass flag skips the evidence checks."""
        ok, _problems = validate_transition("in_progress", "pass", evidence_list=[], bypass_for_auto_updater=True)
        assert ok is True

    def test_partial_to_pass_needs_e2(self):
        """partial -> pass is blocked when the evidence is only E1/uploaded."""
        weak = make_evidence({
            "confidence_level": EvidenceConfidenceEnum.E1,
            "truth_status": EvidenceTruthStatusEnum.UPLOADED,
        })
        ok, _problems = validate_transition("partial", "pass", evidence_list=[weak])
        assert ok is False

    def test_partial_to_pass_with_e3_allowed(self):
        """partial -> pass succeeds with observed E3 evidence."""
        strong = make_evidence({
            "confidence_level": EvidenceConfidenceEnum.E3,
            "truth_status": EvidenceTruthStatusEnum.OBSERVED,
        })
        ok, _problems = validate_transition("partial", "pass", evidence_list=[strong])
        assert ok is True

    def test_in_progress_to_fail_allowed(self):
        """Declaring failure from in_progress needs no evidence."""
        ok, _problems = validate_transition("in_progress", "fail")
        assert ok is True
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 4. TestMultiDimensionalScore
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestMultiDimensionalScore:
    """Multi-dimensional score calculation on ControlRepository.

    Defect fixed: the repository import/construction and the
    patch(get_all) + get_multi_dimensional_score() dance were copy-pasted
    into every test; they now live in two private helpers so each test
    states only its setup and assertions.
    """

    @staticmethod
    def _repo():
        # Single place for the deferred import + construction over the
        # module-level mocked session.
        from compliance.db.repository import ControlRepository
        return ControlRepository(mock_db)

    def _score_for(self, controls):
        # Compute the score with get_all patched to return `controls`.
        repo = self._repo()
        with patch.object(repo, 'get_all', return_value=controls):
            return repo.get_multi_dimensional_score()

    def test_score_structure(self):
        """Score result should have all required keys."""
        result = self._score_for([])
        assert "requirement_coverage" in result
        assert "evidence_strength" in result
        assert "validation_quality" in result
        assert "evidence_freshness" in result
        assert "control_effectiveness" in result
        assert "overall_readiness" in result
        assert "hard_blocks" in result

    def test_empty_controls_returns_zeros(self):
        """No controls at all -> zero readiness and a German hard-block message."""
        result = self._score_for([])
        assert result["overall_readiness"] == 0.0
        assert "Keine Controls" in result["hard_blocks"][0]

    def test_hard_blocks_pass_without_evidence(self):
        """Controls on 'pass' without evidence should trigger hard block."""
        ctrl = make_control({"status": ControlStatusEnum.PASS})
        mock_db.query.return_value.all.return_value = []  # no evidence
        mock_db.query.return_value.scalar.return_value = 0
        result = self._score_for([ctrl])
        assert any("Evidence" in b or "evidence" in b.lower() for b in result["hard_blocks"])

    def test_all_dimensions_are_floats(self):
        """Every score dimension is reported as a float."""
        result = self._score_for([])
        for key in ["requirement_coverage", "evidence_strength", "validation_quality",
                    "evidence_freshness", "control_effectiveness", "overall_readiness"]:
            assert isinstance(result[key], float), f"{key} should be float"

    def test_hard_blocks_is_list(self):
        """hard_blocks is always a list, even when empty input produced one entry."""
        result = self._score_for([])
        assert isinstance(result["hard_blocks"], list)

    def test_backwards_compatibility_with_old_score(self):
        """get_statistics should still work and return compliance_score."""
        repo = self._repo()
        mock_db.query.return_value.scalar.return_value = 0
        mock_db.query.return_value.group_by.return_value.all.return_value = []
        result = repo.get_statistics()
        assert "compliance_score" in result
        assert "total" in result
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 5. TestForbiddenFormulations
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestForbiddenFormulations:
    """Schema-side coverage for forbidden formulation handling.

    The actual text scan runs in TypeScript; these tests only verify that
    the related Python schemas import cleanly and expose the declared
    defaults and fields.
    """

    def test_import_works(self):
        """Both schemas import and construct with sensible defaults."""
        from compliance.api.schemas import MultiDimensionalScore, StatusTransitionError
        blank = MultiDimensionalScore()
        assert blank.overall_readiness == 0.0
        error = StatusTransitionError(current_status="planned", requested_status="pass")
        assert error.allowed is False

    def test_status_transition_error_schema(self):
        """StatusTransitionError carries the violations list verbatim."""
        from compliance.api.schemas import StatusTransitionError
        error = StatusTransitionError(
            allowed=False,
            current_status="in_progress",
            requested_status="pass",
            violations=["Need E2 evidence"],
        )
        assert error.violations == ["Need E2 evidence"]

    def test_multi_dimensional_score_defaults(self):
        """A bare score defaults every dimension to zero and no hard blocks."""
        from compliance.api.schemas import MultiDimensionalScore
        blank = MultiDimensionalScore()
        assert blank.requirement_coverage == 0.0
        assert blank.hard_blocks == []

    def test_multi_dimensional_score_with_data(self):
        """All explicitly supplied values survive construction."""
        from compliance.api.schemas import MultiDimensionalScore
        populated = MultiDimensionalScore(
            requirement_coverage=80.0,
            evidence_strength=60.0,
            validation_quality=40.0,
            evidence_freshness=90.0,
            control_effectiveness=70.0,
            overall_readiness=65.0,
            hard_blocks=["3 Controls ohne Evidence"],
        )
        assert populated.overall_readiness == 65.0
        assert len(populated.hard_blocks) == 1

    def test_evidence_response_has_anti_fake_fields(self):
        """EvidenceResponse exposes every anti-fake-evidence field."""
        from compliance.api.schemas import EvidenceResponse
        model_fields = EvidenceResponse.model_fields
        for field_name in (
            "confidence_level",
            "truth_status",
            "generation_mode",
            "may_be_used_as_evidence",
            "reviewed_by",
            "reviewed_at",
        ):
            assert field_name in model_fields
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 6. TestLLMGenerationAudit
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestLLMGenerationAudit:
    """Test LLM generation audit trail."""

    def test_create_audit_record(self):
        """POST /compliance/llm-audit should create a record."""
        # Fake ORM row standing in for the persisted LLMGenerationAuditDB entry;
        # every field the response schema serializes is pre-populated.
        mock_record = MagicMock()
        mock_record.id = "audit-001"
        mock_record.tenant_id = None
        mock_record.entity_type = "document"
        mock_record.entity_id = None
        mock_record.generation_mode = "draft_assistance"
        mock_record.truth_status = EvidenceTruthStatusEnum.GENERATED
        mock_record.may_be_used_as_evidence = False
        mock_record.llm_model = "qwen2.5vl:32b"
        mock_record.llm_provider = "ollama"
        mock_record.prompt_hash = None
        mock_record.input_summary = "Test input"
        mock_record.output_summary = "Test output"
        mock_record.extra_metadata = {}
        mock_record.created_at = NOW

        mock_db.add = MagicMock()
        mock_db.commit = MagicMock()
        # refresh() would normally populate server-generated columns; emulate the id.
        mock_db.refresh = MagicMock(side_effect=lambda r: setattr(r, 'id', 'audit-001'))

        # We need to patch the LLMGenerationAuditDB constructor
        with patch('compliance.api.llm_audit_routes.LLMGenerationAuditDB', return_value=mock_record):
            resp = client.post("/compliance/llm-audit", json={
                "entity_type": "document",
                "generation_mode": "draft_assistance",
                "truth_status": "generated",
                "may_be_used_as_evidence": False,
                "llm_model": "qwen2.5vl:32b",
                "llm_provider": "ollama",
            })

        assert resp.status_code == 200
        data = resp.json()
        assert data["entity_type"] == "document"
        assert data["truth_status"] == "generated"
        assert data["may_be_used_as_evidence"] is False

    def test_truth_status_always_generated_for_llm(self):
        """LLM-generated content should always start with truth_status=generated."""
        from compliance.db.models import LLMGenerationAuditDB, EvidenceTruthStatusEnum
        audit = LLMGenerationAuditDB()
        # Default should be GENERATED
        # NOTE(review): None is tolerated because the column default presumably
        # only applies at flush time — confirm against the model definition.
        assert audit.truth_status is None or audit.truth_status == EvidenceTruthStatusEnum.GENERATED

    def test_may_be_used_as_evidence_defaults_false(self):
        """Generated content should NOT be usable as evidence by default."""
        from compliance.db.models import LLMGenerationAuditDB
        audit = LLMGenerationAuditDB()
        # Same flush-time caveat as above: None counts as "not yet defaulted".
        assert audit.may_be_used_as_evidence is False or audit.may_be_used_as_evidence is None

    def test_list_audit_records(self):
        """GET /compliance/llm-audit should return records."""
        # Self-returning query mock so any filter/order/offset/limit chain resolves.
        mock_query = MagicMock()
        mock_query.count.return_value = 0
        mock_query.filter.return_value = mock_query
        mock_query.order_by.return_value = mock_query
        mock_query.offset.return_value = mock_query
        mock_query.limit.return_value = mock_query
        mock_query.all.return_value = []
        mock_db.query.return_value = mock_query

        resp = client.get("/compliance/llm-audit")
        assert resp.status_code == 200
        data = resp.json()
        assert "records" in data
        assert "total" in data
        assert data["total"] == 0
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 7. TestEvidenceReview
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestEvidenceReview:
    """Behavior of the evidence review endpoint."""

    def test_review_upgrades_confidence(self):
        """PATCH /evidence/{id}/review should update confidence and set reviewer."""
        item = make_evidence({
            "confidence_level": EvidenceConfidenceEnum.E1,
            "truth_status": EvidenceTruthStatusEnum.UPLOADED,
        })
        mock_db.query.return_value.filter.return_value.first.return_value = item
        mock_db.commit = MagicMock()
        mock_db.refresh = MagicMock()

        response = client.patch(f"/evidence/{EVIDENCE_UUID}/review", json={
            "confidence_level": "E2",
            "truth_status": "validated_internal",
            "reviewed_by": "auditor@example.com",
        })

        assert response.status_code == 200
        # The in-memory evidence object must reflect the review outcome.
        assert item.confidence_level == EvidenceConfidenceEnum.E2
        assert item.truth_status == EvidenceTruthStatusEnum.VALIDATED_INTERNAL
        assert item.reviewed_by == "auditor@example.com"
        assert item.reviewed_at is not None

    def test_review_nonexistent_evidence_returns_404(self):
        """Reviewing an unknown evidence id is a 404."""
        mock_db.query.return_value.filter.return_value.first.return_value = None
        response = client.patch("/evidence/nonexistent-id/review", json={
            "reviewed_by": "someone",
        })
        assert response.status_code == 404

    def test_review_invalid_confidence_returns_400(self):
        """An unknown confidence level is rejected with 400."""
        item = make_evidence()
        mock_db.query.return_value.filter.return_value.first.return_value = item

        response = client.patch(f"/evidence/{EVIDENCE_UUID}/review", json={
            "confidence_level": "INVALID",
            "reviewed_by": "someone",
        })
        assert response.status_code == 400
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 8. TestControlUpdateIntegration
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestControlUpdateIntegration:
    """ControlUpdate/ControlResponse schemas expose status_justification."""

    def test_control_update_has_status_justification(self):
        """The update schema accepts a status justification."""
        from compliance.api.schemas import ControlUpdate
        assert "status_justification" in ControlUpdate.model_fields

    def test_control_response_has_status_justification(self):
        """The response schema echoes the status justification."""
        from compliance.api.schemas import ControlResponse
        assert "status_justification" in ControlResponse.model_fields

    def test_control_status_enum_has_in_progress(self):
        """The enum's wire value for IN_PROGRESS is 'in_progress'."""
        assert ControlStatusEnum.IN_PROGRESS.value == "in_progress"
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 9. TestEvidenceEnums
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestEvidenceEnums:
    """Wire values of the evidence confidence and truth-status enums."""

    def test_confidence_enum_values(self):
        """Each confidence member serializes to its E0..E4 label."""
        expected = {
            EvidenceConfidenceEnum.E0: "E0",
            EvidenceConfidenceEnum.E1: "E1",
            EvidenceConfidenceEnum.E2: "E2",
            EvidenceConfidenceEnum.E3: "E3",
            EvidenceConfidenceEnum.E4: "E4",
        }
        for member, wire_value in expected.items():
            assert member.value == wire_value

    def test_truth_status_enum_values(self):
        """Each truth-status member serializes to its snake_case label."""
        expected = {
            EvidenceTruthStatusEnum.GENERATED: "generated",
            EvidenceTruthStatusEnum.UPLOADED: "uploaded",
            EvidenceTruthStatusEnum.OBSERVED: "observed",
            EvidenceTruthStatusEnum.VALIDATED_INTERNAL: "validated_internal",
            EvidenceTruthStatusEnum.REJECTED: "rejected",
            EvidenceTruthStatusEnum.PROVIDED_TO_AUDITOR: "provided_to_auditor",
            EvidenceTruthStatusEnum.ACCEPTED_BY_AUDITOR: "accepted_by_auditor",
        }
        for member, wire_value in expected.items():
            assert member.value == wire_value
|
||||||
528
backend-compliance/tests/test_anti_fake_evidence_phase2.py
Normal file
528
backend-compliance/tests/test_anti_fake_evidence_phase2.py
Normal file
@@ -0,0 +1,528 @@
|
|||||||
|
"""Tests for Anti-Fake-Evidence Phase 2.
|
||||||
|
|
||||||
|
~35 tests covering:
|
||||||
|
- Audit trail extension (evidence review/create logging)
|
||||||
|
- Assertion engine (extraction, CRUD, verify, summary)
|
||||||
|
- Four-Eyes review (domain check, first/second review, same-person reject)
|
||||||
|
- UI badge data (response schema includes new fields)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from unittest.mock import MagicMock, patch
|
||||||
|
from fastapi import FastAPI
|
||||||
|
from fastapi.testclient import TestClient
|
||||||
|
|
||||||
|
from compliance.api.evidence_routes import (
|
||||||
|
router as evidence_router,
|
||||||
|
_requires_four_eyes,
|
||||||
|
_classify_confidence,
|
||||||
|
_classify_truth_status,
|
||||||
|
)
|
||||||
|
from compliance.api.assertion_routes import router as assertion_router
|
||||||
|
from compliance.services.assertion_engine import extract_assertions, _classify_sentence
|
||||||
|
from compliance.db.models import (
|
||||||
|
EvidenceConfidenceEnum,
|
||||||
|
EvidenceTruthStatusEnum,
|
||||||
|
ControlStatusEnum,
|
||||||
|
AssertionDB,
|
||||||
|
)
|
||||||
|
from classroom_engine.database import get_db
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# App setup with mocked DB dependency
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Throwaway FastAPI app mounting only the routers under test.
app = FastAPI()
app.include_router(evidence_router)
app.include_router(assertion_router)

# Single module-level mock session shared by every test; individual tests
# call mock_db.reset_mock() and re-wire the query chains they need.
mock_db = MagicMock()


def override_get_db():
    # Dependency override: hand FastAPI the shared mock instead of a real session.
    yield mock_db


app.dependency_overrides[get_db] = override_get_db
client = TestClient(app)

# Stable identifiers and a fixed timestamp reused across the suite.
EVIDENCE_UUID = "eeee0002-aaaa-bbbb-cccc-ffffffffffff"
CONTROL_UUID = "cccc0002-aaaa-bbbb-cccc-dddddddddddd"
ASSERTION_UUID = "aaaa0002-bbbb-cccc-dddd-eeeeeeeeeeee"
NOW = datetime(2026, 3, 23, 14, 0, 0)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def make_evidence(overrides=None):
    """Build a MagicMock standing in for an evidence ORM row.

    All Phase-1 and Phase-2 attributes are pre-populated with defaults;
    entries in `overrides` (attribute name -> value) are applied last and win.
    """
    defaults = {
        "id": EVIDENCE_UUID,
        "control_id": CONTROL_UUID,
        "evidence_type": "test_results",
        "title": "Phase 2 Test Evidence",
        "description": "Testing four-eyes",
        "artifact_url": "https://ci.example.com/artifact",
        "artifact_path": None,
        "artifact_hash": "abc123",
        "file_size_bytes": None,
        "mime_type": None,
        "uploaded_by": None,
        "source": "api",
        "ci_job_id": None,
        "valid_from": NOW,
        "valid_until": NOW + timedelta(days=90),
        "collected_at": NOW,
        "created_at": NOW,
        "confidence_level": EvidenceConfidenceEnum.E1,
        "truth_status": EvidenceTruthStatusEnum.UPLOADED,
        "generation_mode": None,
        "may_be_used_as_evidence": True,
        "reviewed_by": None,
        "reviewed_at": None,
        # Phase 2 fields
        "approval_status": "none",
        "first_reviewer": None,
        "first_reviewed_at": None,
        "second_reviewer": None,
        "second_reviewed_at": None,
        "requires_four_eyes": False,
    }
    evidence = MagicMock()
    # `status` mimics an enum column: an object exposing a .value attribute.
    evidence.status = MagicMock()
    evidence.status.value = "valid"
    for attr, value in defaults.items():
        setattr(evidence, attr, value)
    for attr, value in (overrides or {}).items():
        setattr(evidence, attr, value)
    return evidence
|
||||||
|
|
||||||
|
|
||||||
|
def make_assertion(overrides=None):
    """Build a MagicMock standing in for an AssertionDB row.

    Defaults describe an unverified 'pflicht' assertion on a control;
    `overrides` (attribute name -> value) is applied last and wins.
    """
    defaults = {
        "id": ASSERTION_UUID,
        "tenant_id": "tenant-001",
        "entity_type": "control",
        "entity_id": CONTROL_UUID,
        "sentence_text": "Test assertion sentence",
        "sentence_index": 0,
        "assertion_type": "assertion",
        "evidence_ids": [],
        "confidence": 0.0,
        "normative_tier": "pflicht",
        "verified_by": None,
        "verified_at": None,
        "created_at": NOW,
        "updated_at": NOW,
    }
    assertion = MagicMock()
    for attr, value in defaults.items():
        setattr(assertion, attr, value)
    for attr, value in (overrides or {}).items():
        setattr(assertion, attr, value)
    return assertion
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 1. TestAuditTrailExtension
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestAuditTrailExtension:
    """Evidence review/reject endpoints must write audit-trail entries."""

    def test_review_evidence_logs_audit_trail(self):
        """A successful review triggers at least one db.add (audit entry)."""
        record = make_evidence()
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = record
        mock_db.refresh.return_value = None

        response = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"confidence_level": "E2", "reviewed_by": "auditor@test.com"},
        )
        assert response.status_code == 200
        # db.add should be called for audit trail entries
        assert mock_db.add.called

    def test_review_evidence_records_old_and_new_confidence(self):
        """Upgrading E1 -> E3 goes through cleanly."""
        record = make_evidence({"confidence_level": EvidenceConfidenceEnum.E1})
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = record
        mock_db.refresh.return_value = None

        response = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"confidence_level": "E3", "reviewed_by": "reviewer@test.com"},
        )
        assert response.status_code == 200

    def test_review_evidence_records_truth_status_change(self):
        """Moving truth status uploaded -> validated_internal goes through cleanly."""
        record = make_evidence({"truth_status": EvidenceTruthStatusEnum.UPLOADED})
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = record
        mock_db.refresh.return_value = None

        response = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"truth_status": "validated_internal", "reviewed_by": "reviewer@test.com"},
        )
        assert response.status_code == 200

    def test_review_nonexistent_evidence_returns_404(self):
        """Reviewing an unknown id yields 404."""
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = None

        response = client.patch(
            "/evidence/nonexistent/review",
            json={"reviewed_by": "someone"},
        )
        assert response.status_code == 404

    def test_reject_evidence_logs_audit_trail(self):
        """Rejecting evidence returns the rejected approval status."""
        record = make_evidence()
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = record
        mock_db.refresh.return_value = None

        response = client.patch(
            f"/evidence/{EVIDENCE_UUID}/reject",
            json={"reviewed_by": "auditor@test.com", "rejection_reason": "Fake evidence"},
        )
        assert response.status_code == 200
        payload = response.json()
        assert payload["approval_status"] == "rejected"

    def test_reject_nonexistent_evidence_returns_404(self):
        """Rejecting an unknown id yields 404."""
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = None

        response = client.patch(
            "/evidence/nonexistent/reject",
            json={"reviewed_by": "someone"},
        )
        assert response.status_code == 404

    def test_audit_trail_query_endpoint(self):
        """GET /audit-trail returns the entries matching the filters."""
        mock_db.reset_mock()
        entry = MagicMock()
        entry.id = "trail-001"
        entry.entity_type = "evidence"
        entry.entity_id = EVIDENCE_UUID
        entry.entity_name = "Test"
        entry.action = "review"
        entry.field_changed = "confidence_level"
        entry.old_value = "E1"
        entry.new_value = "E2"
        entry.change_summary = None
        entry.performed_by = "auditor"
        entry.performed_at = NOW
        entry.checksum = "abc"
        mock_db.query.return_value.filter.return_value.filter.return_value.order_by.return_value.limit.return_value.all.return_value = [entry]

        response = client.get(f"/audit-trail?entity_type=evidence&entity_id={EVIDENCE_UUID}")
        assert response.status_code == 200
        payload = response.json()
        assert payload["total"] >= 1

    def test_audit_trail_checksum_present(self):
        """Audit trail entries should have a checksum for integrity."""
        from compliance.api.audit_trail_utils import create_signature
        signature = create_signature("evidence|123|review|user@test.com")
        assert len(signature) == 64  # SHA-256 hex digest
|
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 2. TestAssertionEngine
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestAssertionEngine:
|
||||||
|
"""Test assertion extraction and classification."""
|
||||||
|
|
||||||
|
def test_pflicht_sentence_classified_as_assertion(self):
|
||||||
|
result = _classify_sentence("Die Organisation muss ein ISMS implementieren.")
|
||||||
|
assert result == ("assertion", "pflicht")
|
||||||
|
|
||||||
|
def test_empfehlung_sentence_classified(self):
|
||||||
|
result = _classify_sentence("Die Organisation sollte regelmäßige Audits durchführen.")
|
||||||
|
assert result == ("assertion", "empfehlung")
|
||||||
|
|
||||||
|
def test_kann_sentence_classified(self):
|
||||||
|
result = _classify_sentence("Optional kann ein externes Audit durchgeführt werden.")
|
||||||
|
assert result == ("assertion", "kann")
|
||||||
|
|
||||||
|
def test_rationale_sentence_classified(self):
|
||||||
|
result = _classify_sentence("Dies ist erforderlich, weil Datenverlust schwere Folgen hat.")
|
||||||
|
assert result == ("rationale", None)
|
||||||
|
|
||||||
|
def test_fact_sentence_with_evidence_keyword(self):
|
||||||
|
result = _classify_sentence("Das Zertifikat wurde am 15.03.2026 ausgestellt.")
|
||||||
|
assert result == ("fact", None)
|
||||||
|
|
||||||
|
def test_extract_assertions_splits_sentences(self):
|
||||||
|
text = "Die Organisation muss Daten schützen. Sie sollte regelmäßig prüfen."
|
||||||
|
results = extract_assertions(text, "control", "ctrl-001")
|
||||||
|
assert len(results) == 2
|
||||||
|
assert results[0]["assertion_type"] == "assertion"
|
||||||
|
assert results[0]["normative_tier"] == "pflicht"
|
||||||
|
assert results[1]["normative_tier"] == "empfehlung"
|
||||||
|
|
||||||
|
def test_extract_assertions_empty_text(self):
|
||||||
|
results = extract_assertions("", "control", "ctrl-001")
|
||||||
|
assert results == []
|
||||||
|
|
||||||
|
def test_extract_assertions_single_sentence(self):
|
||||||
|
results = extract_assertions("Der Betreiber muss ein Audit durchführen.", "control", "ctrl-001")
|
||||||
|
assert len(results) == 1
|
||||||
|
assert results[0]["normative_tier"] == "pflicht"
|
||||||
|
|
||||||
|
def test_mixed_text_with_rationale(self):
|
||||||
|
text = "Die Organisation muss ein ISMS implementieren. Dies ist notwendig, weil Compliance gefordert ist."
|
||||||
|
results = extract_assertions(text, "control", "ctrl-001")
|
||||||
|
assert len(results) == 2
|
||||||
|
types = [r["assertion_type"] for r in results]
|
||||||
|
assert "assertion" in types
|
||||||
|
assert "rationale" in types
|
||||||
|
|
||||||
|
def test_assertion_crud_create(self):
|
||||||
|
mock_db.reset_mock()
|
||||||
|
mock_db.refresh.return_value = None
|
||||||
|
# Mock the added object to return proper values
|
||||||
|
def side_effect_add(obj):
|
||||||
|
obj.id = ASSERTION_UUID
|
||||||
|
obj.created_at = NOW
|
||||||
|
obj.updated_at = NOW
|
||||||
|
obj.sentence_index = 0
|
||||||
|
obj.confidence = 0.0
|
||||||
|
mock_db.add.side_effect = side_effect_add
|
||||||
|
|
||||||
|
resp = client.post(
|
||||||
|
"/assertions?tenant_id=tenant-001",
|
||||||
|
json={
|
||||||
|
"entity_type": "control",
|
||||||
|
"entity_id": CONTROL_UUID,
|
||||||
|
"sentence_text": "Die Organisation muss ein ISMS implementieren.",
|
||||||
|
"assertion_type": "assertion",
|
||||||
|
"normative_tier": "pflicht",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
assert resp.status_code == 200
|
||||||
|
|
||||||
|
def test_assertion_verify_endpoint(self):
|
||||||
|
a = make_assertion()
|
||||||
|
mock_db.reset_mock()
|
||||||
|
mock_db.query.return_value.filter.return_value.first.return_value = a
|
||||||
|
mock_db.refresh.return_value = None
|
||||||
|
|
||||||
|
resp = client.post(f"/assertions/{ASSERTION_UUID}/verify?verified_by=auditor@test.com")
|
||||||
|
assert resp.status_code == 200
|
||||||
|
assert a.assertion_type == "fact"
|
||||||
|
assert a.verified_by == "auditor@test.com"
|
||||||
|
|
||||||
|
def test_assertion_summary(self):
    """The summary endpoint aggregates totals by type and verification state."""
    mock_db.reset_mock()
    plain = make_assertion({"assertion_type": "assertion", "verified_by": None})
    fact = make_assertion({"assertion_type": "fact", "verified_by": "user"})
    rationale = make_assertion({"assertion_type": "rationale", "verified_by": None})
    sample = [plain, fact, rationale]
    mock_db.query.return_value.filter.return_value.filter.return_value.filter.return_value.all.return_value = sample
    # Direct .all() for no-filter case
    mock_db.query.return_value.all.return_value = sample

    response = client.get("/assertions/summary")
    assert response.status_code == 200
    summary = response.json()
    assert summary["total_assertions"] == 3
    assert summary["total_facts"] == 1
    assert summary["total_rationale"] == 1
    assert summary["unverified_count"] == 1
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 3. TestFourEyesReview
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestFourEyesReview:
    """Test Four-Eyes review process."""

    # --- Domain gating: _requires_four_eyes decides per control domain ---

    def test_gov_domain_requires_four_eyes(self):
        # Governance controls require a second reviewer.
        assert _requires_four_eyes("gov") is True

    def test_priv_domain_requires_four_eyes(self):
        # Privacy controls likewise require dual review.
        assert _requires_four_eyes("priv") is True

    def test_ops_domain_does_not_require_four_eyes(self):
        assert _requires_four_eyes("ops") is False

    def test_sdlc_domain_does_not_require_four_eyes(self):
        assert _requires_four_eyes("sdlc") is False

    def test_first_review_sets_first_approved(self):
        # First review: pending_first -> first_approved, reviewer recorded.
        evidence = make_evidence({
            "requires_four_eyes": True,
            "approval_status": "pending_first",
        })
        mock_db.reset_mock()
        # The route looks the evidence row up via query().filter().first().
        mock_db.query.return_value.filter.return_value.first.return_value = evidence
        mock_db.refresh.return_value = None

        resp = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"reviewed_by": "reviewer1@test.com"},
        )
        assert resp.status_code == 200
        # Mutations happen directly on the mocked ORM object.
        assert evidence.first_reviewer == "reviewer1@test.com"
        assert evidence.approval_status == "first_approved"

    def test_second_review_different_person_approves(self):
        # Second, distinct reviewer: first_approved -> approved.
        evidence = make_evidence({
            "requires_four_eyes": True,
            "approval_status": "first_approved",
            "first_reviewer": "reviewer1@test.com",
        })
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = evidence
        mock_db.refresh.return_value = None

        resp = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"reviewed_by": "reviewer2@test.com"},
        )
        assert resp.status_code == 200
        assert evidence.second_reviewer == "reviewer2@test.com"
        assert evidence.approval_status == "approved"

    def test_same_person_second_review_rejected(self):
        # The same reviewer may not approve twice (four-eyes principle).
        evidence = make_evidence({
            "requires_four_eyes": True,
            "approval_status": "first_approved",
            "first_reviewer": "reviewer1@test.com",
        })
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = evidence

        resp = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"reviewed_by": "reviewer1@test.com"},
        )
        assert resp.status_code == 400
        assert "different" in resp.json()["detail"].lower()

    def test_already_approved_blocked(self):
        # Fully approved evidence cannot be reviewed again.
        evidence = make_evidence({
            "requires_four_eyes": True,
            "approval_status": "approved",
        })
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = evidence

        resp = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"reviewed_by": "reviewer3@test.com"},
        )
        assert resp.status_code == 400
        assert "already" in resp.json()["detail"].lower()

    def test_rejected_evidence_cannot_be_reviewed(self):
        # A rejected record is terminal for the review flow.
        evidence = make_evidence({
            "requires_four_eyes": True,
            "approval_status": "rejected",
        })
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = evidence

        resp = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"reviewed_by": "reviewer@test.com"},
        )
        assert resp.status_code == 400

    def test_reject_endpoint(self):
        # Explicit rejection with a reason flips approval_status to 'rejected'.
        evidence = make_evidence({"requires_four_eyes": True})
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = evidence
        mock_db.refresh.return_value = None

        resp = client.patch(
            f"/evidence/{EVIDENCE_UUID}/reject",
            json={"reviewed_by": "auditor@test.com", "rejection_reason": "Not authentic"},
        )
        assert resp.status_code == 200
        assert evidence.approval_status == "rejected"
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 4. TestUIBadgeData
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestUIBadgeData:
    """Test that evidence response includes all Phase 2 fields."""

    def test_evidence_response_includes_approval_status(self):
        """A successful second review returns the approval fields in its payload."""
        record = make_evidence({
            "approval_status": "first_approved",
            "first_reviewer": "reviewer1@test.com",
            "first_reviewed_at": NOW,
            "requires_four_eyes": True,
        })
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = record
        mock_db.refresh.return_value = None

        response = client.patch(
            f"/evidence/{EVIDENCE_UUID}/review",
            json={"reviewed_by": "reviewer2@test.com"},
        )
        assert response.status_code == 200
        body = response.json()
        assert "approval_status" in body
        assert "requires_four_eyes" in body
        assert body["requires_four_eyes"] is True

    def test_evidence_response_includes_four_eyes_fields(self):
        """_build_evidence_response surfaces both reviewers once fully approved."""
        record = make_evidence({
            "requires_four_eyes": True,
            "approval_status": "approved",
            "first_reviewer": "r1@test.com",
            "first_reviewed_at": NOW,
            "second_reviewer": "r2@test.com",
            "second_reviewed_at": NOW,
        })
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = record

        # Use list endpoint
        mock_db.query.return_value.filter.return_value.all.return_value = [record]
        mock_db.query.return_value.all.return_value = [record]

        # Direct test via _build_evidence_response
        from compliance.api.evidence_routes import _build_evidence_response
        built = _build_evidence_response(record)
        assert built.approval_status == "approved"
        assert built.first_reviewer == "r1@test.com"
        assert built.second_reviewer == "r2@test.com"
        assert built.requires_four_eyes is True

    def test_assertion_response_schema(self):
        """GET /assertions/{id} exposes every Phase 2 assertion field."""
        stored = make_assertion()
        mock_db.reset_mock()
        mock_db.query.return_value.filter.return_value.first.return_value = stored

        response = client.get(f"/assertions/{ASSERTION_UUID}")
        assert response.status_code == 200
        body = response.json()
        for key in ("assertion_type", "normative_tier", "evidence_ids", "verified_by"):
            assert key in body

    def test_evidence_response_includes_confidence_and_truth(self):
        """Enum-typed confidence/truth columns serialize to their string values."""
        record = make_evidence({
            "confidence_level": EvidenceConfidenceEnum.E3,
            "truth_status": EvidenceTruthStatusEnum.OBSERVED,
        })
        from compliance.api.evidence_routes import _build_evidence_response
        built = _build_evidence_response(record)
        assert built.confidence_level == "E3"
        assert built.truth_status == "observed"

    def test_evidence_response_none_four_eyes_fields_default(self):
        """Evidence carrying no review data falls back to safe defaults."""
        record = make_evidence()
        from compliance.api.evidence_routes import _build_evidence_response
        built = _build_evidence_response(record)
        assert built.approval_status == "none"
        assert built.requires_four_eyes is False
        assert built.first_reviewer is None
||||||
191
backend-compliance/tests/test_anti_fake_evidence_phase3.py
Normal file
191
backend-compliance/tests/test_anti_fake_evidence_phase3.py
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
"""Tests for Anti-Fake-Evidence Phase 3: Enforcement.
|
||||||
|
|
||||||
|
~8 tests covering:
|
||||||
|
- Evidence distribution endpoint (confidence counts, four-eyes pending)
|
||||||
|
- Dashboard multi-score presence
|
||||||
|
"""
|
||||||
|
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from unittest.mock import MagicMock, patch, PropertyMock
|
||||||
|
from fastapi import FastAPI
|
||||||
|
from fastapi.testclient import TestClient
|
||||||
|
|
||||||
|
from compliance.api.dashboard_routes import router as dashboard_router
|
||||||
|
from compliance.db.models import EvidenceConfidenceEnum, EvidenceTruthStatusEnum
|
||||||
|
from classroom_engine.database import get_db
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# App setup with mocked DB dependency
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
app = FastAPI()
|
||||||
|
app.include_router(dashboard_router)
|
||||||
|
|
||||||
|
mock_db = MagicMock()
|
||||||
|
|
||||||
|
|
||||||
|
def override_get_db():
|
||||||
|
yield mock_db
|
||||||
|
|
||||||
|
|
||||||
|
app.dependency_overrides[get_db] = override_get_db
|
||||||
|
client = TestClient(app)
|
||||||
|
|
||||||
|
NOW = datetime(2026, 3, 23, 14, 0, 0)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def make_evidence(confidence="E1", requires_four_eyes=False, approval_status="none"):
    """Build a MagicMock evidence row with the fields the distribution endpoint reads."""
    record = MagicMock()
    # The real column is an enum; tests only need its .value attribute.
    level = MagicMock()
    level.value = confidence
    record.confidence_level = level
    record.requires_four_eyes = requires_four_eyes
    record.approval_status = approval_status
    return record
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 1. TestEvidenceDistributionEndpoint
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestEvidenceDistributionEndpoint:
    """Test GET /dashboard/evidence-distribution endpoint."""

    def _setup_evidence(self, evidence_list):
        """Configure mock DB to return evidence list via EvidenceRepository."""
        # NOTE(review): this helper has no call sites in the class below —
        # confirm whether it is dead code.
        mock_db.reset_mock()
        # EvidenceRepository(db).get_all() internally does db.query(...).all()
        # We patch the EvidenceRepository class to return our list
        return evidence_list

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_empty_db_returns_zero_counts(self, mock_repo_cls):
        # No evidence at all -> totals are zero and every bucket is empty.
        mock_repo = MagicMock()
        mock_repo.get_all.return_value = []
        mock_repo_cls.return_value = mock_repo

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["total"] == 0
        assert data["four_eyes_pending"] == 0
        assert data["by_confidence"] == {"E0": 0, "E1": 0, "E2": 0, "E3": 0, "E4": 0}

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_counts_by_confidence_level(self, mock_repo_cls):
        # Eight records spread over E0..E4; the endpoint buckets them by level.
        evidence = [
            make_evidence("E0"),
            make_evidence("E1"),
            make_evidence("E1"),
            make_evidence("E2"),
            make_evidence("E3"),
            make_evidence("E3"),
            make_evidence("E3"),
            make_evidence("E4"),
        ]
        mock_repo = MagicMock()
        mock_repo.get_all.return_value = evidence
        mock_repo_cls.return_value = mock_repo

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["total"] == 8
        assert data["by_confidence"]["E0"] == 1
        assert data["by_confidence"]["E1"] == 2
        assert data["by_confidence"]["E2"] == 1
        assert data["by_confidence"]["E3"] == 3
        assert data["by_confidence"]["E4"] == 1

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_four_eyes_pending_count(self, mock_repo_cls):
        # Only not-yet-final review states count as "pending".
        evidence = [
            make_evidence("E1", requires_four_eyes=True, approval_status="pending_first"),
            make_evidence("E2", requires_four_eyes=True, approval_status="first_approved"),
            make_evidence("E2", requires_four_eyes=True, approval_status="approved"),
            make_evidence("E1", requires_four_eyes=True, approval_status="rejected"),
            make_evidence("E1", requires_four_eyes=False, approval_status="none"),
        ]
        mock_repo = MagicMock()
        mock_repo.get_all.return_value = evidence
        mock_repo_cls.return_value = mock_repo

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        # pending_first and first_approved are pending; approved and rejected are not
        assert data["four_eyes_pending"] == 2
        assert data["total"] == 5

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_null_confidence_defaults_to_e1(self, mock_repo_cls):
        # A row whose confidence_level column is NULL is bucketed as E1.
        e = MagicMock()
        e.confidence_level = None
        e.requires_four_eyes = False
        e.approval_status = "none"

        mock_repo = MagicMock()
        mock_repo.get_all.return_value = [e]
        mock_repo_cls.return_value = mock_repo

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["by_confidence"]["E1"] == 1
        assert data["total"] == 1

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    def test_all_four_eyes_approved_zero_pending(self, mock_repo_cls):
        # Everything approved -> nothing pending.
        evidence = [
            make_evidence("E2", requires_four_eyes=True, approval_status="approved"),
            make_evidence("E3", requires_four_eyes=True, approval_status="approved"),
        ]
        mock_repo = MagicMock()
        mock_repo.get_all.return_value = evidence
        mock_repo_cls.return_value = mock_repo

        resp = client.get("/dashboard/evidence-distribution")
        assert resp.status_code == 200
        data = resp.json()
        assert data["four_eyes_pending"] == 0
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# 2. TestDashboardMultiScore
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestDashboardMultiScore:
    """Test that dashboard response includes multi_score."""

    def test_dashboard_response_schema_includes_multi_score(self):
        """DashboardResponse schema must include the multi_score field."""
        from compliance.api.schemas import DashboardResponse
        declared = DashboardResponse.model_fields
        assert "multi_score" in declared, "DashboardResponse must have multi_score field"

    def test_multi_score_schema_has_required_fields(self):
        """MultiDimensionalScore schema should have all 7 fields."""
        from compliance.api.schemas import MultiDimensionalScore
        declared = MultiDimensionalScore.model_fields
        expected = (
            "requirement_coverage",
            "evidence_strength",
            "validation_quality",
            "evidence_freshness",
            "control_effectiveness",
            "overall_readiness",
            "hard_blocks",
        )
        for field in expected:
            assert field in declared, f"Missing field: {field}"

    def test_multi_score_default_values(self):
        """MultiDimensionalScore defaults should be sensible."""
        from compliance.api.schemas import MultiDimensionalScore
        blank = MultiDimensionalScore()
        assert blank.overall_readiness == 0.0
        assert blank.hard_blocks == []
        assert blank.requirement_coverage == 0.0
||||||
277
backend-compliance/tests/test_anti_fake_evidence_phase4.py
Normal file
277
backend-compliance/tests/test_anti_fake_evidence_phase4.py
Normal file
@@ -0,0 +1,277 @@
|
|||||||
|
"""Tests for Anti-Fake-Evidence Phase 4a: Traceability Matrix.
|
||||||
|
|
||||||
|
6 tests covering:
|
||||||
|
- Empty DB returns empty controls + zero summary
|
||||||
|
- Nested structure: Control → Evidence → Assertions
|
||||||
|
- Assertions appear under correct evidence
|
||||||
|
- Coverage flags computed correctly
|
||||||
|
- Control without evidence has correct coverage
|
||||||
|
- Summary counts match
|
||||||
|
"""
|
||||||
|
|
||||||
|
from datetime import datetime
|
||||||
|
from unittest.mock import MagicMock, patch
|
||||||
|
from fastapi import FastAPI
|
||||||
|
from fastapi.testclient import TestClient
|
||||||
|
|
||||||
|
from compliance.api.dashboard_routes import router as dashboard_router
|
||||||
|
from classroom_engine.database import get_db
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# App setup with mocked DB dependency
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
app = FastAPI()
|
||||||
|
app.include_router(dashboard_router)
|
||||||
|
|
||||||
|
mock_db = MagicMock()
|
||||||
|
|
||||||
|
|
||||||
|
def override_get_db():
|
||||||
|
yield mock_db
|
||||||
|
|
||||||
|
|
||||||
|
app.dependency_overrides[get_db] = override_get_db
|
||||||
|
client = TestClient(app)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def make_control(id="c1", control_id="CTRL-001", title="Test Control", status="pass", domain="gov"):
    """Build a MagicMock control row; enum columns are mocks exposing .value."""
    control = MagicMock()
    control.id = id
    control.control_id = control_id
    control.title = title
    # status and domain are enum-valued columns: only .value is read by tests.
    for attr, raw in (("status", status), ("domain", domain)):
        enum_mock = MagicMock()
        enum_mock.value = raw
        setattr(control, attr, enum_mock)
    return control
||||||
|
|
||||||
|
|
||||||
|
def make_evidence(id="e1", control_id="c1", title="Evidence 1", evidence_type="scan_report",
                  confidence="E2", status="valid"):
    """Build a MagicMock evidence row linked to a control."""
    record = MagicMock()
    record.id = id
    record.control_id = control_id
    record.title = title
    record.evidence_type = evidence_type
    # confidence_level and status are enum-valued columns exposing .value.
    for attr, raw in (("confidence_level", confidence), ("status", status)):
        enum_mock = MagicMock()
        enum_mock.value = raw
        setattr(record, attr, enum_mock)
    return record
||||||
|
|
||||||
|
|
||||||
|
def make_assertion(id="a1", entity_id="e1", sentence_text="System encrypts data at rest.",
                   assertion_type="assertion", confidence=0.85, verified_by=None):
    """Build a MagicMock assertion pointing at an evidence record."""
    record = MagicMock()
    for attr, raw in (
        ("id", id),
        ("entity_id", entity_id),
        ("sentence_text", sentence_text),
        ("assertion_type", assertion_type),
        ("confidence", confidence),
        ("verified_by", verified_by),
    ):
        setattr(record, attr, raw)
    return record
||||||
|
|
||||||
|
|
||||||
|
# ===========================================================================
|
||||||
|
# Tests
|
||||||
|
# ===========================================================================
|
||||||
|
|
||||||
|
class TestTraceabilityMatrix:
    """Test GET /dashboard/traceability-matrix endpoint."""

    # NOTE: @patch decorators apply bottom-up, so the first injected argument
    # (mock_ctrl_cls) binds ControlRepository, the second EvidenceRepository.

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    @patch("compliance.api.dashboard_routes.ControlRepository")
    def test_empty_db_returns_empty_matrix(self, mock_ctrl_cls, mock_ev_cls):
        """Empty DB should return zero controls and zero summary counts."""
        mock_ctrl = MagicMock()
        mock_ctrl.get_all.return_value = []
        mock_ctrl_cls.return_value = mock_ctrl

        mock_ev = MagicMock()
        mock_ev.get_all.return_value = []
        mock_ev_cls.return_value = mock_ev

        # Mock db.query(AssertionDB).filter(...).all()
        mock_db.reset_mock()
        mock_query = MagicMock()
        mock_query.filter.return_value.all.return_value = []
        mock_db.query.return_value = mock_query

        resp = client.get("/dashboard/traceability-matrix")
        assert resp.status_code == 200
        data = resp.json()
        assert data["controls"] == []
        assert data["summary"]["total_controls"] == 0
        assert data["summary"]["covered_controls"] == 0
        assert data["summary"]["fully_verified"] == 0
        assert data["summary"]["uncovered_controls"] == 0

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    @patch("compliance.api.dashboard_routes.ControlRepository")
    def test_nested_structure(self, mock_ctrl_cls, mock_ev_cls):
        """Control with evidence and assertions should return nested structure."""
        ctrl = make_control(id="c1", control_id="PRIV-001", title="Privacy Control")
        ev = make_evidence(id="e1", control_id="c1", confidence="E3")
        assertion = make_assertion(id="a1", entity_id="e1", verified_by="auditor@example.com")

        mock_ctrl = MagicMock()
        mock_ctrl.get_all.return_value = [ctrl]
        mock_ctrl_cls.return_value = mock_ctrl

        mock_ev = MagicMock()
        mock_ev.get_all.return_value = [ev]
        mock_ev_cls.return_value = mock_ev

        mock_db.reset_mock()
        mock_query = MagicMock()
        mock_query.filter.return_value.all.return_value = [assertion]
        mock_db.query.return_value = mock_query

        resp = client.get("/dashboard/traceability-matrix")
        assert resp.status_code == 200
        data = resp.json()

        assert len(data["controls"]) == 1
        c = data["controls"][0]
        assert c["control_id"] == "PRIV-001"
        assert len(c["evidence"]) == 1
        assert c["evidence"][0]["confidence_level"] == "E3"
        assert len(c["evidence"][0]["assertions"]) == 1
        # "verified" derives from a non-null verified_by on the assertion.
        assert c["evidence"][0]["assertions"][0]["verified"] is True

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    @patch("compliance.api.dashboard_routes.ControlRepository")
    def test_assertions_grouped_under_correct_evidence(self, mock_ctrl_cls, mock_ev_cls):
        """Assertions should only appear under the evidence they reference."""
        ctrl = make_control(id="c1")
        ev1 = make_evidence(id="e1", control_id="c1", title="Evidence A")
        ev2 = make_evidence(id="e2", control_id="c1", title="Evidence B")
        # a1 belongs to e1; a2 and a3 belong to e2 (linked by entity_id).
        a1 = make_assertion(id="a1", entity_id="e1", sentence_text="Assertion for E1")
        a2 = make_assertion(id="a2", entity_id="e2", sentence_text="Assertion for E2")
        a3 = make_assertion(id="a3", entity_id="e2", sentence_text="Second assertion for E2")

        mock_ctrl = MagicMock()
        mock_ctrl.get_all.return_value = [ctrl]
        mock_ctrl_cls.return_value = mock_ctrl

        mock_ev = MagicMock()
        mock_ev.get_all.return_value = [ev1, ev2]
        mock_ev_cls.return_value = mock_ev

        mock_db.reset_mock()
        mock_query = MagicMock()
        mock_query.filter.return_value.all.return_value = [a1, a2, a3]
        mock_db.query.return_value = mock_query

        resp = client.get("/dashboard/traceability-matrix")
        assert resp.status_code == 200
        data = resp.json()

        c = data["controls"][0]
        ev1_data = next(e for e in c["evidence"] if e["id"] == "e1")
        ev2_data = next(e for e in c["evidence"] if e["id"] == "e2")
        assert len(ev1_data["assertions"]) == 1
        assert len(ev2_data["assertions"]) == 2

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    @patch("compliance.api.dashboard_routes.ControlRepository")
    def test_coverage_flags_correct(self, mock_ctrl_cls, mock_ev_cls):
        """Coverage flags should reflect evidence, assertions, and verification state."""
        ctrl = make_control(id="c1")
        ev = make_evidence(id="e1", control_id="c1", confidence="E2")
        # One verified, one not
        a1 = make_assertion(id="a1", entity_id="e1", verified_by="alice")
        a2 = make_assertion(id="a2", entity_id="e1", verified_by=None)

        mock_ctrl = MagicMock()
        mock_ctrl.get_all.return_value = [ctrl]
        mock_ctrl_cls.return_value = mock_ctrl

        mock_ev = MagicMock()
        mock_ev.get_all.return_value = [ev]
        mock_ev_cls.return_value = mock_ev

        mock_db.reset_mock()
        mock_query = MagicMock()
        mock_query.filter.return_value.all.return_value = [a1, a2]
        mock_db.query.return_value = mock_query

        resp = client.get("/dashboard/traceability-matrix")
        assert resp.status_code == 200

        cov = resp.json()["controls"][0]["coverage"]
        assert cov["has_evidence"] is True
        assert cov["has_assertions"] is True
        assert cov["all_assertions_verified"] is False  # a2 not verified
        assert cov["min_confidence_level"] == "E2"

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    @patch("compliance.api.dashboard_routes.ControlRepository")
    def test_coverage_without_evidence(self, mock_ctrl_cls, mock_ev_cls):
        """Control with no evidence should have all coverage flags False/None."""
        ctrl = make_control(id="c1")

        mock_ctrl = MagicMock()
        mock_ctrl.get_all.return_value = [ctrl]
        mock_ctrl_cls.return_value = mock_ctrl

        mock_ev = MagicMock()
        mock_ev.get_all.return_value = []
        mock_ev_cls.return_value = mock_ev

        mock_db.reset_mock()
        mock_query = MagicMock()
        mock_query.filter.return_value.all.return_value = []
        mock_db.query.return_value = mock_query

        resp = client.get("/dashboard/traceability-matrix")
        assert resp.status_code == 200

        cov = resp.json()["controls"][0]["coverage"]
        assert cov["has_evidence"] is False
        assert cov["has_assertions"] is False
        assert cov["all_assertions_verified"] is False
        assert cov["min_confidence_level"] is None

    @patch("compliance.api.dashboard_routes.EvidenceRepository")
    @patch("compliance.api.dashboard_routes.ControlRepository")
    def test_summary_counts(self, mock_ctrl_cls, mock_ev_cls):
        """Summary should count total, covered, fully verified, and uncovered controls."""
        # c1: has evidence + verified assertions → fully verified
        # c2: has evidence but no assertions → covered, not fully verified
        # c3: no evidence → uncovered
        c1 = make_control(id="c1", control_id="C-001")
        c2 = make_control(id="c2", control_id="C-002")
        c3 = make_control(id="c3", control_id="C-003")

        ev1 = make_evidence(id="e1", control_id="c1", confidence="E3")
        ev2 = make_evidence(id="e2", control_id="c2", confidence="E1")

        a1 = make_assertion(id="a1", entity_id="e1", verified_by="auditor")

        mock_ctrl = MagicMock()
        mock_ctrl.get_all.return_value = [c1, c2, c3]
        mock_ctrl_cls.return_value = mock_ctrl

        mock_ev = MagicMock()
        mock_ev.get_all.return_value = [ev1, ev2]
        mock_ev_cls.return_value = mock_ev

        mock_db.reset_mock()
        mock_query = MagicMock()
        mock_query.filter.return_value.all.return_value = [a1]
        mock_db.query.return_value = mock_query

        resp = client.get("/dashboard/traceability-matrix")
        assert resp.status_code == 200

        summary = resp.json()["summary"]
        assert summary["total_controls"] == 3
        assert summary["covered_controls"] == 2
        assert summary["fully_verified"] == 1
        assert summary["uncovered_controls"] == 1
||||||
@@ -61,6 +61,7 @@ def make_control(overrides=None):
|
|||||||
c.status = MagicMock()
|
c.status = MagicMock()
|
||||||
c.status.value = "planned"
|
c.status.value = "planned"
|
||||||
c.status_notes = None
|
c.status_notes = None
|
||||||
|
c.status_justification = None
|
||||||
c.last_reviewed_at = None
|
c.last_reviewed_at = None
|
||||||
c.next_review_at = None
|
c.next_review_at = None
|
||||||
c.created_at = NOW
|
c.created_at = NOW
|
||||||
@@ -249,15 +250,15 @@ class TestUpdateControl:
|
|||||||
assert response.status_code == 404
|
assert response.status_code == 404
|
||||||
|
|
||||||
def test_update_status_with_valid_enum(self):
|
def test_update_status_with_valid_enum(self):
|
||||||
"""Status must be a valid ControlStatusEnum value."""
|
"""Status must be a valid ControlStatusEnum value (planned → in_progress is always allowed)."""
|
||||||
updated = make_control()
|
updated = make_control()
|
||||||
updated.status.value = "pass"
|
updated.status.value = "in_progress"
|
||||||
with patch("compliance.api.routes.ControlRepository") as MockRepo:
|
with patch("compliance.api.routes.ControlRepository") as MockRepo:
|
||||||
MockRepo.return_value.get_by_control_id.return_value = make_control()
|
MockRepo.return_value.get_by_control_id.return_value = make_control()
|
||||||
MockRepo.return_value.update.return_value = updated
|
MockRepo.return_value.update.return_value = updated
|
||||||
response = client.put(
|
response = client.put(
|
||||||
"/compliance/controls/GOV-001",
|
"/compliance/controls/GOV-001",
|
||||||
json={"status": "pass"},
|
json={"status": "in_progress"},
|
||||||
)
|
)
|
||||||
assert response.status_code == 200
|
assert response.status_code == 200
|
||||||
|
|
||||||
|
|||||||
@@ -62,6 +62,7 @@ from compliance.services.decomposition_pass import (
|
|||||||
_validate_atomic_control,
|
_validate_atomic_control,
|
||||||
_PATTERN_CANDIDATES_MAP,
|
_PATTERN_CANDIDATES_MAP,
|
||||||
_PATTERN_CANDIDATES_BY_ACTION,
|
_PATTERN_CANDIDATES_BY_ACTION,
|
||||||
|
_is_composite_obligation,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -1049,6 +1050,123 @@ class TestOutputValidator:
|
|||||||
issues = _validate_atomic_control(ac, "implement", "policy")
|
issues = _validate_atomic_control(ac, "implement", "policy")
|
||||||
assert not any("raw infinitive" in i for i in issues)
|
assert not any("raw infinitive" in i for i in issues)
|
||||||
|
|
||||||
|
def test_composite_obligation_warns(self):
|
||||||
|
"""Composite obligations produce a WARN in validation."""
|
||||||
|
ac = AtomicControlCandidate(
|
||||||
|
title="CCM-Praktiken", objective="x",
|
||||||
|
test_procedure=["tp"], evidence=["ev"],
|
||||||
|
)
|
||||||
|
ac._is_composite = True # type: ignore[attr-defined]
|
||||||
|
issues = _validate_atomic_control(ac, "implement", "policy")
|
||||||
|
assert any("composite" in i for i in issues)
|
||||||
|
|
||||||
|
def test_non_composite_no_warn(self):
|
||||||
|
"""Non-composite obligations do NOT produce composite WARN."""
|
||||||
|
ac = AtomicControlCandidate(
|
||||||
|
title="MFA", objective="x",
|
||||||
|
test_procedure=["tp"], evidence=["ev"],
|
||||||
|
)
|
||||||
|
ac._is_composite = False # type: ignore[attr-defined]
|
||||||
|
issues = _validate_atomic_control(ac, "implement", "technical_control")
|
||||||
|
assert not any("composite" in i for i in issues)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# COMPOSITE / FRAMEWORK DETECTION TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestCompositeDetection:
|
||||||
|
"""Tests for _is_composite_obligation()."""
|
||||||
|
|
||||||
|
def test_ccm_praktiken_detected(self):
|
||||||
|
"""'CCM-Praktiken für AIS implementieren' is composite."""
|
||||||
|
assert _is_composite_obligation(
|
||||||
|
"CCM-Praktiken für AIS implementieren", "CCM-Praktiken"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_kontrollen_gemaess_nist(self):
|
||||||
|
"""'Kontrollen gemäß NIST umsetzen' is composite."""
|
||||||
|
assert _is_composite_obligation(
|
||||||
|
"Kontrollen gemäß NIST SP 800-53 umsetzen", "Kontrollen"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_iso_27001_referenced(self):
|
||||||
|
"""ISO 27001 reference in object triggers composite."""
|
||||||
|
assert _is_composite_obligation(
|
||||||
|
"Maßnahmen umsetzen", "ISO 27001 Anhang A"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_owasp_framework(self):
|
||||||
|
"""OWASP reference triggers composite."""
|
||||||
|
assert _is_composite_obligation(
|
||||||
|
"OWASP Top 10 Maßnahmen implementieren", "Sicherheitsmaßnahmen"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_bsi_grundschutz(self):
|
||||||
|
"""BSI reference triggers composite."""
|
||||||
|
assert _is_composite_obligation(
|
||||||
|
"BSI-Grundschutz-Kompendium anwenden", "IT-Grundschutz"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_anforderungen_gemaess(self):
|
||||||
|
"""'Anforderungen gemäß X' is composite."""
|
||||||
|
assert _is_composite_obligation(
|
||||||
|
"Anforderungen gemäß EU AI Act umsetzen", "Anforderungen"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_simple_mfa_not_composite(self):
|
||||||
|
"""'MFA implementieren' is atomic, not composite."""
|
||||||
|
assert not _is_composite_obligation(
|
||||||
|
"Multi-Faktor-Authentifizierung implementieren", "MFA"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_simple_policy_not_composite(self):
|
||||||
|
"""'Sicherheitsrichtlinie dokumentieren' is atomic."""
|
||||||
|
assert not _is_composite_obligation(
|
||||||
|
"Eine Sicherheitsrichtlinie dokumentieren und pflegen",
|
||||||
|
"Sicherheitsrichtlinie",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_encryption_not_composite(self):
|
||||||
|
"""'Daten verschlüsseln' is atomic."""
|
||||||
|
assert not _is_composite_obligation(
|
||||||
|
"Personenbezogene Daten bei der Übertragung verschlüsseln",
|
||||||
|
"Personenbezogene Daten",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_composite_flags_on_atomic(self):
|
||||||
|
"""_compose_deterministic sets composite flags on the atomic."""
|
||||||
|
atomic = _compose_deterministic(
|
||||||
|
obligation_text="CCM-Praktiken für AIS implementieren",
|
||||||
|
action="implementieren",
|
||||||
|
object_="CCM-Praktiken",
|
||||||
|
parent_title="AI System Controls",
|
||||||
|
parent_severity="high",
|
||||||
|
parent_category="security",
|
||||||
|
is_test=False,
|
||||||
|
is_reporting=False,
|
||||||
|
)
|
||||||
|
assert atomic._is_composite is True # type: ignore[attr-defined]
|
||||||
|
assert atomic._atomicity == "composite" # type: ignore[attr-defined]
|
||||||
|
assert atomic._requires_decomposition is True # type: ignore[attr-defined]
|
||||||
|
|
||||||
|
def test_non_composite_flags_on_atomic(self):
|
||||||
|
"""_compose_deterministic sets atomic flags for non-composite."""
|
||||||
|
atomic = _compose_deterministic(
|
||||||
|
obligation_text="MFA implementieren",
|
||||||
|
action="implementieren",
|
||||||
|
object_="MFA",
|
||||||
|
parent_title="Access Control",
|
||||||
|
parent_severity="high",
|
||||||
|
parent_category="security",
|
||||||
|
is_test=False,
|
||||||
|
is_reporting=False,
|
||||||
|
)
|
||||||
|
assert atomic._is_composite is False # type: ignore[attr-defined]
|
||||||
|
assert atomic._atomicity == "atomic" # type: ignore[attr-defined]
|
||||||
|
assert atomic._requires_decomposition is False # type: ignore[attr-defined]
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# PROMPT BUILDER TESTS
|
# PROMPT BUILDER TESTS
|
||||||
|
|||||||
@@ -56,6 +56,22 @@ def make_evidence(overrides=None):
|
|||||||
e.valid_until = None
|
e.valid_until = None
|
||||||
e.collected_at = NOW
|
e.collected_at = NOW
|
||||||
e.created_at = NOW
|
e.created_at = NOW
|
||||||
|
# Anti-Fake-Evidence fields
|
||||||
|
e.confidence_level = MagicMock()
|
||||||
|
e.confidence_level.value = "E1"
|
||||||
|
e.truth_status = MagicMock()
|
||||||
|
e.truth_status.value = "uploaded"
|
||||||
|
e.generation_mode = None
|
||||||
|
e.may_be_used_as_evidence = True
|
||||||
|
e.reviewed_by = None
|
||||||
|
e.reviewed_at = None
|
||||||
|
# Phase 2 fields
|
||||||
|
e.approval_status = "none"
|
||||||
|
e.first_reviewer = None
|
||||||
|
e.first_reviewed_at = None
|
||||||
|
e.second_reviewer = None
|
||||||
|
e.second_reviewed_at = None
|
||||||
|
e.requires_four_eyes = False
|
||||||
if overrides:
|
if overrides:
|
||||||
for k, v in overrides.items():
|
for k, v in overrides.items():
|
||||||
setattr(e, k, v)
|
setattr(e, k, v)
|
||||||
|
|||||||
453
backend-compliance/tests/test_framework_decomposition.py
Normal file
453
backend-compliance/tests/test_framework_decomposition.py
Normal file
@@ -0,0 +1,453 @@
|
|||||||
|
"""Tests for Framework Decomposition Engine.
|
||||||
|
|
||||||
|
Covers:
|
||||||
|
- Registry loading
|
||||||
|
- Routing classification (atomic / compound / framework_container)
|
||||||
|
- Framework + domain matching
|
||||||
|
- Subcontrol selection
|
||||||
|
- Decomposition into sub-obligations
|
||||||
|
- Quality rules (warnings, errors)
|
||||||
|
- Inference helpers
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from compliance.services.framework_decomposition import (
|
||||||
|
classify_routing,
|
||||||
|
decompose_framework_container,
|
||||||
|
get_registry,
|
||||||
|
registry_stats,
|
||||||
|
reload_registry,
|
||||||
|
DecomposedObligation,
|
||||||
|
FrameworkDecompositionResult,
|
||||||
|
RoutingResult,
|
||||||
|
_detect_framework,
|
||||||
|
_has_framework_keywords,
|
||||||
|
_infer_action,
|
||||||
|
_infer_object,
|
||||||
|
_is_compound_obligation,
|
||||||
|
_match_domain,
|
||||||
|
_select_subcontrols,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# REGISTRY TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestRegistryLoading:
|
||||||
|
|
||||||
|
def test_registry_loads_successfully(self):
|
||||||
|
reg = get_registry()
|
||||||
|
assert len(reg) >= 3
|
||||||
|
|
||||||
|
def test_nist_in_registry(self):
|
||||||
|
reg = get_registry()
|
||||||
|
assert "NIST_SP800_53" in reg
|
||||||
|
|
||||||
|
def test_owasp_asvs_in_registry(self):
|
||||||
|
reg = get_registry()
|
||||||
|
assert "OWASP_ASVS" in reg
|
||||||
|
|
||||||
|
def test_csa_ccm_in_registry(self):
|
||||||
|
reg = get_registry()
|
||||||
|
assert "CSA_CCM" in reg
|
||||||
|
|
||||||
|
def test_nist_has_domains(self):
|
||||||
|
reg = get_registry()
|
||||||
|
nist = reg["NIST_SP800_53"]
|
||||||
|
assert len(nist["domains"]) >= 5
|
||||||
|
|
||||||
|
def test_nist_ac_has_subcontrols(self):
|
||||||
|
reg = get_registry()
|
||||||
|
nist = reg["NIST_SP800_53"]
|
||||||
|
ac = next(d for d in nist["domains"] if d["domain_id"] == "AC")
|
||||||
|
assert len(ac["subcontrols"]) >= 5
|
||||||
|
|
||||||
|
def test_registry_stats(self):
|
||||||
|
stats = registry_stats()
|
||||||
|
assert stats["frameworks"] >= 3
|
||||||
|
assert stats["total_domains"] >= 10
|
||||||
|
assert stats["total_subcontrols"] >= 30
|
||||||
|
|
||||||
|
def test_reload_registry(self):
|
||||||
|
reg = reload_registry()
|
||||||
|
assert len(reg) >= 3
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# ROUTING TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestClassifyRouting:
|
||||||
|
|
||||||
|
def test_atomic_simple_obligation(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="Multi-Faktor-Authentifizierung muss implementiert werden",
|
||||||
|
action_raw="implementieren",
|
||||||
|
object_raw="MFA",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "atomic"
|
||||||
|
|
||||||
|
def test_framework_container_ccm_ais(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="Die CCM-Praktiken fuer Application and Interface Security (AIS) muessen implementiert werden",
|
||||||
|
action_raw="implementieren",
|
||||||
|
object_raw="CCM-Praktiken fuer AIS",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
assert result.framework_ref == "CSA_CCM"
|
||||||
|
assert result.framework_domain == "AIS"
|
||||||
|
|
||||||
|
def test_framework_container_nist_800_53(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="Kontrollen gemaess NIST SP 800-53 umsetzen",
|
||||||
|
action_raw="umsetzen",
|
||||||
|
object_raw="Kontrollen gemaess NIST SP 800-53",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
assert result.framework_ref == "NIST_SP800_53"
|
||||||
|
|
||||||
|
def test_framework_container_owasp_asvs(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="OWASP ASVS Anforderungen muessen implementiert werden",
|
||||||
|
action_raw="implementieren",
|
||||||
|
object_raw="OWASP ASVS Anforderungen",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
assert result.framework_ref == "OWASP_ASVS"
|
||||||
|
|
||||||
|
def test_compound_obligation(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="Richtlinie erstellen und Schulungen durchfuehren",
|
||||||
|
action_raw="erstellen und durchfuehren",
|
||||||
|
object_raw="Richtlinie",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "compound"
|
||||||
|
|
||||||
|
def test_no_split_phrase_not_compound(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="Richtlinie dokumentieren und pflegen",
|
||||||
|
action_raw="dokumentieren und pflegen",
|
||||||
|
object_raw="Richtlinie",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "atomic"
|
||||||
|
|
||||||
|
def test_framework_keywords_in_object(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="Massnahmen umsetzen",
|
||||||
|
action_raw="umsetzen",
|
||||||
|
object_raw="Framework-Praktiken und Kontrollen",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
|
||||||
|
def test_bsi_grundschutz_detected(self):
|
||||||
|
result = classify_routing(
|
||||||
|
obligation_text="BSI IT-Grundschutz Massnahmen umsetzen",
|
||||||
|
action_raw="umsetzen",
|
||||||
|
object_raw="BSI IT-Grundschutz Massnahmen",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# FRAMEWORK DETECTION TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestFrameworkDetection:
|
||||||
|
|
||||||
|
def test_detect_csa_ccm_with_domain(self):
|
||||||
|
result = _detect_framework(
|
||||||
|
"CCM-Praktiken fuer AIS implementieren",
|
||||||
|
"CCM-Praktiken",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
assert result.framework_ref == "CSA_CCM"
|
||||||
|
assert result.framework_domain == "AIS"
|
||||||
|
|
||||||
|
def test_detect_nist_without_domain(self):
|
||||||
|
result = _detect_framework(
|
||||||
|
"NIST SP 800-53 Kontrollen implementieren",
|
||||||
|
"Kontrollen",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
assert result.framework_ref == "NIST_SP800_53"
|
||||||
|
|
||||||
|
def test_no_framework_in_simple_text(self):
|
||||||
|
result = _detect_framework(
|
||||||
|
"Passwortrichtlinie dokumentieren",
|
||||||
|
"Passwortrichtlinie",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "atomic"
|
||||||
|
|
||||||
|
def test_csa_ccm_iam_domain(self):
|
||||||
|
result = _detect_framework(
|
||||||
|
"CSA CCM Identity and Access Management Kontrollen",
|
||||||
|
"IAM-Kontrollen",
|
||||||
|
)
|
||||||
|
assert result.routing_type == "framework_container"
|
||||||
|
assert result.framework_ref == "CSA_CCM"
|
||||||
|
assert result.framework_domain == "IAM"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# DOMAIN MATCHING TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestDomainMatching:
|
||||||
|
|
||||||
|
def test_match_ais_by_id(self):
|
||||||
|
reg = get_registry()
|
||||||
|
ccm = reg["CSA_CCM"]
|
||||||
|
domain_id, title = _match_domain("AIS-Kontrollen implementieren", ccm)
|
||||||
|
assert domain_id == "AIS"
|
||||||
|
|
||||||
|
def test_match_by_full_title(self):
|
||||||
|
reg = get_registry()
|
||||||
|
ccm = reg["CSA_CCM"]
|
||||||
|
domain_id, title = _match_domain(
|
||||||
|
"Application and Interface Security Massnahmen", ccm,
|
||||||
|
)
|
||||||
|
assert domain_id == "AIS"
|
||||||
|
|
||||||
|
def test_match_nist_incident_response(self):
|
||||||
|
reg = get_registry()
|
||||||
|
nist = reg["NIST_SP800_53"]
|
||||||
|
domain_id, title = _match_domain(
|
||||||
|
"Vorfallreaktionsverfahren gemaess NIST IR", nist,
|
||||||
|
)
|
||||||
|
assert domain_id == "IR"
|
||||||
|
|
||||||
|
def test_no_match_generic_text(self):
|
||||||
|
reg = get_registry()
|
||||||
|
nist = reg["NIST_SP800_53"]
|
||||||
|
domain_id, title = _match_domain("etwas Allgemeines", nist)
|
||||||
|
assert domain_id is None
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# SUBCONTROL SELECTION TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestSubcontrolSelection:
|
||||||
|
|
||||||
|
def test_keyword_based_selection(self):
|
||||||
|
subcontrols = [
|
||||||
|
{"subcontrol_id": "SC-1", "title": "X", "keywords": ["api", "schnittstelle"], "object_hint": ""},
|
||||||
|
{"subcontrol_id": "SC-2", "title": "Y", "keywords": ["backup", "sicherung"], "object_hint": ""},
|
||||||
|
]
|
||||||
|
selected = _select_subcontrols("API-Schnittstellen schuetzen", subcontrols)
|
||||||
|
assert len(selected) == 1
|
||||||
|
assert selected[0]["subcontrol_id"] == "SC-1"
|
||||||
|
|
||||||
|
def test_no_keyword_match_returns_empty(self):
|
||||||
|
subcontrols = [
|
||||||
|
{"subcontrol_id": "SC-1", "keywords": ["backup"], "title": "Backup", "object_hint": ""},
|
||||||
|
]
|
||||||
|
selected = _select_subcontrols("Passwort aendern", subcontrols)
|
||||||
|
assert selected == []
|
||||||
|
|
||||||
|
def test_title_match_boosts_score(self):
|
||||||
|
subcontrols = [
|
||||||
|
{"subcontrol_id": "SC-1", "title": "Password Security", "keywords": ["passwort"], "object_hint": ""},
|
||||||
|
{"subcontrol_id": "SC-2", "title": "Network Security", "keywords": ["netzwerk"], "object_hint": ""},
|
||||||
|
]
|
||||||
|
selected = _select_subcontrols("Password Security muss implementiert werden", subcontrols)
|
||||||
|
assert len(selected) >= 1
|
||||||
|
assert selected[0]["subcontrol_id"] == "SC-1"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# DECOMPOSITION TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestDecomposeFrameworkContainer:
|
||||||
|
|
||||||
|
def test_decompose_ccm_ais(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-001",
|
||||||
|
parent_control_id="COMP-001",
|
||||||
|
obligation_text="Die CCM-Praktiken fuer AIS muessen implementiert werden",
|
||||||
|
framework_ref="CSA_CCM",
|
||||||
|
framework_domain="AIS",
|
||||||
|
)
|
||||||
|
assert result.release_state == "decomposed"
|
||||||
|
assert result.framework_ref == "CSA_CCM"
|
||||||
|
assert result.framework_domain == "AIS"
|
||||||
|
assert len(result.decomposed_obligations) >= 3
|
||||||
|
assert len(result.matched_subcontrols) >= 3
|
||||||
|
|
||||||
|
def test_decomposed_obligations_have_ids(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-001",
|
||||||
|
parent_control_id="COMP-001",
|
||||||
|
obligation_text="CCM-Praktiken fuer AIS",
|
||||||
|
framework_ref="CSA_CCM",
|
||||||
|
framework_domain="AIS",
|
||||||
|
)
|
||||||
|
for d in result.decomposed_obligations:
|
||||||
|
assert d.obligation_candidate_id.startswith("OBL-001-AIS-")
|
||||||
|
assert d.parent_control_id == "COMP-001"
|
||||||
|
assert d.source_ref_law == "Cloud Security Alliance CCM v4"
|
||||||
|
assert d.routing_type == "atomic"
|
||||||
|
assert d.release_state == "decomposed"
|
||||||
|
|
||||||
|
def test_decomposed_have_action_and_object(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-002",
|
||||||
|
parent_control_id="COMP-002",
|
||||||
|
obligation_text="CSA CCM AIS Massnahmen implementieren",
|
||||||
|
framework_ref="CSA_CCM",
|
||||||
|
framework_domain="AIS",
|
||||||
|
)
|
||||||
|
for d in result.decomposed_obligations:
|
||||||
|
assert d.action_raw, f"{d.subcontrol_id} missing action_raw"
|
||||||
|
assert d.object_raw, f"{d.subcontrol_id} missing object_raw"
|
||||||
|
|
||||||
|
def test_unknown_framework_returns_unmatched(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-003",
|
||||||
|
parent_control_id="COMP-003",
|
||||||
|
obligation_text="XYZ-Framework Controls",
|
||||||
|
framework_ref="NONEXISTENT",
|
||||||
|
framework_domain="ABC",
|
||||||
|
)
|
||||||
|
assert result.release_state == "unmatched"
|
||||||
|
assert any("framework_not_matched" in i for i in result.issues)
|
||||||
|
assert len(result.decomposed_obligations) == 0
|
||||||
|
|
||||||
|
def test_unknown_domain_falls_back_to_full(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-004",
|
||||||
|
parent_control_id="COMP-004",
|
||||||
|
obligation_text="CSA CCM Kontrollen implementieren",
|
||||||
|
framework_ref="CSA_CCM",
|
||||||
|
framework_domain=None,
|
||||||
|
)
|
||||||
|
# Should still decompose (falls back to keyword match or all domains)
|
||||||
|
assert result.release_state in ("decomposed", "unmatched")
|
||||||
|
|
||||||
|
def test_nist_incident_response_decomposition(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-010",
|
||||||
|
parent_control_id="COMP-010",
|
||||||
|
obligation_text="NIST SP 800-53 Vorfallreaktionsmassnahmen implementieren",
|
||||||
|
framework_ref="NIST_SP800_53",
|
||||||
|
framework_domain="IR",
|
||||||
|
)
|
||||||
|
assert result.release_state == "decomposed"
|
||||||
|
assert len(result.decomposed_obligations) >= 3
|
||||||
|
sc_ids = [d.subcontrol_id for d in result.decomposed_obligations]
|
||||||
|
assert any("IR-" in sc for sc in sc_ids)
|
||||||
|
|
||||||
|
def test_confidence_high_with_full_match(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-005",
|
||||||
|
parent_control_id="COMP-005",
|
||||||
|
obligation_text="CSA CCM AIS",
|
||||||
|
framework_ref="CSA_CCM",
|
||||||
|
framework_domain="AIS",
|
||||||
|
)
|
||||||
|
assert result.decomposition_confidence >= 0.7
|
||||||
|
|
||||||
|
def test_confidence_low_without_framework(self):
|
||||||
|
result = decompose_framework_container(
|
||||||
|
obligation_candidate_id="OBL-006",
|
||||||
|
parent_control_id="COMP-006",
|
||||||
|
obligation_text="Unbekannte Massnahmen",
|
||||||
|
framework_ref=None,
|
||||||
|
framework_domain=None,
|
||||||
|
)
|
||||||
|
assert result.decomposition_confidence <= 0.3
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# COMPOUND DETECTION TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestCompoundDetection:
|
||||||
|
|
||||||
|
def test_compound_verb(self):
|
||||||
|
assert _is_compound_obligation(
|
||||||
|
"erstellen und schulen",
|
||||||
|
"Richtlinie erstellen und Schulungen durchfuehren",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_no_split_phrase(self):
|
||||||
|
assert not _is_compound_obligation(
|
||||||
|
"dokumentieren und pflegen",
|
||||||
|
"Richtlinie dokumentieren und pflegen",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_no_split_define_and_maintain(self):
|
||||||
|
assert not _is_compound_obligation(
|
||||||
|
"define and maintain",
|
||||||
|
"Define and maintain a security policy",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_single_verb_not_compound(self):
|
||||||
|
assert not _is_compound_obligation(
|
||||||
|
"implementieren",
|
||||||
|
"MFA implementieren",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_empty_action_not_compound(self):
|
||||||
|
assert not _is_compound_obligation("", "something")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# FRAMEWORK KEYWORD TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestFrameworkKeywords:
|
||||||
|
|
||||||
|
def test_two_keywords_detected(self):
|
||||||
|
assert _has_framework_keywords("Framework-Praktiken implementieren")
|
||||||
|
|
||||||
|
def test_single_keyword_not_enough(self):
|
||||||
|
assert not _has_framework_keywords("Praktiken implementieren")
|
||||||
|
|
||||||
|
def test_no_keywords(self):
|
||||||
|
assert not _has_framework_keywords("MFA einrichten")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# INFERENCE HELPER TESTS
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestInferAction:
|
||||||
|
|
||||||
|
def test_infer_implementieren(self):
|
||||||
|
assert _infer_action("Massnahmen muessen implementiert werden") == "implementieren"
|
||||||
|
|
||||||
|
def test_infer_dokumentieren(self):
|
||||||
|
assert _infer_action("Richtlinie muss dokumentiert werden") == "dokumentieren"
|
||||||
|
|
||||||
|
def test_infer_testen(self):
|
||||||
|
assert _infer_action("System wird getestet") == "testen"
|
||||||
|
|
||||||
|
def test_infer_ueberwachen(self):
|
||||||
|
assert _infer_action("Logs werden ueberwacht") == "ueberwachen"
|
||||||
|
|
||||||
|
def test_infer_default(self):
|
||||||
|
assert _infer_action("etwas passiert") == "implementieren"
|
||||||
|
|
||||||
|
|
||||||
|
class TestInferObject:
|
||||||
|
|
||||||
|
def test_infer_from_muessen_pattern(self):
|
||||||
|
result = _infer_object("Zugriffsrechte muessen ueberprueft werden")
|
||||||
|
assert "ueberprueft" in result or "Zugriffsrechte" in result
|
||||||
|
|
||||||
|
def test_infer_fallback(self):
|
||||||
|
result = _infer_object("Einfacher Satz ohne Modalverb")
|
||||||
|
assert len(result) > 0
|
||||||
460
docs-src/services/sdk-modules/anti-fake-evidence.md
Normal file
460
docs-src/services/sdk-modules/anti-fake-evidence.md
Normal file
@@ -0,0 +1,460 @@
|
|||||||
|
# Anti-Fake-Evidence Architektur
|
||||||
|
|
||||||
|
**Status:** Phase 2 (aktiv seit 2026-03-23)
|
||||||
|
**Prefix:** CP-AFE
|
||||||
|
**Motivation:** Delve-Vorfall (Maerz 2026) — Compliance-Theater verhindern
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
Der Delve-Vorfall zeigte, wie Compliance-Automation zur Haftungsfalle wird:
|
||||||
|
|
||||||
|
- LLM-generierte Inhalte wurden als echte Nachweise behandelt
|
||||||
|
- Controls ohne Evidence standen auf "pass"
|
||||||
|
- 100%-Compliance-Claims ohne Validierung
|
||||||
|
|
||||||
|
Die Anti-Fake-Evidence Architektur implementiert 6 Guardrails, die sicherstellen, dass nur **nachgewiesene** Compliance als solche dargestellt wird.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Evidence Confidence Levels (E0–E4)
|
||||||
|
|
||||||
|
| Level | Bezeichnung | Beschreibung | Beispiel |
|
||||||
|
|-------|-------------|--------------|----------|
|
||||||
|
| **E0** | Generated | LLM-Output, Platzhalter | KI-generierter Nachweis-Entwurf |
|
||||||
|
| **E1** | Uploaded | Manuell hochgeladen, ungeprüft | PDF ohne Reviewer |
|
||||||
|
| **E2** | Reviewed | Intern geprüft, Hash verifiziert | Dokument von Compliance-Beauftragtem bestätigt |
|
||||||
|
| **E3** | Observed | System-beobachtet (CI/CD, API) | Automatischer SAST-Report mit SHA-256 |
|
||||||
|
| **E4** | Auditor-validated | Extern validiert | Wirtschaftsprüfer hat akzeptiert |
|
||||||
|
|
||||||
|
### Auto-Klassifikation
|
||||||
|
|
||||||
|
| Source | Confidence | Truth Status |
|
||||||
|
|--------|-----------|--------------|
|
||||||
|
| `ci_pipeline` | E3 | observed |
|
||||||
|
| `api` (mit Hash) | E3 | observed |
|
||||||
|
| `manual` / `upload` | E1 | uploaded |
|
||||||
|
| `generated` | E0 | generated |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Evidence Truth-Status Lifecycle
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
stateDiagram-v2
|
||||||
|
[*] --> generated : LLM erzeugt
|
||||||
|
[*] --> uploaded : Manuell hochgeladen
|
||||||
|
[*] --> observed : CI/CD Pipeline
|
||||||
|
|
||||||
|
generated --> rejected : Review abgelehnt
|
||||||
|
uploaded --> validated_internal : Intern geprueft
|
||||||
|
uploaded --> rejected : Review abgelehnt
|
||||||
|
observed --> validated_internal : Intern bestaetigt
|
||||||
|
|
||||||
|
validated_internal --> provided_to_auditor : An Auditor uebergeben
|
||||||
|
provided_to_auditor --> accepted_by_auditor : Auditor akzeptiert
|
||||||
|
provided_to_auditor --> rejected : Auditor lehnt ab
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Control Status-Transition State Machine
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
stateDiagram-v2
|
||||||
|
planned --> in_progress : immer erlaubt
|
||||||
|
in_progress --> pass : Evidence >= E2 + truth_status valid
|
||||||
|
in_progress --> partial : min 1 Evidence (beliebig)
|
||||||
|
in_progress --> fail : immer erlaubt
|
||||||
|
pass --> fail : Degradation (immer)
|
||||||
|
partial --> pass : Evidence >= E2 + truth_status valid
|
||||||
|
|
||||||
|
note right of pass
|
||||||
|
Voraussetzung: min 1 Evidence
|
||||||
|
mit confidence >= E2 UND
|
||||||
|
truth_status in (uploaded,
|
||||||
|
observed, validated_internal,
|
||||||
|
accepted_by_auditor)
|
||||||
|
end note
|
||||||
|
```
|
||||||
|
|
||||||
|
### Transition-Regeln
|
||||||
|
|
||||||
|
| Von | Nach | Voraussetzung |
|
||||||
|
|-----|------|---------------|
|
||||||
|
| planned | in_progress | keine |
|
||||||
|
| in_progress | pass | min 1 Evidence mit confidence >= E2, truth_status valide |
|
||||||
|
| in_progress | partial | min 1 Evidence (beliebig) |
|
||||||
|
| in_progress | fail | immer erlaubt |
|
||||||
|
| pass | fail | immer erlaubt (Degradation) |
|
||||||
|
| * | n/a | erfordert `status_justification` |
|
||||||
|
| * | planned | immer erlaubt (Reset) |
|
||||||
|
|
||||||
|
Bei Verstoß: **HTTP 409 Conflict** mit Liste der Violations.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## LLM Truth-Labels
|
||||||
|
|
||||||
|
Jeder LLM-generierte Inhalt wird mit einem Truth-Label versehen:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"generation_mode": "draft_assistance",
|
||||||
|
"truth_status": "generated",
|
||||||
|
"may_be_used_as_evidence": false,
|
||||||
|
"generated_by": "system"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Audit-Trail
|
||||||
|
|
||||||
|
Tabelle `compliance_llm_generation_audit`:
|
||||||
|
|
||||||
|
| Feld | Typ | Beschreibung |
|
||||||
|
|------|-----|--------------|
|
||||||
|
| entity_type | VARCHAR(50) | 'evidence', 'control', 'document' |
|
||||||
|
| entity_id | VARCHAR(36) | FK zur generierten Entitaet |
|
||||||
|
| generation_mode | VARCHAR(100) | 'draft_assistance', 'auto_generation' |
|
||||||
|
| truth_status | ENUM | generated, uploaded, ... |
|
||||||
|
| may_be_used_as_evidence | BOOLEAN | Default: FALSE |
|
||||||
|
| llm_model | VARCHAR(100) | z.B. 'qwen2.5vl:32b' |
|
||||||
|
| llm_provider | VARCHAR(50) | 'ollama', 'anthropic' |
|
||||||
|
| prompt_hash | VARCHAR(64) | SHA-256 des Prompts |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Multi-dimensionaler Compliance-Score
|
||||||
|
|
||||||
|
Statt einer einzelnen Prozentzahl zeigt der Score 6 Dimensionen:
|
||||||
|
|
||||||
|
| Dimension | Gewicht | Beschreibung |
|
||||||
|
|-----------|---------|--------------|
|
||||||
|
| requirement_coverage | 20% | % Requirements mit verlinktem Control |
|
||||||
|
| evidence_strength | 25% | Gewichteter Durchschnitt der Evidence-Confidence |
|
||||||
|
| validation_quality | 20% | % Evidence mit truth_status >= validated_internal |
|
||||||
|
| evidence_freshness | 10% | % Evidence nicht expired + reviewed < 90 Tage |
|
||||||
|
| control_effectiveness | 25% | Bestehende Formel (pass + partial*0.5) |
|
||||||
|
| **overall_readiness** | — | Gewichteter Composite der 5 Dimensionen |
|
||||||
|
|
||||||
|
### Hard Blocks
|
||||||
|
|
||||||
|
Zusaetzlich werden **Sperrgruende** angezeigt, die eine Audit-Readiness verhindern:
|
||||||
|
|
||||||
|
- Controls auf 'pass' ohne jegliche Evidence
|
||||||
|
- Controls auf 'pass' mit nur E0/E1-Evidence (keine Validierung)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verbotene Formulierungen
|
||||||
|
|
||||||
|
Der Drafting-Engine Validator prueft auf Formulierungen, die **ohne ausreichenden Nachweis** nicht verwendet werden duerfen:
|
||||||
|
|
||||||
|
| Verboten | Sicher stattdessen |
|
||||||
|
|----------|--------------------|
|
||||||
|
| "ist compliant" | "soll compliant sein" |
|
||||||
|
| "erfuellt vollstaendig" | "soll vollstaendig erfuellt werden" |
|
||||||
|
| "wurde geprueft" | "soll geprueft werden" |
|
||||||
|
| "wurde umgesetzt" | "ist zur Umsetzung vorgesehen" |
|
||||||
|
| "ist auditiert" | "soll auditiert werden" |
|
||||||
|
| "vollstaendig implementiert" | "Implementierung ist vorgesehen" |
|
||||||
|
| "nachweislich konform" | "Konformitaet ist nachzuweisen" |
|
||||||
|
|
||||||
|
**Erlaubt nur wenn:** control_status = pass AND confidence >= E2 AND truth_status in (validated_internal, accepted_by_auditor).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## API-Aenderungen
|
||||||
|
|
||||||
|
### Neue Endpoints
|
||||||
|
|
||||||
|
| Methode | Pfad | Beschreibung |
|
||||||
|
|---------|------|--------------|
|
||||||
|
| PATCH | `/evidence/{id}/review` | Evidence reviewen (Confidence upgraden) |
|
||||||
|
| POST | `/llm-audit` | LLM-Generierungs-Audit erstellen |
|
||||||
|
| GET | `/llm-audit` | LLM-Audit-Eintraege auflisten |
|
||||||
|
|
||||||
|
### Erweiterte Responses
|
||||||
|
|
||||||
|
**EvidenceResponse** — 6 neue Felder:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"confidence_level": "E3",
|
||||||
|
"truth_status": "observed",
|
||||||
|
"generation_mode": null,
|
||||||
|
"may_be_used_as_evidence": true,
|
||||||
|
"reviewed_by": null,
|
||||||
|
"reviewed_at": null
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**DashboardResponse** — neues Feld `multi_score`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"multi_score": {
|
||||||
|
"requirement_coverage": 85.0,
|
||||||
|
"evidence_strength": 60.0,
|
||||||
|
"validation_quality": 40.0,
|
||||||
|
"evidence_freshness": 90.0,
|
||||||
|
"control_effectiveness": 70.0,
|
||||||
|
"overall_readiness": 65.0,
|
||||||
|
"hard_blocks": ["3 Controls auf 'pass' haben nur E0/E1-Evidence"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**ControlResponse** — neues Feld `status_justification`.
|
||||||
|
|
||||||
|
**ControlUpdate** — neues Feld `status_justification` (Pflicht fuer n/a-Transitions).
|
||||||
|
|
||||||
|
### Status-Transition Fehler (409)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"detail": {
|
||||||
|
"error": "Status transition not allowed",
|
||||||
|
"current_status": "in_progress",
|
||||||
|
"requested_status": "pass",
|
||||||
|
"violations": [
|
||||||
|
"Transition to 'pass' requires at least 1 evidence with confidence >= E2..."
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Migration
|
||||||
|
|
||||||
|
### Phase 1
|
||||||
|
**Datei:** `backend-compliance/migrations/076_anti_fake_evidence.sql`
|
||||||
|
|
||||||
|
- Neue ENUM-Typen: `evidence_confidence_level`, `evidence_truth_status`
|
||||||
|
- 6 neue Spalten auf `compliance_evidence`
|
||||||
|
- `in_progress` Wert fuer `controlstatusenum`
|
||||||
|
- `status_justification` auf `compliance_controls`
|
||||||
|
- Neue Tabelle `compliance_llm_generation_audit`
|
||||||
|
- Backfill bestehender Evidence nach Source
|
||||||
|
- Indizes auf neue Spalten
|
||||||
|
|
||||||
|
### Phase 2
|
||||||
|
**Datei:** `backend-compliance/migrations/077_anti_fake_evidence_phase2.sql`
|
||||||
|
|
||||||
|
- Neue Tabelle `compliance_assertions` (Assertion Engine)
|
||||||
|
- 6 neue Spalten auf `compliance_evidence` (Four-Eyes: approval_status, first_reviewer, etc.)
|
||||||
|
- Performance-Index auf `compliance_audit_trail (entity_type, action, performed_at)`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: UI-Badges
|
||||||
|
|
||||||
|
Badges werden auf Evidence-Cards angezeigt und zeigen den Vertrauensstatus auf einen Blick:
|
||||||
|
|
||||||
|
| Badge | Farben | Anzeige |
|
||||||
|
|-------|--------|---------|
|
||||||
|
| **ConfidenceLevelBadge** | E0=rot, E1=gelb, E2=blau, E3=gruen, E4=emerald | Immer |
|
||||||
|
| **TruthStatusBadge** | generated=violet, uploaded=grau, observed=blau, validated=gruen, rejected=rot | Immer |
|
||||||
|
| **GenerationModeBadge** | violet + Sparkles-Icon | Wenn LLM-generiert |
|
||||||
|
| **ApprovalStatusBadge** | pending=gelb, first_approved=blau, approved=gruen, rejected=rot | Nur bei Four-Eyes |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Assertion Engine
|
||||||
|
|
||||||
|
Die Assertion Engine trennt **Behauptungen** von **Fakten** in Compliance-Texten.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
Text[Freitext] --> Split[Satz-Splitting]
|
||||||
|
Split --> Classify{Normativ?}
|
||||||
|
Classify -->|Pflicht| A[Assertion pflicht]
|
||||||
|
Classify -->|Empfehlung| B[Assertion empfehlung]
|
||||||
|
Classify -->|Kann| C[Assertion kann]
|
||||||
|
Classify -->|Begruendung| D[Rationale]
|
||||||
|
Classify -->|Evidence-Keywords| E[Fact tentativ]
|
||||||
|
E --> Verify[Manuell verifizieren]
|
||||||
|
Verify --> Fact[Verified Fact]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Assertion-Typen
|
||||||
|
|
||||||
|
| Typ | Bedeutung | Beispiel |
|
||||||
|
|-----|-----------|----------|
|
||||||
|
| **assertion** | Normative Aussage (unbewiesen) | "Die Organisation muss ein ISMS implementieren" |
|
||||||
|
| **fact** | Verifizierte Tatsache | "ISO-Zertifikat Nr. 12345 liegt vor" |
|
||||||
|
| **rationale** | Begruendung | "Dies ist notwendig, weil..." |
|
||||||
|
|
||||||
|
### Normative Tiers
|
||||||
|
|
||||||
|
| Tier | Signal-Woerter |
|
||||||
|
|------|---------------|
|
||||||
|
| **pflicht** | muss, hat sicherzustellen, ist verpflichtet, shall, must, required |
|
||||||
|
| **empfehlung** | soll, sollte, gewaehrleisten, should, ensure |
|
||||||
|
| **kann** | kann, darf, may, optional |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Four-Eyes-Prinzip
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
stateDiagram-v2
|
||||||
|
[*] --> pending_first : Evidence erstellt (Gov/Priv Domain)
|
||||||
|
pending_first --> first_approved : 1. Reviewer OK
|
||||||
|
first_approved --> approved : 2. Reviewer OK (andere Person!)
|
||||||
|
first_approved --> rejected : 2. Reviewer lehnt ab
|
||||||
|
pending_first --> rejected : 1. Reviewer lehnt ab
|
||||||
|
|
||||||
|
note right of first_approved
|
||||||
|
Zweiter Reviewer MUSS
|
||||||
|
eine andere Person sein
|
||||||
|
als der erste Reviewer
|
||||||
|
end note
|
||||||
|
```
|
||||||
|
|
||||||
|
### Domains mit Four-Eyes-Pflicht
|
||||||
|
|
||||||
|
| Domain | Four-Eyes? | Begruendung |
|
||||||
|
|--------|-----------|-------------|
|
||||||
|
| `gov` | Ja | Governance-Controls sind audit-kritisch |
|
||||||
|
| `priv` | Ja | Datenschutz erfordert unabhaengige Pruefung |
|
||||||
|
| `ops`, `sdlc`, `ai`, ... | Nein | Operationale Controls mit Single-Review |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Audit-Trail-Erweiterung
|
||||||
|
|
||||||
|
Neue Audit-Trail-Eintraege:
|
||||||
|
|
||||||
|
| Entity | Action | Wann |
|
||||||
|
|--------|--------|------|
|
||||||
|
| evidence | create | Bei Evidence-Erstellung |
|
||||||
|
| evidence | review | Bei Confidence/Truth-Status-Aenderung |
|
||||||
|
| evidence | reject | Bei Evidence-Ablehnung |
|
||||||
|
| control | status_change | Bei Control-Status-Aenderung |
|
||||||
|
|
||||||
|
Jeder Eintrag enthaelt `old_value`, `new_value` und einen SHA-256 `checksum`.
|
||||||
|
|
||||||
|
Neuer Query-Endpoint: `GET /audit-trail?entity_type=evidence&entity_id={id}`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Neue API-Endpoints
|
||||||
|
|
||||||
|
| Methode | Pfad | Beschreibung |
|
||||||
|
|---------|------|--------------|
|
||||||
|
| PATCH | `/evidence/{id}/reject` | Evidence ablehnen |
|
||||||
|
| GET | `/audit-trail` | Audit-Trail abfragen (Filter: entity_type, entity_id, action) |
|
||||||
|
| POST | `/assertions` | Assertion manuell erstellen |
|
||||||
|
| GET | `/assertions` | Assertions auflisten (Filter: entity_type, entity_id, assertion_type) |
|
||||||
|
| GET | `/assertions/{id}` | Assertion Detail |
|
||||||
|
| PUT | `/assertions/{id}` | Assertion aktualisieren |
|
||||||
|
| POST | `/assertions/{id}/verify` | Als Fakt markieren |
|
||||||
|
| POST | `/assertions/extract` | Automatische Extraktion aus Freitext |
|
||||||
|
| GET | `/assertions/summary` | Stats (total, facts, rationale, unverified) |
|
||||||
|
|
||||||
|
### Erweiterte EvidenceResponse (Phase 2)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"approval_status": "first_approved",
|
||||||
|
"first_reviewer": "reviewer1@example.com",
|
||||||
|
"first_reviewed_at": "2026-03-23T14:00:00Z",
|
||||||
|
"second_reviewer": null,
|
||||||
|
"second_reviewed_at": null,
|
||||||
|
"requires_four_eyes": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 3: Durchsetzung (UI + Dashboard)
|
||||||
|
|
||||||
|
Phase 3 macht das Anti-Fake-Evidence-System **benutzbar und durchsetzbar** im Frontend.
|
||||||
|
|
||||||
|
### Evidence Review/Reject UI
|
||||||
|
|
||||||
|
Die Evidence-Seite bietet jetzt direkte Buttons fuer Review und Ablehnung:
|
||||||
|
|
||||||
|
- **Reviewen-Button**: Sichtbar wenn `approvalStatus` nicht `approved` oder `rejected`
|
||||||
|
- **Ablehnen-Button**: Sichtbar bei Four-Eyes-Evidence die noch nicht abgeschlossen ist
|
||||||
|
|
||||||
|
**ReviewModal** erlaubt:
|
||||||
|
- Confidence-Level aendern (E0-E4 Dropdown)
|
||||||
|
- Truth-Status aendern (Dropdown)
|
||||||
|
- Reviewer E-Mail angeben
|
||||||
|
- Four-Eyes-Warnung wenn noch ein weiterer Review noetig ist
|
||||||
|
|
||||||
|
**RejectModal** erlaubt:
|
||||||
|
- Ablehnungsgrund als Freitext
|
||||||
|
- Reviewer E-Mail angeben
|
||||||
|
|
||||||
|
Bei Four-Eyes Same-Person-Fehler (HTTP 400) wird eine Fehlermeldung angezeigt.
|
||||||
|
|
||||||
|
### Control Status-Transition Fehlerbehandlung
|
||||||
|
|
||||||
|
Die Controls-Seite zeigt jetzt detaillierte Fehler bei blockierten Status-Transitionen:
|
||||||
|
|
||||||
|
- **Optimistic Update mit Rollback**: UI aktualisiert sofort, rollt bei Fehler zurueck
|
||||||
|
- **TransitionErrorBanner**: Zeigt Violations-Liste bei HTTP 409 Conflict
|
||||||
|
- z.B. "Transition to 'pass' requires at least 1 evidence with confidence >= E2"
|
||||||
|
- **Link zur Evidence-Seite**: "Evidence hinzufuegen" direkt im Fehler-Banner
|
||||||
|
|
||||||
|
### Evidence Audit-Trail Anzeige
|
||||||
|
|
||||||
|
Neuer "Historie"-Button auf jeder Evidence-Card zeigt den vollstaendigen Audit-Trail:
|
||||||
|
|
||||||
|
- Timeline mit Zeitstempel, Aktion und Akteur
|
||||||
|
- Details zu Feldaenderungen (old_value → new_value)
|
||||||
|
- Lazy-Loading: Erst beim Aufklappen wird `GET /audit-trail` abgerufen
|
||||||
|
|
||||||
|
### Evidence Confidence-Filter
|
||||||
|
|
||||||
|
Neue Filter-Pills auf der Evidence-Seite:
|
||||||
|
|
||||||
|
```
|
||||||
|
[Alle] [Gueltig] [Abgelaufen] [Ausstehend] | [E0] [E1] [E2] [E3] [E4]
|
||||||
|
```
|
||||||
|
|
||||||
|
Farbcodierung: E0=rot, E1=gelb, E2=blau, E3=gruen, E4=emerald (passend zu den Badges)
|
||||||
|
|
||||||
|
### Evidence Confidence-Verteilung (Dashboard)
|
||||||
|
|
||||||
|
Neuer Endpoint und Dashboard-Bereich:
|
||||||
|
|
||||||
|
**Endpoint:** `GET /dashboard/evidence-distribution`
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"by_confidence": {"E0": 2, "E1": 5, "E2": 3, "E3": 8, "E4": 1},
|
||||||
|
"four_eyes_pending": 3,
|
||||||
|
"total": 19
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Compliance Hub** zeigt:
|
||||||
|
- Horizontal gestapelter Balken der Confidence-Verteilung (E0 rot → E4 emerald)
|
||||||
|
- Multi-Score Dimensionen als Fortschrittsbalken (5 Dimensionen + Audit-Readiness)
|
||||||
|
- Four-Eyes-Warteschlange (Anzahl pending)
|
||||||
|
- Hard-Blocks-Liste oder "Keine Hard Blocks" Status
|
||||||
|
|
||||||
|
### Assertions-Seite
|
||||||
|
|
||||||
|
Neue Seite unter `/sdk/assertions` mit 3 Tabs:
|
||||||
|
|
||||||
|
| Tab | Inhalt |
|
||||||
|
|-----|--------|
|
||||||
|
| **Uebersicht** | Summary-Stats (Assertions, Facts, Rationale, Unverified) |
|
||||||
|
| **Assertion-Liste** | Filterbare Tabelle (entity_type, assertion_type) mit AssertionCards |
|
||||||
|
| **Extraktion** | Textfeld + Button → `POST /assertions/extract` |
|
||||||
|
|
||||||
|
**AssertionCard** zeigt:
|
||||||
|
- Normative-Tier als farbiger Badge (Pflicht=rot, Empfehlung=gelb, Kann=blau)
|
||||||
|
- Typ-Badge (Assertion/Fact/Rationale)
|
||||||
|
- "Als Fakt pruefen"-Button → `POST /assertions/{id}/verify`
|
||||||
|
|
||||||
|
### Phase 3: Neue API-Endpoints
|
||||||
|
|
||||||
|
| Methode | Pfad | Beschreibung |
|
||||||
|
|---------|------|--------------|
|
||||||
|
| GET | `/dashboard/evidence-distribution` | Evidence-Verteilung nach Confidence + Four-Eyes-Status |
|
||||||
@@ -88,12 +88,21 @@ compliance_evidence (
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Anti-Fake-Evidence
|
||||||
|
|
||||||
|
Seit Phase 1 (2026-03-23) werden Nachweise automatisch mit **Confidence Levels** (E0–E4) und **Truth Status** klassifiziert. Details: [Anti-Fake-Evidence Architektur](anti-fake-evidence.md)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Tests
|
## Tests
|
||||||
|
|
||||||
**Testdatei:** `backend-compliance/tests/test_evidence_routes.py`
|
**Testdatei:** `backend-compliance/tests/test_evidence_routes.py`
|
||||||
**Anzahl Tests:** 11 · **Status:** ✅ alle bestanden (Stand 2026-03-05)
|
**Anzahl Tests:** 11 · **Status:** ✅ alle bestanden (Stand 2026-03-05)
|
||||||
|
|
||||||
|
**Anti-Fake-Evidence Tests:** `backend-compliance/tests/test_anti_fake_evidence.py`
|
||||||
|
**Anzahl Tests:** ~45 · Confidence-Klassifikation, State Machine, Multi-Score, LLM Audit
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd backend-compliance
|
cd backend-compliance
|
||||||
python3 -m pytest tests/test_evidence_routes.py -v
|
python3 -m pytest tests/test_evidence_routes.py tests/test_anti_fake_evidence.py -v
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -109,6 +109,7 @@ nav:
|
|||||||
- Control Generator Pipeline: services/sdk-modules/control-generator-pipeline.md
|
- Control Generator Pipeline: services/sdk-modules/control-generator-pipeline.md
|
||||||
- Deduplizierungs-Engine: services/sdk-modules/dedup-engine.md
|
- Deduplizierungs-Engine: services/sdk-modules/dedup-engine.md
|
||||||
- Control Provenance Wiki: services/sdk-modules/control-provenance.md
|
- Control Provenance Wiki: services/sdk-modules/control-provenance.md
|
||||||
|
- Anti-Fake-Evidence Architektur: services/sdk-modules/anti-fake-evidence.md
|
||||||
- Strategie:
|
- Strategie:
|
||||||
- Wettbewerbsanalyse & Roadmap: strategy/wettbewerbsanalyse.md
|
- Wettbewerbsanalyse & Roadmap: strategy/wettbewerbsanalyse.md
|
||||||
- Entwicklung:
|
- Entwicklung:
|
||||||
|
|||||||
Reference in New Issue
Block a user