Files
breakpilot-compliance/ai-compliance-sdk/internal/ucca/ai_act_module.go
Sharang Parnerkar c293d76e6b refactor(go/ucca): split policy_engine, legal_rag, ai_act, nis2, financial_policy, dsgvo_module
Split 6 oversized files (719–882 LOC each) into focused files under 500 LOC:
- policy_engine.go → types, loader, eval, gen (4 files)
- legal_rag.go     → types, client, http, context, scroll (5 files)
- ai_act_module.go → module, yaml, obligations (3 files)
- nis2_module.go   → module, yaml, obligations + shared obligation_yaml_types.go (3+1 files)
- financial_policy.go → types, engine (2 files)
- dsgvo_module.go  → module, yaml, obligations (3 files)

All in package ucca, zero exported symbol renames, go test ./internal/ucca/... passes.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-19 09:48:41 +02:00

316 lines
10 KiB
Go

package ucca
// ============================================================================
// AI Act Module
// ============================================================================
//
// This module implements the EU AI Act (Regulation 2024/1689) which establishes
// harmonized rules for artificial intelligence systems in the EU.
//
// The AI Act uses a risk-based approach:
// - Unacceptable Risk: Prohibited practices (Art. 5)
// - High Risk: Annex III systems with strict requirements (Art. 6-49)
// - Limited Risk: Transparency obligations (Art. 50)
// - Minimal Risk: No additional requirements
//
// Split into:
// - ai_act_module.go — struct, constants, classification, decision tree
// - ai_act_yaml.go — YAML loading and conversion helpers
// - ai_act_obligations.go — hardcoded fallback obligations/controls/deadlines
//
// ============================================================================
// AIActRiskLevel represents the AI Act risk classification.
type AIActRiskLevel string

// Risk tiers of the EU AI Act, ordered from most to least restrictive.
const (
	AIActUnacceptable  AIActRiskLevel = "unacceptable"   // prohibited practices (Art. 5)
	AIActHighRisk      AIActRiskLevel = "high_risk"      // Annex III systems (Art. 6-49)
	AIActLimitedRisk   AIActRiskLevel = "limited_risk"   // transparency obligations (Art. 50)
	AIActMinimalRisk   AIActRiskLevel = "minimal_risk"   // no additional requirements
	AIActNotApplicable AIActRiskLevel = "not_applicable" // AI Act does not apply
)
// AIActModule implements the RegulationModule interface for the AI Act.
type AIActModule struct {
	obligations       []Obligation        // obligations loaded from YAML (or hardcoded fallback)
	controls          []ObligationControl // controls stamped with the module ID on derivation
	incidentDeadlines []IncidentDeadline  // incident reporting deadlines for high-risk systems
	decisionTree      *DecisionTree       // risk-classification tree built by buildDecisionTree
	loaded            bool                // set to true once NewAIActModule finishes initialization
}
// AIActAnnexIIICategories maps Annex III high-risk category keys to their
// German display names. Entries are listed alphabetically by key.
var AIActAnnexIIICategories = map[string]string{
	"biometric":               "Biometrische Identifizierung und Kategorisierung",
	"critical_infrastructure": "Verwaltung und Betrieb kritischer Infrastruktur",
	"education":               "Allgemeine und berufliche Bildung",
	"employment":              "Beschaeftigung, Personalverwaltung, Zugang zu Selbststaendigkeit",
	"essential_services":      "Zugang zu wesentlichen privaten/oeffentlichen Diensten",
	"justice":                 "Rechtspflege und demokratische Prozesse",
	"law_enforcement":         "Strafverfolgung",
	"migration":               "Migration, Asyl und Grenzkontrolle",
}
// NewAIActModule creates a new AI Act module. Obligations are loaded from
// YAML; if that fails, a built-in hardcoded fallback set is used instead.
// The returned module always has its decision tree built and loaded set.
func NewAIActModule() (*AIActModule, error) {
	mod := &AIActModule{}
	mod.obligations = []Obligation{}
	mod.controls = []ObligationControl{}
	mod.incidentDeadlines = []IncidentDeadline{}

	// Best effort: the YAML source is preferred, but its absence or
	// malformation is not fatal — fall back to the hardcoded set.
	if yamlErr := mod.loadFromYAML(); yamlErr != nil {
		mod.loadHardcodedObligations()
	}

	mod.buildDecisionTree()
	mod.loaded = true
	return mod, nil
}
// ID returns the stable module identifier used for stamping obligations.
func (m *AIActModule) ID() string {
	return "ai_act"
}
// Name returns the human-readable module name.
func (m *AIActModule) Name() string {
	return "AI Act (EU KI-Verordnung)"
}
// Description returns a brief description of the regulation this module covers.
func (m *AIActModule) Description() string {
	const desc = "EU-Verordnung 2024/1689 zur Festlegung harmonisierter Vorschriften fuer kuenstliche Intelligenz"
	return desc
}
// IsApplicable checks if the AI Act applies to the organization: it must
// use AI and have an EU nexus (EU member state, or offering into the EU).
func (m *AIActModule) IsApplicable(facts *UnifiedFacts) bool {
	usesAI := facts.AIUsage.UsesAI
	euNexus := facts.Organization.EUMember || facts.DataProtection.OffersToEU
	return usesAI && euNexus
}
// GetClassification returns the AI Act risk classification as a string.
func (m *AIActModule) GetClassification(facts *UnifiedFacts) string {
	level := m.ClassifyRisk(facts)
	return string(level)
}
// ClassifyRisk determines the highest applicable AI Act risk level.
// Checks run from most to least restrictive, so the strictest matching
// tier wins.
func (m *AIActModule) ClassifyRisk(facts *UnifiedFacts) AIActRiskLevel {
	switch {
	case !facts.AIUsage.UsesAI:
		return AIActNotApplicable
	case m.hasProhibitedPractice(facts):
		return AIActUnacceptable
	case m.hasHighRiskAI(facts):
		return AIActHighRisk
	case m.hasLimitedRiskAI(facts):
		return AIActLimitedRisk
	default:
		// AI is in use (guaranteed by the first case) but matches no
		// higher tier. The previous version re-checked UsesAI here —
		// always true at this point — leaving an unreachable
		// not-applicable fallthrough; both have been removed.
		return AIActMinimalRisk
	}
}
// hasProhibitedPractice reports whether any Art. 5 prohibited AI practice
// is in use: social scoring, emotion recognition in education or
// employment contexts, individual predictive policing, or biometric
// identification for law enforcement.
func (m *AIActModule) hasProhibitedPractice(facts *UnifiedFacts) bool {
	ai := facts.AIUsage
	switch {
	case ai.SocialScoring:
		return true
	case ai.EmotionRecognition && (facts.Sector.PrimarySector == "education" || ai.EmploymentDecisions):
		return true
	case ai.PredictivePolicingIndividual:
		return true
	case ai.BiometricIdentification && ai.LawEnforcement:
		return true
	default:
		return false
	}
}
// hasHighRiskAI reports whether the organization operates high-risk AI:
// either explicitly flagged, falling into an Annex III domain, or AI used
// by a KRITIS (critical infrastructure) organization.
func (m *AIActModule) hasHighRiskAI(facts *UnifiedFacts) bool {
	ai := facts.AIUsage
	if ai.HasHighRiskAI {
		return true
	}
	// Any Annex III domain triggers the high-risk tier.
	annexIII := ai.BiometricIdentification ||
		ai.CriticalInfrastructure ||
		ai.EducationAccess ||
		ai.EmploymentDecisions ||
		ai.EssentialServices ||
		ai.LawEnforcement ||
		ai.MigrationAsylum ||
		ai.JusticeAdministration
	if annexIII {
		return true
	}
	// AI operated by KRITIS organizations is treated as high risk.
	return facts.Sector.IsKRITIS && ai.UsesAI
}
// hasLimitedRiskAI reports whether transparency-tier (Art. 50) AI is in
// use: explicitly flagged, interacting with natural persons, generating
// deepfakes, or emotion recognition outside the prohibited contexts.
func (m *AIActModule) hasLimitedRiskAI(facts *UnifiedFacts) bool {
	ai := facts.AIUsage
	if ai.HasLimitedRiskAI || ai.AIInteractsWithNaturalPersons || ai.GeneratesDeepfakes {
		return true
	}
	// Emotion recognition is limited-risk only outside the contexts where
	// it is prohibited (education sector, employment decisions).
	inProhibitedContext := facts.Sector.PrimarySector == "education" || ai.EmploymentDecisions
	return ai.EmotionRecognition && !inProhibitedContext
}
// isProvider reports whether the organization acts as an AI provider.
func (m *AIActModule) isProvider(facts *UnifiedFacts) bool {
	return facts.AIUsage.IsAIProvider
}

// isDeployer reports whether the organization acts as an AI deployer.
// Any organization that uses AI but is not a provider defaults to the
// deployer role.
func (m *AIActModule) isDeployer(facts *UnifiedFacts) bool {
	return facts.AIUsage.IsAIDeployer || (facts.AIUsage.UsesAI && !facts.AIUsage.IsAIProvider)
}

// isGPAIProvider reports whether the organization provides a
// general-purpose AI model (GPAI) in the provider role.
func (m *AIActModule) isGPAIProvider(facts *UnifiedFacts) bool {
	return facts.AIUsage.UsesGPAI && facts.AIUsage.IsAIProvider
}

// hasSystemicRiskGPAI reports whether a GPAI model classified as having
// systemic risk is in use.
func (m *AIActModule) hasSystemicRiskGPAI(facts *UnifiedFacts) bool {
	return facts.AIUsage.GPAIWithSystemicRisk
}
// requiresFRIA reports whether a fundamental rights impact assessment is
// required: only for high-risk AI, and only when deployed by a public
// authority or in essential-services, employment, or education contexts.
func (m *AIActModule) requiresFRIA(facts *UnifiedFacts) bool {
	if !m.hasHighRiskAI(facts) {
		return false
	}
	ai := facts.AIUsage
	return facts.Organization.IsPublicAuthority ||
		ai.EssentialServices ||
		ai.EmploymentDecisions ||
		ai.EducationAccess
}
// DeriveObligations derives all applicable AI Act obligations for the
// organization described by facts. Each returned obligation is a copy
// stamped with this module's ID.
func (m *AIActModule) DeriveObligations(facts *UnifiedFacts) []Obligation {
	if !m.IsApplicable(facts) {
		return []Obligation{}
	}
	level := m.ClassifyRisk(facts)
	var applicable []Obligation
	for _, candidate := range m.obligations {
		if !m.obligationApplies(candidate, level, facts) {
			continue
		}
		stamped := candidate // copy so the stored obligation stays untouched
		stamped.RegulationID = m.ID()
		applicable = append(applicable, stamped)
	}
	return applicable
}
// obligationApplies evaluates an obligation's AppliesWhen condition
// against the computed risk level and organization facts. Empty or
// unrecognized conditions default to "applies whenever AI is used".
func (m *AIActModule) obligationApplies(obl Obligation, riskLevel AIActRiskLevel, facts *UnifiedFacts) bool {
	highOrWorse := riskLevel == AIActHighRisk || riskLevel == AIActUnacceptable
	switch obl.AppliesWhen {
	case "uses_ai":
		return facts.AIUsage.UsesAI
	case "high_risk":
		return highOrWorse
	case "high_risk_provider":
		return highOrWorse && m.isProvider(facts)
	case "high_risk_deployer":
		return highOrWorse && m.isDeployer(facts)
	case "high_risk_deployer_fria":
		return highOrWorse && m.isDeployer(facts) && m.requiresFRIA(facts)
	case "limited_risk":
		// Transparency obligations also bind high-risk systems.
		return riskLevel == AIActLimitedRisk || riskLevel == AIActHighRisk
	case "gpai_provider":
		return m.isGPAIProvider(facts)
	case "gpai_systemic_risk":
		return m.hasSystemicRiskGPAI(facts)
	default:
		// Covers the empty condition and any unknown value.
		return facts.AIUsage.UsesAI
	}
}
// DeriveControls derives all applicable AI Act controls. Whenever the AI
// Act applies, every loaded control is returned as a copy stamped with
// this module's ID.
func (m *AIActModule) DeriveControls(facts *UnifiedFacts) []ObligationControl {
	if !m.IsApplicable(facts) {
		return []ObligationControl{}
	}
	var stamped []ObligationControl
	for i := range m.controls {
		c := m.controls[i] // copy; the stored control stays untouched
		c.RegulationID = m.ID()
		stamped = append(stamped, c)
	}
	return stamped
}
// GetDecisionTree returns the AI Act applicability decision tree built
// during module construction.
func (m *AIActModule) GetDecisionTree() *DecisionTree {
	return m.decisionTree
}
// GetIncidentDeadlines returns AI Act incident reporting deadlines.
// Deadlines apply only at the high-risk and unacceptable tiers.
func (m *AIActModule) GetIncidentDeadlines(facts *UnifiedFacts) []IncidentDeadline {
	switch m.ClassifyRisk(facts) {
	case AIActHighRisk, AIActUnacceptable:
		return m.incidentDeadlines
	default:
		return []IncidentDeadline{}
	}
}
// buildDecisionTree constructs the four-level AI Act risk-classification
// decision tree (prohibited -> high risk -> limited risk -> minimal risk)
// and stores it on the module. Fixes the misspelling "Risikomanagemment"
// -> "Risikomanagement" in the high-risk explanation text.
func (m *AIActModule) buildDecisionTree() {
	m.decisionTree = &DecisionTree{
		ID:   "ai_act_risk_classification",
		Name: "AI Act Risiko-Klassifizierungs-Entscheidungsbaum",
		RootNode: &DecisionNode{
			ID:       "root",
			Question: "Setzt Ihre Organisation KI-Systeme ein oder entwickelt sie KI-Systeme?",
			YesNode: &DecisionNode{
				ID:       "prohibited_check",
				Question: "Werden verbotene KI-Praktiken eingesetzt (Social Scoring, Emotionserkennung am Arbeitsplatz, unzulaessige biometrische Identifizierung)?",
				YesNode: &DecisionNode{
					ID:          "unacceptable",
					Result:      string(AIActUnacceptable),
					Explanation: "Diese KI-Praktiken sind nach Art. 5 AI Act verboten und muessen unverzueglich eingestellt werden.",
				},
				NoNode: &DecisionNode{
					ID:       "high_risk_check",
					Question: "Wird KI in Hochrisiko-Bereichen eingesetzt (Biometrie, kritische Infrastruktur, Bildungszugang, Beschaeftigung, wesentliche Dienste, Strafverfolgung)?",
					YesNode: &DecisionNode{
						ID:          "high_risk",
						Result:      string(AIActHighRisk),
						Explanation: "Hochrisiko-KI-Systeme nach Anhang III unterliegen umfassenden Anforderungen an Risikomanagement, Dokumentation, Transparenz und menschliche Aufsicht.",
					},
					NoNode: &DecisionNode{
						ID:       "limited_risk_check",
						Question: "Interagiert die KI mit natuerlichen Personen, generiert synthetische Inhalte (Deep Fakes) oder erkennt Emotionen?",
						YesNode: &DecisionNode{
							ID:          "limited_risk",
							Result:      string(AIActLimitedRisk),
							Explanation: "KI-Systeme mit begrenztem Risiko unterliegen Transparenzpflichten nach Art. 50 AI Act.",
						},
						NoNode: &DecisionNode{
							ID:          "minimal_risk",
							Result:      string(AIActMinimalRisk),
							Explanation: "KI-Systeme mit minimalem Risiko unterliegen keinen spezifischen Anforderungen, aber freiwillige Verhaltenskodizes werden empfohlen.",
						},
					},
				},
			},
			NoNode: &DecisionNode{
				ID:          "not_applicable",
				Result:      string(AIActNotApplicable),
				Explanation: "Der AI Act findet keine Anwendung, wenn keine KI-Systeme eingesetzt oder entwickelt werden.",
			},
		},
	}
}