This repository has been archived on 2026-02-15. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
breakpilot-pwa/ai-compliance-sdk/internal/ucca/ai_act_module.go
Benjamin Admin 21a844cb8a fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 09:51:32 +01:00

770 lines
28 KiB
Go

package ucca
import (
"fmt"
"os"
"path/filepath"
"time"
"gopkg.in/yaml.v3"
)
// ============================================================================
// AI Act Module
// ============================================================================
//
// This module implements the EU AI Act (Regulation 2024/1689) which establishes
// harmonized rules for artificial intelligence systems in the EU.
//
// The AI Act uses a risk-based approach:
// - Unacceptable Risk: Prohibited practices (Art. 5)
// - High Risk: Annex III systems with strict requirements (Art. 6-49)
// - Limited Risk: Transparency obligations (Art. 50)
// - Minimal Risk: No additional requirements
//
// Key roles:
// - Provider: Develops or places AI on market
// - Deployer: Uses AI systems in professional activity
// - Distributor: Makes AI available on market
// - Importer: Brings AI from third countries
//
// ============================================================================
// AIActRiskLevel represents the AI Act risk classification.
//
// The values mirror the regulation's risk pyramid: prohibited practices
// (Art. 5), Annex III high-risk systems, Art. 50 transparency ("limited
// risk") cases, and everything else as minimal risk. AIActNotApplicable is
// used when the organization does not use AI at all.
type AIActRiskLevel string

const (
	AIActUnacceptable  AIActRiskLevel = "unacceptable"   // prohibited practices, Art. 5
	AIActHighRisk      AIActRiskLevel = "high_risk"      // Annex III systems
	AIActLimitedRisk   AIActRiskLevel = "limited_risk"   // transparency obligations, Art. 50
	AIActMinimalRisk   AIActRiskLevel = "minimal_risk"   // no specific obligations
	AIActNotApplicable AIActRiskLevel = "not_applicable" // no AI usage
)
// AIActModule implements the RegulationModule interface for the EU AI Act.
// Obligations, controls and incident deadlines are populated either from a
// YAML policy file or from a hardcoded fallback (see NewAIActModule).
type AIActModule struct {
	obligations       []Obligation         // all known AI Act obligations (filtered per-facts in DeriveObligations)
	controls          []ObligationControl  // recommended controls, returned whenever the module applies
	incidentDeadlines []IncidentDeadline   // reporting deadlines, returned only for high-risk/unacceptable classifications
	decisionTree      *DecisionTree        // risk-classification decision tree built by buildDecisionTree
	loaded            bool                 // set once initialization in NewAIActModule completed
}
// AIActAnnexIIICategories maps internal category keys to German display names
// for the Annex III high-risk AI categories. The keys correspond to the
// boolean flags checked in hasHighRiskAI.
var AIActAnnexIIICategories = map[string]string{
	"biometric":               "Biometrische Identifizierung und Kategorisierung",
	"critical_infrastructure": "Verwaltung und Betrieb kritischer Infrastruktur",
	"education":               "Allgemeine und berufliche Bildung",
	"employment":              "Beschaeftigung, Personalverwaltung, Zugang zu Selbststaendigkeit",
	"essential_services":      "Zugang zu wesentlichen privaten/oeffentlichen Diensten",
	"law_enforcement":         "Strafverfolgung",
	"migration":               "Migration, Asyl und Grenzkontrolle",
	"justice":                 "Rechtspflege und demokratische Prozesse",
}
// NewAIActModule constructs the AI Act module. Obligations are read from the
// YAML policy file when one is found; otherwise the built-in hardcoded set is
// used. The decision tree is always built. The returned error is currently
// always nil (kept for interface symmetry with other module constructors).
func NewAIActModule() (*AIActModule, error) {
	mod := &AIActModule{
		obligations:       []Obligation{},
		controls:          []ObligationControl{},
		incidentDeadlines: []IncidentDeadline{},
	}
	// Prefer YAML-defined obligations; on any load error silently fall back
	// to the hardcoded defaults.
	if loadErr := mod.loadFromYAML(); loadErr != nil {
		mod.loadHardcodedObligations()
	}
	mod.buildDecisionTree()
	mod.loaded = true
	return mod, nil
}
// ID returns the stable module identifier used for registration and to stamp
// derived obligations/controls.
func (m *AIActModule) ID() string {
	const moduleID = "ai_act"
	return moduleID
}
// Name returns the human-readable (German) name of the regulation.
func (m *AIActModule) Name() string {
	const displayName = "AI Act (EU KI-Verordnung)"
	return displayName
}
// Description returns a brief German description of the regulation.
func (m *AIActModule) Description() string {
	const desc = "EU-Verordnung 2024/1689 zur Festlegung harmonisierter Vorschriften fuer kuenstliche Intelligenz"
	return desc
}
// IsApplicable reports whether the AI Act applies to the organization: it
// must use AI, and it must either be established in an EU member state or
// offer its services into the EU.
func (m *AIActModule) IsApplicable(facts *UnifiedFacts) bool {
	usesAI := facts.AIUsage.UsesAI
	euNexus := facts.Organization.EUMember || facts.DataProtection.OffersToEU
	return usesAI && euNexus
}
// GetClassification returns the AI Act risk classification as a string,
// delegating the actual classification to ClassifyRisk.
func (m *AIActModule) GetClassification(facts *UnifiedFacts) string {
	level := m.ClassifyRisk(facts)
	return string(level)
}
// ClassifyRisk determines the highest applicable AI Act risk level for the
// given facts. Categories are checked in order of severity: prohibited
// practices (Art. 5) first, then Annex III high-risk, then Art. 50
// transparency cases; any remaining AI usage is minimal risk.
func (m *AIActModule) ClassifyRisk(facts *UnifiedFacts) AIActRiskLevel {
	if !facts.AIUsage.UsesAI {
		return AIActNotApplicable
	}
	switch {
	case m.hasProhibitedPractice(facts):
		// Prohibited practices (Art. 5).
		return AIActUnacceptable
	case m.hasHighRiskAI(facts):
		// Annex III high-risk categories.
		return AIActHighRisk
	case m.hasLimitedRiskAI(facts):
		// Transparency obligations (Art. 50).
		return AIActLimitedRisk
	default:
		// UsesAI was already established by the guard above, so every
		// remaining case is minimal risk. (The previous version re-checked
		// UsesAI here and had an unreachable NotApplicable fallthrough.)
		return AIActMinimalRisk
	}
}
// hasProhibitedPractice reports whether any Art. 5 prohibited AI practice is
// present in the facts.
func (m *AIActModule) hasProhibitedPractice(facts *UnifiedFacts) bool {
	ai := facts.AIUsage
	switch {
	case ai.SocialScoring:
		// Social scoring is prohibited outright.
		return true
	case ai.EmotionRecognition && (facts.Sector.PrimarySector == "education" || ai.EmploymentDecisions):
		// Emotion recognition in education or workplace contexts.
		return true
	case ai.PredictivePolicingIndividual:
		// Predictive policing targeting individuals.
		return true
	case ai.BiometricIdentification && ai.LawEnforcement:
		// Real-time remote biometric identification in public spaces is
		// generally prohibited; narrow law-enforcement exceptions exist but
		// are treated conservatively here.
		return true
	default:
		return false
	}
}
// hasHighRiskAI reports whether any Annex III high-risk AI category applies,
// either via the explicit flag or via any of the individual category flags.
func (m *AIActModule) hasHighRiskAI(facts *UnifiedFacts) bool {
	ai := facts.AIUsage
	annexIII := ai.HasHighRiskAI ||
		ai.BiometricIdentification ||
		ai.CriticalInfrastructure ||
		ai.EducationAccess ||
		ai.EmploymentDecisions ||
		ai.EssentialServices ||
		ai.LawEnforcement ||
		ai.MigrationAsylum ||
		ai.JusticeAdministration
	// KRITIS (critical infrastructure) operators using any AI are also
	// treated as high-risk.
	kritisWithAI := facts.Sector.IsKRITIS && ai.UsesAI
	return annexIII || kritisWithAI
}
// hasLimitedRiskAI reports whether Art. 50 transparency requirements apply:
// explicit flag, AI interacting with natural persons (chatbots, assistants),
// deepfake generation, or emotion recognition outside prohibited contexts.
func (m *AIActModule) hasLimitedRiskAI(facts *UnifiedFacts) bool {
	ai := facts.AIUsage
	if ai.HasLimitedRiskAI || ai.AIInteractsWithNaturalPersons || ai.GeneratesDeepfakes {
		return true
	}
	// Emotion recognition counts as limited risk only outside the contexts
	// (education / employment decisions) where it is prohibited.
	return ai.EmotionRecognition &&
		facts.Sector.PrimarySector != "education" &&
		!ai.EmploymentDecisions
}
// isProvider reports whether the organization acts as an AI provider
// (develops AI or places it on the market), based solely on the explicit
// IsAIProvider flag.
func (m *AIActModule) isProvider(facts *UnifiedFacts) bool {
	return facts.AIUsage.IsAIProvider
}
// isDeployer reports whether the organization acts as an AI deployer. The
// explicit flag wins; otherwise any organization that uses AI without being a
// provider is treated as a deployer.
func (m *AIActModule) isDeployer(facts *UnifiedFacts) bool {
	if facts.AIUsage.IsAIDeployer {
		return true
	}
	return facts.AIUsage.UsesAI && !facts.AIUsage.IsAIProvider
}
// isGPAIProvider reports whether the organization provides a general-purpose
// AI model: it must both be a provider and use/offer GPAI.
func (m *AIActModule) isGPAIProvider(facts *UnifiedFacts) bool {
	return facts.AIUsage.IsAIProvider && facts.AIUsage.UsesGPAI
}
// hasSystemicRiskGPAI reports whether the organization's GPAI model is
// classified as posing systemic risk, based solely on the explicit
// GPAIWithSystemicRisk flag.
func (m *AIActModule) hasSystemicRiskGPAI(facts *UnifiedFacts) bool {
	return facts.AIUsage.GPAIWithSystemicRisk
}
// requiresFRIA reports whether a Fundamental Rights Impact Assessment
// (Art. 27) is required: only for high-risk AI, and then for public
// authorities or for the essential-services / employment / education-access
// categories.
func (m *AIActModule) requiresFRIA(facts *UnifiedFacts) bool {
	// FRIA is only ever relevant for high-risk AI.
	if !m.hasHighRiskAI(facts) {
		return false
	}
	return facts.Organization.IsPublicAuthority ||
		facts.AIUsage.EssentialServices ||
		facts.AIUsage.EmploymentDecisions ||
		facts.AIUsage.EducationAccess
}
// DeriveObligations returns all AI Act obligations applicable to the given
// facts. Returns an empty (non-nil) slice when the module does not apply at
// all; each returned obligation is a copy stamped with this module's ID.
func (m *AIActModule) DeriveObligations(facts *UnifiedFacts) []Obligation {
	if !m.IsApplicable(facts) {
		return []Obligation{}
	}
	level := m.ClassifyRisk(facts)
	var applicable []Obligation
	for _, candidate := range m.obligations {
		if !m.obligationApplies(candidate, level, facts) {
			continue
		}
		// The range variable is already a copy, so stamping it does not
		// mutate the module's master list.
		candidate.RegulationID = m.ID()
		applicable = append(applicable, candidate)
	}
	return applicable
}
// obligationApplies reports whether a single obligation applies given the
// computed risk level and the facts. The AppliesWhen condition keys mirror
// the YAML policy file; unknown or empty conditions fall back to "applies to
// every AI user".
func (m *AIActModule) obligationApplies(obl Obligation, riskLevel AIActRiskLevel, facts *UnifiedFacts) bool {
	// Unacceptable risk is treated as (at least) high-risk for obligation
	// derivation, so prohibited systems still surface the full obligations.
	highRisk := riskLevel == AIActHighRisk || riskLevel == AIActUnacceptable
	switch obl.AppliesWhen {
	case "high_risk":
		return highRisk
	case "high_risk_provider":
		return highRisk && m.isProvider(facts)
	case "high_risk_deployer":
		return highRisk && m.isDeployer(facts)
	case "high_risk_deployer_fria":
		return highRisk && m.isDeployer(facts) && m.requiresFRIA(facts)
	case "limited_risk":
		// Transparency duties also apply to high-risk systems.
		return riskLevel == AIActLimitedRisk || riskLevel == AIActHighRisk
	case "gpai_provider":
		return m.isGPAIProvider(facts)
	case "gpai_systemic_risk":
		return m.hasSystemicRiskGPAI(facts)
	default:
		// Covers "uses_ai", the empty condition, and any unknown value.
		return facts.AIUsage.UsesAI
	}
}
// DeriveControls returns all AI Act controls when the module applies, each
// stamped with this module's ID; controls are not filtered by risk level.
// Returns an empty (non-nil) slice when the module does not apply.
func (m *AIActModule) DeriveControls(facts *UnifiedFacts) []ObligationControl {
	if !m.IsApplicable(facts) {
		return []ObligationControl{}
	}
	var stamped []ObligationControl
	for _, control := range m.controls {
		// control is a per-iteration copy; the master list stays untouched.
		control.RegulationID = m.ID()
		stamped = append(stamped, control)
	}
	return stamped
}
// GetDecisionTree returns the AI Act risk-classification decision tree built
// during construction (see buildDecisionTree).
func (m *AIActModule) GetDecisionTree() *DecisionTree {
	return m.decisionTree
}
// GetIncidentDeadlines returns the AI Act incident reporting deadlines.
// Reporting duties (Art. 73) only attach to high-risk (or prohibited)
// systems; for every other classification an empty slice is returned.
func (m *AIActModule) GetIncidentDeadlines(facts *UnifiedFacts) []IncidentDeadline {
	switch m.ClassifyRisk(facts) {
	case AIActHighRisk, AIActUnacceptable:
		return m.incidentDeadlines
	default:
		return []IncidentDeadline{}
	}
}
// ============================================================================
// YAML Loading
// ============================================================================
// loadFromYAML populates the module from the AI Act obligations YAML file.
// Candidate paths are probed in order (cwd-relative locations first, the
// container path last); the first readable file wins. Returns an error when
// no file can be read or the YAML cannot be parsed.
func (m *AIActModule) loadFromYAML() error {
	candidates := []string{
		"policies/obligations/ai_act_obligations.yaml",
		filepath.Join(".", "policies", "obligations", "ai_act_obligations.yaml"),
		filepath.Join("..", "policies", "obligations", "ai_act_obligations.yaml"),
		filepath.Join("..", "..", "policies", "obligations", "ai_act_obligations.yaml"),
		"/app/policies/obligations/ai_act_obligations.yaml",
	}
	var (
		data    []byte
		readErr error
	)
	for _, candidate := range candidates {
		if data, readErr = os.ReadFile(candidate); readErr == nil {
			break
		}
	}
	if readErr != nil {
		return fmt.Errorf("AI Act obligations YAML not found: %w", readErr)
	}
	// The AI Act YAML deliberately shares its schema with the NIS2
	// obligations file, so the NIS2 config struct is reused for decoding.
	var config NIS2ObligationsConfig
	if err := yaml.Unmarshal(data, &config); err != nil {
		return fmt.Errorf("failed to parse AI Act YAML: %w", err)
	}
	m.convertObligations(config.Obligations)
	m.convertControls(config.Controls)
	m.convertIncidentDeadlines(config.IncidentDeadlines)
	return nil
}
// convertObligations maps YAML obligation records into internal Obligation
// values and appends them to the module's list. Every obligation is stamped
// with the "ai_act" regulation ID; every evidence item is marked required.
func (m *AIActModule) convertObligations(yamlObls []ObligationYAML) {
	for _, src := range yamlObls {
		obl := Obligation{
			ID:              src.ID,
			RegulationID:    "ai_act",
			Title:           src.Title,
			Description:     src.Description,
			AppliesWhen:     src.AppliesWhen,
			Category:        ObligationCategory(src.Category),
			Responsible:     ResponsibleRole(src.Responsible),
			Priority:        ObligationPriority(src.Priority),
			ISO27001Mapping: src.ISO27001,
			HowToImplement:  src.HowTo,
		}
		// Legal basis references.
		for _, ref := range src.LegalBasis {
			obl.LegalBasis = append(obl.LegalBasis, LegalReference{
				Norm:    ref.Norm,
				Article: ref.Article,
			})
		}
		// Optional deadline; an unparseable date is silently dropped and the
		// deadline then carries no fixed date.
		if d := src.Deadline; d != nil {
			obl.Deadline = &Deadline{
				Type:     DeadlineType(d.Type),
				Duration: d.Duration,
			}
			if d.Date != "" {
				if parsed, err := time.Parse("2006-01-02", d.Date); err == nil {
					obl.Deadline.Date = &parsed
				}
			}
		}
		// Optional sanction info.
		if s := src.Sanctions; s != nil {
			obl.Sanctions = &SanctionInfo{
				MaxFine:           s.MaxFine,
				PersonalLiability: s.PersonalLiability,
			}
		}
		// Evidence names become required evidence items.
		for _, name := range src.Evidence {
			obl.Evidence = append(obl.Evidence, EvidenceItem{Name: name, Required: true})
		}
		m.obligations = append(m.obligations, obl)
	}
}
// convertControls maps YAML control records into internal ObligationControl
// values and appends them to the module's list, stamping each with the
// "ai_act" regulation ID.
func (m *AIActModule) convertControls(yamlCtrls []ControlYAML) {
	for _, src := range yamlCtrls {
		m.controls = append(m.controls, ObligationControl{
			ID:              src.ID,
			RegulationID:    "ai_act",
			Name:            src.Name,
			Description:     src.Description,
			Category:        src.Category,
			WhatToDo:        src.WhatToDo,
			ISO27001Mapping: src.ISO27001,
			Priority:        ObligationPriority(src.Priority),
		})
	}
}
// convertIncidentDeadlines maps YAML incident-deadline records into internal
// IncidentDeadline values and appends them to the module's list, stamping
// each with the "ai_act" regulation ID.
func (m *AIActModule) convertIncidentDeadlines(yamlDeadlines []IncidentDeadlineYAML) {
	for _, src := range yamlDeadlines {
		entry := IncidentDeadline{
			RegulationID: "ai_act",
			Phase:        src.Phase,
			Deadline:     src.Deadline,
			Content:      src.Content,
			Recipient:    src.Recipient,
		}
		for _, ref := range src.LegalBasis {
			entry.LegalBasis = append(entry.LegalBasis, LegalReference{
				Norm:    ref.Norm,
				Article: ref.Article,
			})
		}
		m.incidentDeadlines = append(m.incidentDeadlines, entry)
	}
}
// ============================================================================
// Hardcoded Fallback
// ============================================================================
// loadHardcodedObligations populates the module with a built-in set of AI Act
// obligations, controls, and incident deadlines. It is the fallback used when
// the YAML policy file cannot be loaded (see NewAIActModule) and replaces the
// slices wholesale rather than appending.
func (m *AIActModule) loadHardcodedObligations() {
	// Key AI Act deadlines. These appear to track the staged application
	// dates of the regulation (prohibitions/AI literacy, GPAI rules, general
	// application) — TODO confirm against Art. 113 AI Act.
	prohibitedPracticesDeadline := time.Date(2025, 2, 2, 0, 0, 0, 0, time.UTC)
	transparencyDeadline := time.Date(2026, 8, 2, 0, 0, 0, 0, time.UTC)
	gpaiDeadline := time.Date(2025, 8, 2, 0, 0, 0, 0, time.UTC)
	// Obligations: AppliesWhen keys are matched in obligationApplies; titles,
	// descriptions and evidence names are German user-facing strings.
	m.obligations = []Obligation{
		{
			ID:           "AIACT-OBL-001",
			RegulationID: "ai_act",
			Title:        "Verbotene KI-Praktiken vermeiden",
			Description:  "Sicherstellung, dass keine verbotenen KI-Praktiken eingesetzt werden (Social Scoring, Ausnutzung von Schwaechen, unterschwellige Manipulation, unzulaessige biometrische Identifizierung).",
			LegalBasis:   []LegalReference{{Norm: "Art. 5 AI Act", Article: "Verbotene Praktiken"}},
			Category:     CategoryCompliance,
			Responsible:  RoleManagement,
			Deadline:     &Deadline{Type: DeadlineAbsolute, Date: &prohibitedPracticesDeadline},
			Sanctions:    &SanctionInfo{MaxFine: "35 Mio. EUR oder 7% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "KI-Inventar mit Risikobewertung", Required: true}, {Name: "Dokumentierte Pruefung auf verbotene Praktiken", Required: true}},
			Priority:     PriorityCritical,
			AppliesWhen:  "uses_ai",
		},
		{
			ID:              "AIACT-OBL-002",
			RegulationID:    "ai_act",
			Title:           "Risikomanagementsystem fuer Hochrisiko-KI",
			Description:     "Einrichtung eines Risikomanagementsystems fuer Hochrisiko-KI-Systeme: Risikoidentifikation, -bewertung, -minderung und kontinuierliche Ueberwachung.",
			LegalBasis:      []LegalReference{{Norm: "Art. 9 AI Act", Article: "Risikomanagementsystem"}},
			Category:        CategoryGovernance,
			Responsible:     RoleKIVerantwortlicher,
			Sanctions:       &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:        []EvidenceItem{{Name: "Risikomanagement-Dokumentation", Required: true}, {Name: "Risikobewertungen pro KI-System", Required: true}},
			Priority:        PriorityCritical,
			AppliesWhen:     "high_risk",
			ISO27001Mapping: []string{"A.5.1.1", "A.8.2"},
		},
		{
			ID:           "AIACT-OBL-003",
			RegulationID: "ai_act",
			Title:        "Technische Dokumentation erstellen",
			Description:  "Erstellung umfassender technischer Dokumentation vor Inverkehrbringen: Systembeschreibung, Design-Spezifikationen, Entwicklungsprozess, Leistungsmetriken.",
			LegalBasis:   []LegalReference{{Norm: "Art. 11 AI Act", Article: "Technische Dokumentation"}},
			Category:     CategoryGovernance,
			Responsible:  RoleKIVerantwortlicher,
			Sanctions:    &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "Technische Dokumentation nach Anhang IV", Required: true}, {Name: "Systemarchitektur-Dokumentation", Required: true}},
			Priority:     PriorityHigh,
			AppliesWhen:  "high_risk_provider",
		},
		{
			ID:              "AIACT-OBL-004",
			RegulationID:    "ai_act",
			Title:           "Protokollierungsfunktion implementieren",
			Description:     "Hochrisiko-KI-Systeme muessen automatische Protokolle (Logs) erstellen: Nutzungszeitraum, Eingabedaten, Identitaet der verifizierenden Personen.",
			LegalBasis:      []LegalReference{{Norm: "Art. 12 AI Act", Article: "Aufzeichnungspflichten"}},
			Category:        CategoryTechnical,
			Responsible:     RoleITLeitung,
			Deadline:        &Deadline{Type: DeadlineRelative, Duration: "Aufbewahrung mindestens 6 Monate"},
			Sanctions:       &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:        []EvidenceItem{{Name: "Log-System-Dokumentation", Required: true}, {Name: "Aufbewahrungsrichtlinie", Required: true}},
			Priority:        PriorityHigh,
			AppliesWhen:     "high_risk",
			ISO27001Mapping: []string{"A.12.4"},
		},
		{
			ID:           "AIACT-OBL-005",
			RegulationID: "ai_act",
			Title:        "Menschliche Aufsicht sicherstellen",
			Description:  "Hochrisiko-KI muss menschliche Aufsicht ermoeglichen: Verstehen von Faehigkeiten und Grenzen, Ueberwachung, Eingreifen oder Abbrechen koennen.",
			LegalBasis:   []LegalReference{{Norm: "Art. 14 AI Act", Article: "Menschliche Aufsicht"}},
			Category:     CategoryOrganizational,
			Responsible:  RoleKIVerantwortlicher,
			Sanctions:    &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "Aufsichtskonzept", Required: true}, {Name: "Schulungsnachweise fuer Bediener", Required: true}, {Name: "Notfall-Abschaltprozedur", Required: true}},
			Priority:     PriorityCritical,
			AppliesWhen:  "high_risk",
		},
		{
			ID:           "AIACT-OBL-006",
			RegulationID: "ai_act",
			Title:        "Betreiberpflichten fuer Hochrisiko-KI",
			Description:  "Betreiber von Hochrisiko-KI muessen: Technische und organisatorische Massnahmen treffen, Eingabedaten pruefen, Betrieb ueberwachen, Protokolle aufbewahren.",
			LegalBasis:   []LegalReference{{Norm: "Art. 26 AI Act", Article: "Pflichten der Betreiber"}},
			Category:     CategoryOrganizational,
			Responsible:  RoleKIVerantwortlicher,
			Sanctions:    &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "Betriebskonzept", Required: true}, {Name: "Monitoring-Dokumentation", Required: true}},
			Priority:     PriorityHigh,
			AppliesWhen:  "high_risk_deployer",
		},
		{
			ID:           "AIACT-OBL-007",
			RegulationID: "ai_act",
			Title:        "Grundrechte-Folgenabschaetzung (FRIA)",
			Description:  "Betreiber von Hochrisiko-KI in sensiblen Bereichen muessen vor Einsatz eine Grundrechte-Folgenabschaetzung durchfuehren.",
			LegalBasis:   []LegalReference{{Norm: "Art. 27 AI Act", Article: "Grundrechte-Folgenabschaetzung"}},
			Category:     CategoryGovernance,
			Responsible:  RoleKIVerantwortlicher,
			Sanctions:    &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "FRIA-Dokumentation", Required: true}, {Name: "Risikobewertung Grundrechte", Required: true}},
			Priority:     PriorityCritical,
			AppliesWhen:  "high_risk_deployer_fria",
		},
		{
			ID:           "AIACT-OBL-008",
			RegulationID: "ai_act",
			Title:        "Transparenzpflichten fuer KI-Interaktionen",
			Description:  "Bei KI-Systemen, die mit natuerlichen Personen interagieren: Kennzeichnung der KI-Interaktion, Information ueber KI-generierte Inhalte, Kennzeichnung von Deep Fakes.",
			LegalBasis:   []LegalReference{{Norm: "Art. 50 AI Act", Article: "Transparenzpflichten"}},
			Category:     CategoryOrganizational,
			Responsible:  RoleKIVerantwortlicher,
			Deadline:     &Deadline{Type: DeadlineAbsolute, Date: &transparencyDeadline},
			Sanctions:    &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "Kennzeichnungskonzept", Required: true}, {Name: "Nutzerhinweise", Required: true}},
			Priority:     PriorityHigh,
			AppliesWhen:  "limited_risk",
		},
		{
			ID:           "AIACT-OBL-009",
			RegulationID: "ai_act",
			Title:        "GPAI-Modell Dokumentation",
			Description:  "Anbieter von GPAI-Modellen muessen technische Dokumentation erstellen, Informationen fuer nachgelagerte Anbieter bereitstellen und Urheberrechtsrichtlinie einhalten.",
			LegalBasis:   []LegalReference{{Norm: "Art. 53 AI Act", Article: "Pflichten der Anbieter von GPAI-Modellen"}},
			Category:     CategoryGovernance,
			Responsible:  RoleKIVerantwortlicher,
			Deadline:     &Deadline{Type: DeadlineAbsolute, Date: &gpaiDeadline},
			Sanctions:    &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "GPAI-Dokumentation", Required: true}, {Name: "Trainingsdaten-Summary", Required: true}},
			Priority:     PriorityHigh,
			AppliesWhen:  "gpai_provider",
		},
		{
			ID:           "AIACT-OBL-010",
			RegulationID: "ai_act",
			Title:        "KI-Kompetenz sicherstellen",
			Description:  "Anbieter und Betreiber muessen sicherstellen, dass Personal mit ausreichender KI-Kompetenz ausgestattet ist.",
			LegalBasis:   []LegalReference{{Norm: "Art. 4 AI Act", Article: "KI-Kompetenz"}},
			Category:     CategoryTraining,
			Responsible:  RoleManagement,
			Deadline:     &Deadline{Type: DeadlineAbsolute, Date: &prohibitedPracticesDeadline},
			Sanctions:    &SanctionInfo{MaxFine: "7,5 Mio. EUR oder 1% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "Schulungsnachweise", Required: true}, {Name: "Kompetenzmatrix", Required: true}},
			Priority:     PriorityMedium,
			AppliesWhen:  "uses_ai",
		},
		{
			ID:           "AIACT-OBL-011",
			RegulationID: "ai_act",
			Title:        "EU-Datenbank-Registrierung",
			Description:  "Registrierung in der EU-Datenbank fuer Hochrisiko-KI-Systeme vor Inverkehrbringen (Anbieter) bzw. Inbetriebnahme (Betreiber).",
			LegalBasis:   []LegalReference{{Norm: "Art. 49 AI Act", Article: "Registrierung"}},
			Category:     CategoryMeldepflicht,
			Responsible:  RoleKIVerantwortlicher,
			Deadline:     &Deadline{Type: DeadlineRelative, Duration: "Vor Inverkehrbringen/Inbetriebnahme"},
			Sanctions:    &SanctionInfo{MaxFine: "15 Mio. EUR oder 3% Jahresumsatz", PersonalLiability: false},
			Evidence:     []EvidenceItem{{Name: "Registrierungsbestaetigung", Required: true}, {Name: "EU-Datenbank-Eintrag", Required: true}},
			Priority:     PriorityHigh,
			AppliesWhen:  "high_risk",
		},
	}
	// Hardcoded controls: returned (stamped) by DeriveControls whenever the
	// module applies; not risk-level filtered.
	m.controls = []ObligationControl{
		{
			ID:              "AIACT-CTRL-001",
			RegulationID:    "ai_act",
			Name:            "KI-Inventar",
			Description:     "Fuehrung eines vollstaendigen Inventars aller KI-Systeme",
			Category:        "Governance",
			WhatToDo:        "Erfassung aller KI-Systeme mit Risikoeinstufung, Zweck, Anbieter, Betreiber",
			ISO27001Mapping: []string{"A.8.1"},
			Priority:        PriorityCritical,
		},
		{
			ID:           "AIACT-CTRL-002",
			RegulationID: "ai_act",
			Name:         "KI-Governance-Struktur",
			Description:  "Etablierung einer KI-Governance mit klaren Verantwortlichkeiten",
			Category:     "Governance",
			WhatToDo:     "Benennung eines KI-Verantwortlichen, Einrichtung eines KI-Boards",
			Priority:     PriorityHigh,
		},
		{
			ID:           "AIACT-CTRL-003",
			RegulationID: "ai_act",
			Name:         "Bias-Testing und Fairness",
			Description:  "Regelmaessige Pruefung auf Verzerrungen und Diskriminierung",
			Category:     "Technisch",
			WhatToDo:     "Implementierung von Bias-Detection, Fairness-Metriken, Datensatz-Audits",
			Priority:     PriorityHigh,
		},
		{
			ID:           "AIACT-CTRL-004",
			RegulationID: "ai_act",
			Name:         "Model Monitoring",
			Description:  "Kontinuierliche Ueberwachung der KI-Modellleistung",
			Category:     "Technisch",
			WhatToDo:     "Drift-Detection, Performance-Monitoring, Anomalie-Erkennung",
			Priority:     PriorityHigh,
		},
	}
	// Hardcoded incident deadlines (Art. 73 reporting): only surfaced by
	// GetIncidentDeadlines for high-risk/unacceptable classifications.
	m.incidentDeadlines = []IncidentDeadline{
		{
			RegulationID: "ai_act",
			Phase:        "Schwerwiegender Vorfall melden",
			Deadline:     "unverzueglich",
			Content:      "Meldung schwerwiegender Vorfaelle bei Hochrisiko-KI-Systemen: Tod, schwere Gesundheitsschaeden, schwerwiegende Grundrechtsverletzungen, schwere Schaeden an Eigentum oder Umwelt.",
			Recipient:    "Zustaendige Marktaufsichtsbehoerde",
			LegalBasis:   []LegalReference{{Norm: "Art. 73 AI Act"}},
		},
		{
			RegulationID: "ai_act",
			Phase:        "Fehlfunktion melden (Anbieter)",
			Deadline:     "15 Tage",
			Content:      "Anbieter von Hochrisiko-KI melden Fehlfunktionen, die einen schwerwiegenden Vorfall darstellen koennten.",
			Recipient:    "Marktaufsichtsbehoerde des Herkunftslandes",
			LegalBasis:   []LegalReference{{Norm: "Art. 73 Abs. 1 AI Act"}},
		},
	}
}
// ============================================================================
// Decision Tree
// ============================================================================
// buildDecisionTree constructs the interactive AI Act risk-classification
// decision tree and stores it on the module. The tree mirrors ClassifyRisk's
// ordering: AI usage -> prohibited practices -> high-risk -> limited risk ->
// minimal risk, with each leaf carrying the risk level string and a German
// explanation.
//
// Fix: corrected the misspelled "Risikomanagemment" to "Risikomanagement" in
// the high-risk leaf's user-facing explanation.
func (m *AIActModule) buildDecisionTree() {
	m.decisionTree = &DecisionTree{
		ID:   "ai_act_risk_classification",
		Name: "AI Act Risiko-Klassifizierungs-Entscheidungsbaum",
		RootNode: &DecisionNode{
			ID:       "root",
			Question: "Setzt Ihre Organisation KI-Systeme ein oder entwickelt sie KI-Systeme?",
			YesNode: &DecisionNode{
				ID:       "prohibited_check",
				Question: "Werden verbotene KI-Praktiken eingesetzt (Social Scoring, Emotionserkennung am Arbeitsplatz, unzulaessige biometrische Identifizierung)?",
				YesNode: &DecisionNode{
					ID:          "unacceptable",
					Result:      string(AIActUnacceptable),
					Explanation: "Diese KI-Praktiken sind nach Art. 5 AI Act verboten und muessen unverzueglich eingestellt werden.",
				},
				NoNode: &DecisionNode{
					ID:       "high_risk_check",
					Question: "Wird KI in Hochrisiko-Bereichen eingesetzt (Biometrie, kritische Infrastruktur, Bildungszugang, Beschaeftigung, wesentliche Dienste, Strafverfolgung)?",
					YesNode: &DecisionNode{
						ID:          "high_risk",
						Result:      string(AIActHighRisk),
						Explanation: "Hochrisiko-KI-Systeme nach Anhang III unterliegen umfassenden Anforderungen an Risikomanagement, Dokumentation, Transparenz und menschliche Aufsicht.",
					},
					NoNode: &DecisionNode{
						ID:       "limited_risk_check",
						Question: "Interagiert die KI mit natuerlichen Personen, generiert synthetische Inhalte (Deep Fakes) oder erkennt Emotionen?",
						YesNode: &DecisionNode{
							ID:          "limited_risk",
							Result:      string(AIActLimitedRisk),
							Explanation: "KI-Systeme mit begrenztem Risiko unterliegen Transparenzpflichten nach Art. 50 AI Act.",
						},
						NoNode: &DecisionNode{
							ID:          "minimal_risk",
							Result:      string(AIActMinimalRisk),
							Explanation: "KI-Systeme mit minimalem Risiko unterliegen keinen spezifischen Anforderungen, aber freiwillige Verhaltenskodizes werden empfohlen.",
						},
					},
				},
			},
			NoNode: &DecisionNode{
				ID:          "not_applicable",
				Result:      string(AIActNotApplicable),
				Explanation: "Der AI Act findet keine Anwendung, wenn keine KI-Systeme eingesetzt oder entwickelt werden.",
			},
		},
	}
}