This repository has been archived on 2026-02-15. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
breakpilot-pwa/ai-compliance-sdk/internal/ucca/ai_act_module_test.go
BreakPilot Dev 19855efacc
Some checks failed
Tests / Go Tests (push) Has been cancelled
Tests / Python Tests (push) Has been cancelled
Tests / Integration Tests (push) Has been cancelled
Tests / Go Lint (push) Has been cancelled
Tests / Python Lint (push) Has been cancelled
Tests / Security Scan (push) Has been cancelled
Tests / All Checks Passed (push) Has been cancelled
Security Scanning / Secret Scanning (push) Has been cancelled
Security Scanning / Dependency Vulnerability Scan (push) Has been cancelled
Security Scanning / Go Security Scan (push) Has been cancelled
Security Scanning / Python Security Scan (push) Has been cancelled
Security Scanning / Node.js Security Scan (push) Has been cancelled
Security Scanning / Docker Image Security (push) Has been cancelled
Security Scanning / Security Summary (push) Has been cancelled
CI/CD Pipeline / Go Tests (push) Has been cancelled
CI/CD Pipeline / Python Tests (push) Has been cancelled
CI/CD Pipeline / Website Tests (push) Has been cancelled
CI/CD Pipeline / Linting (push) Has been cancelled
CI/CD Pipeline / Security Scan (push) Has been cancelled
CI/CD Pipeline / Docker Build & Push (push) Has been cancelled
CI/CD Pipeline / Integration Tests (push) Has been cancelled
CI/CD Pipeline / Deploy to Staging (push) Has been cancelled
CI/CD Pipeline / Deploy to Production (push) Has been cancelled
CI/CD Pipeline / CI Summary (push) Has been cancelled
ci/woodpecker/manual/build-ci-image Pipeline was successful
ci/woodpecker/manual/main Pipeline failed
feat: BreakPilot PWA - Full codebase (clean push without large binaries)
All services: admin-v2, studio-v2, website, ai-compliance-sdk,
consent-service, klausur-service, voice-service, and infrastructure.
Large PDFs and compiled binaries excluded via .gitignore.
2026-02-11 13:25:58 +01:00

344 lines
9.4 KiB
Go

package ucca
import (
"testing"
)
// TestAIActModule_Creation verifies the module constructor succeeds and
// that the module exposes the expected identity metadata.
func TestAIActModule_Creation(t *testing.T) {
	m, err := NewAIActModule()
	if err != nil {
		t.Fatalf("Failed to create AI Act module: %v", err)
	}
	// The module must self-identify as "ai_act" and carry human-readable metadata.
	if got := m.ID(); got != "ai_act" {
		t.Errorf("Expected ID 'ai_act', got '%s'", got)
	}
	if m.Name() == "" {
		t.Error("Name should not be empty")
	}
	if m.Description() == "" {
		t.Error("Description should not be empty")
	}
}
// TestAIActModule_NotApplicableWithoutAI verifies the AI Act module is out of
// scope and classifies as not-applicable when the organization uses no AI.
func TestAIActModule_NotApplicableWithoutAI(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = false
	if module.IsApplicable(facts) {
		t.Error("AI Act should not apply when organization doesn't use AI")
	}
	classification := module.ClassifyRisk(facts)
	if classification != AIActNotApplicable {
		t.Errorf("Expected 'not_applicable', got '%s'", classification)
	}
}
// TestAIActModule_MinimalRiskAI verifies that an EU organization using only
// minimal-risk AI is in scope and classified as minimal risk.
func TestAIActModule_MinimalRiskAI(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.HasMinimalRiskAI = true
	facts.Organization.EUMember = true
	if !module.IsApplicable(facts) {
		t.Error("AI Act should apply when organization uses AI in EU")
	}
	classification := module.ClassifyRisk(facts)
	if classification != AIActMinimalRisk {
		t.Errorf("Expected 'minimal_risk', got '%s'", classification)
	}
}
// TestAIActModule_LimitedRiskAI verifies that AI interacting with natural
// persons (transparency obligations, Art. 50) classifies as limited risk.
func TestAIActModule_LimitedRiskAI(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.AIInteractsWithNaturalPersons = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActLimitedRisk {
		t.Errorf("Expected 'limited_risk', got '%s'", classification)
	}
}
// TestAIActModule_HighRiskAI_Biometric verifies that biometric identification
// AI classifies as high risk.
func TestAIActModule_HighRiskAI_Biometric(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.BiometricIdentification = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActHighRisk {
		t.Errorf("Expected 'high_risk', got '%s'", classification)
	}
}
// TestAIActModule_HighRiskAI_Employment verifies that AI used for employment
// decisions classifies as high risk.
func TestAIActModule_HighRiskAI_Employment(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.EmploymentDecisions = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActHighRisk {
		t.Errorf("Expected 'high_risk', got '%s'", classification)
	}
}
// TestAIActModule_HighRiskAI_Education verifies that AI controlling access to
// education classifies as high risk.
func TestAIActModule_HighRiskAI_Education(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.EducationAccess = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActHighRisk {
		t.Errorf("Expected 'high_risk', got '%s'", classification)
	}
}
// TestAIActModule_HighRiskAI_CriticalInfrastructure verifies that AI used in
// critical infrastructure classifies as high risk.
func TestAIActModule_HighRiskAI_CriticalInfrastructure(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.CriticalInfrastructure = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActHighRisk {
		t.Errorf("Expected 'high_risk', got '%s'", classification)
	}
}
// TestAIActModule_HighRiskAI_KRITIS verifies that an organization in a KRITIS
// (German critical infrastructure) sector using AI classifies as high risk,
// even without an explicit high-risk AI usage flag.
func TestAIActModule_HighRiskAI_KRITIS(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.Sector.IsKRITIS = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActHighRisk {
		t.Errorf("Expected 'high_risk' for KRITIS with AI, got '%s'", classification)
	}
}
// TestAIActModule_ProhibitedPractice_SocialScoring verifies that social
// scoring is classified as an unacceptable (prohibited) practice.
func TestAIActModule_ProhibitedPractice_SocialScoring(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.SocialScoring = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActUnacceptable {
		t.Errorf("Expected 'unacceptable' for social scoring, got '%s'", classification)
	}
}
// TestAIActModule_ProhibitedPractice_EmotionRecognitionWorkplace verifies that
// emotion recognition combined with employment decisions (workplace context)
// is classified as an unacceptable practice.
func TestAIActModule_ProhibitedPractice_EmotionRecognitionWorkplace(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.EmotionRecognition = true
	// EmploymentDecisions marks the workplace context that makes this prohibited.
	facts.AIUsage.EmploymentDecisions = true
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActUnacceptable {
		t.Errorf("Expected 'unacceptable' for emotion recognition in workplace, got '%s'", classification)
	}
}
// TestAIActModule_ProhibitedPractice_EmotionRecognitionEducation verifies that
// emotion recognition in the education sector is classified as an unacceptable
// practice.
func TestAIActModule_ProhibitedPractice_EmotionRecognitionEducation(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.EmotionRecognition = true
	// The education sector is the context that makes this prohibited.
	facts.Sector.PrimarySector = "education"
	facts.Organization.EUMember = true
	classification := module.ClassifyRisk(facts)
	if classification != AIActUnacceptable {
		t.Errorf("Expected 'unacceptable' for emotion recognition in education, got '%s'", classification)
	}
}
// TestAIActModule_DeriveObligations_HighRisk verifies that a high-risk AI
// provider receives obligations and that at least one of them is
// critical-priority.
func TestAIActModule_DeriveObligations_HighRisk(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.EmploymentDecisions = true
	facts.AIUsage.IsAIProvider = true
	facts.Organization.EUMember = true
	obligations := module.DeriveObligations(facts)
	if len(obligations) == 0 {
		t.Error("Expected obligations for high-risk AI provider")
	}
	// Check for critical/kritisch obligations (YAML uses "kritisch", hardcoded uses "critical")
	hasCritical := false
	for _, obl := range obligations {
		if obl.Priority == PriorityCritical || obl.Priority == ObligationPriority("kritisch") {
			hasCritical = true
			break
		}
	}
	if !hasCritical {
		t.Error("Expected at least one critical obligation for high-risk AI")
	}
}
// TestAIActModule_DeriveObligations_MinimalRisk verifies that even minimal-risk
// AI usage yields some baseline obligations.
func TestAIActModule_DeriveObligations_MinimalRisk(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.HasMinimalRiskAI = true
	facts.Organization.EUMember = true
	obligations := module.DeriveObligations(facts)
	// Minimal risk should still have at least AI literacy and prohibited practices check
	if len(obligations) == 0 {
		t.Error("Expected at least basic obligations even for minimal risk AI")
	}
}
// TestAIActModule_DeriveControls verifies that high-risk AI usage yields a
// non-empty set of controls.
func TestAIActModule_DeriveControls(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.HasHighRiskAI = true
	facts.Organization.EUMember = true
	controls := module.DeriveControls(facts)
	if len(controls) == 0 {
		t.Error("Expected controls for AI usage")
	}
}
// TestAIActModule_GetIncidentDeadlines_HighRisk verifies that high-risk AI
// usage yields incident-reporting deadlines.
func TestAIActModule_GetIncidentDeadlines_HighRisk(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.HasHighRiskAI = true
	facts.Organization.EUMember = true
	deadlines := module.GetIncidentDeadlines(facts)
	if len(deadlines) == 0 {
		t.Error("Expected incident deadlines for high-risk AI")
	}
}
// TestAIActModule_GetIncidentDeadlines_MinimalRisk verifies that minimal-risk
// AI usage yields no incident-reporting deadlines.
func TestAIActModule_GetIncidentDeadlines_MinimalRisk(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.HasMinimalRiskAI = true
	facts.Organization.EUMember = true
	deadlines := module.GetIncidentDeadlines(facts)
	if len(deadlines) != 0 {
		t.Error("Did not expect incident deadlines for minimal risk AI")
	}
}
// TestAIActModule_GetDecisionTree verifies the module exposes a decision tree
// with a root node.
func TestAIActModule_GetDecisionTree(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	tree := module.GetDecisionTree()
	if tree == nil {
		// Fatal, not Error: dereferencing tree.RootNode below would panic on nil.
		t.Fatal("Expected decision tree to be present")
	}
	if tree.RootNode == nil {
		t.Error("Expected root node in decision tree")
	}
}
// TestAIActModule_NonEUWithoutOffer verifies the AI Act does not apply to a
// non-EU organization that does not offer services to the EU.
func TestAIActModule_NonEUWithoutOffer(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.Organization.EUMember = false
	facts.DataProtection.OffersToEU = false
	if module.IsApplicable(facts) {
		t.Error("AI Act should not apply to non-EU organization not offering to EU")
	}
}
// TestAIActModule_NonEUWithOffer verifies the AI Act's extraterritorial scope:
// it applies to a non-EU organization that offers services to the EU.
func TestAIActModule_NonEUWithOffer(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.Organization.EUMember = false
	facts.DataProtection.OffersToEU = true
	if !module.IsApplicable(facts) {
		t.Error("AI Act should apply to non-EU organization offering to EU")
	}
}
// TestAIActModule_FRIA_Required_PublicAuthority verifies that a fundamental
// rights impact assessment (FRIA) is required for a public authority deploying
// high-risk AI.
func TestAIActModule_FRIA_Required_PublicAuthority(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.HasHighRiskAI = true
	facts.Organization.EUMember = true
	facts.Organization.IsPublicAuthority = true
	if !module.requiresFRIA(facts) {
		t.Error("FRIA should be required for public authority with high-risk AI")
	}
}
// TestAIActModule_FRIA_Required_EmploymentAI verifies that a FRIA is required
// when AI is used for employment decisions.
func TestAIActModule_FRIA_Required_EmploymentAI(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.EmploymentDecisions = true
	facts.Organization.EUMember = true
	if !module.requiresFRIA(facts) {
		t.Error("FRIA should be required for employment AI decisions")
	}
}
// TestAIActModule_GPAI_Provider verifies that an EU organization providing a
// general-purpose AI model is identified as a GPAI provider and receives a
// GPAI-specific obligation.
func TestAIActModule_GPAI_Provider(t *testing.T) {
	module, err := NewAIActModule()
	if err != nil {
		// Fail fast: continuing with a nil module would panic below.
		t.Fatalf("NewAIActModule: %v", err)
	}
	facts := NewUnifiedFacts()
	facts.AIUsage.UsesAI = true
	facts.AIUsage.UsesGPAI = true
	facts.AIUsage.IsAIProvider = true
	facts.Organization.EUMember = true
	if !module.isGPAIProvider(facts) {
		t.Error("Should identify as GPAI provider")
	}
	// The derived obligations must include the GPAI-specific one. The original
	// test computed this flag but never asserted it; assert it now.
	obligations := module.DeriveObligations(facts)
	hasGPAIObligation := false
	for _, obl := range obligations {
		if obl.AppliesWhen == "gpai_provider" {
			hasGPAIObligation = true
			break
		}
	}
	if !hasGPAIObligation {
		t.Error("Expected a GPAI-specific obligation (AppliesWhen == \"gpai_provider\")")
	}
}