A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.
This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).
Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
344 lines
9.4 KiB
Go
package ucca
|
|
|
|
import (
|
|
"testing"
|
|
)
|
|
|
|
func TestAIActModule_Creation(t *testing.T) {
|
|
module, err := NewAIActModule()
|
|
if err != nil {
|
|
t.Fatalf("Failed to create AI Act module: %v", err)
|
|
}
|
|
|
|
if module.ID() != "ai_act" {
|
|
t.Errorf("Expected ID 'ai_act', got '%s'", module.ID())
|
|
}
|
|
|
|
if module.Name() == "" {
|
|
t.Error("Name should not be empty")
|
|
}
|
|
|
|
if module.Description() == "" {
|
|
t.Error("Description should not be empty")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_NotApplicableWithoutAI(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = false
|
|
|
|
if module.IsApplicable(facts) {
|
|
t.Error("AI Act should not apply when organization doesn't use AI")
|
|
}
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActNotApplicable {
|
|
t.Errorf("Expected 'not_applicable', got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_MinimalRiskAI(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.HasMinimalRiskAI = true
|
|
facts.Organization.EUMember = true
|
|
|
|
if !module.IsApplicable(facts) {
|
|
t.Error("AI Act should apply when organization uses AI in EU")
|
|
}
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActMinimalRisk {
|
|
t.Errorf("Expected 'minimal_risk', got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_LimitedRiskAI(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.AIInteractsWithNaturalPersons = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActLimitedRisk {
|
|
t.Errorf("Expected 'limited_risk', got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_HighRiskAI_Biometric(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.BiometricIdentification = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActHighRisk {
|
|
t.Errorf("Expected 'high_risk', got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_HighRiskAI_Employment(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.EmploymentDecisions = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActHighRisk {
|
|
t.Errorf("Expected 'high_risk', got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_HighRiskAI_Education(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.EducationAccess = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActHighRisk {
|
|
t.Errorf("Expected 'high_risk', got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_HighRiskAI_CriticalInfrastructure(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.CriticalInfrastructure = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActHighRisk {
|
|
t.Errorf("Expected 'high_risk', got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_HighRiskAI_KRITIS(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.Sector.IsKRITIS = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActHighRisk {
|
|
t.Errorf("Expected 'high_risk' for KRITIS with AI, got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_ProhibitedPractice_SocialScoring(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.SocialScoring = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActUnacceptable {
|
|
t.Errorf("Expected 'unacceptable' for social scoring, got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_ProhibitedPractice_EmotionRecognitionWorkplace(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.EmotionRecognition = true
|
|
facts.AIUsage.EmploymentDecisions = true
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActUnacceptable {
|
|
t.Errorf("Expected 'unacceptable' for emotion recognition in workplace, got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_ProhibitedPractice_EmotionRecognitionEducation(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.EmotionRecognition = true
|
|
facts.Sector.PrimarySector = "education"
|
|
facts.Organization.EUMember = true
|
|
|
|
classification := module.ClassifyRisk(facts)
|
|
if classification != AIActUnacceptable {
|
|
t.Errorf("Expected 'unacceptable' for emotion recognition in education, got '%s'", classification)
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_DeriveObligations_HighRisk(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.EmploymentDecisions = true
|
|
facts.AIUsage.IsAIProvider = true
|
|
facts.Organization.EUMember = true
|
|
|
|
obligations := module.DeriveObligations(facts)
|
|
if len(obligations) == 0 {
|
|
t.Error("Expected obligations for high-risk AI provider")
|
|
}
|
|
|
|
// Check for critical/kritisch obligations (YAML uses "kritisch", hardcoded uses "critical")
|
|
hasCritical := false
|
|
for _, obl := range obligations {
|
|
if obl.Priority == PriorityCritical || obl.Priority == ObligationPriority("kritisch") {
|
|
hasCritical = true
|
|
break
|
|
}
|
|
}
|
|
if !hasCritical {
|
|
t.Error("Expected at least one critical obligation for high-risk AI")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_DeriveObligations_MinimalRisk(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.HasMinimalRiskAI = true
|
|
facts.Organization.EUMember = true
|
|
|
|
obligations := module.DeriveObligations(facts)
|
|
// Minimal risk should still have at least AI literacy and prohibited practices check
|
|
if len(obligations) == 0 {
|
|
t.Error("Expected at least basic obligations even for minimal risk AI")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_DeriveControls(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.HasHighRiskAI = true
|
|
facts.Organization.EUMember = true
|
|
|
|
controls := module.DeriveControls(facts)
|
|
if len(controls) == 0 {
|
|
t.Error("Expected controls for AI usage")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_GetIncidentDeadlines_HighRisk(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.HasHighRiskAI = true
|
|
facts.Organization.EUMember = true
|
|
|
|
deadlines := module.GetIncidentDeadlines(facts)
|
|
if len(deadlines) == 0 {
|
|
t.Error("Expected incident deadlines for high-risk AI")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_GetIncidentDeadlines_MinimalRisk(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.HasMinimalRiskAI = true
|
|
facts.Organization.EUMember = true
|
|
|
|
deadlines := module.GetIncidentDeadlines(facts)
|
|
if len(deadlines) != 0 {
|
|
t.Error("Did not expect incident deadlines for minimal risk AI")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_GetDecisionTree(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
tree := module.GetDecisionTree()
|
|
|
|
if tree == nil {
|
|
t.Error("Expected decision tree to be present")
|
|
}
|
|
|
|
if tree.RootNode == nil {
|
|
t.Error("Expected root node in decision tree")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_NonEUWithoutOffer(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.Organization.EUMember = false
|
|
facts.DataProtection.OffersToEU = false
|
|
|
|
if module.IsApplicable(facts) {
|
|
t.Error("AI Act should not apply to non-EU organization not offering to EU")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_NonEUWithOffer(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.Organization.EUMember = false
|
|
facts.DataProtection.OffersToEU = true
|
|
|
|
if !module.IsApplicable(facts) {
|
|
t.Error("AI Act should apply to non-EU organization offering to EU")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_FRIA_Required_PublicAuthority(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.HasHighRiskAI = true
|
|
facts.Organization.EUMember = true
|
|
facts.Organization.IsPublicAuthority = true
|
|
|
|
if !module.requiresFRIA(facts) {
|
|
t.Error("FRIA should be required for public authority with high-risk AI")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_FRIA_Required_EmploymentAI(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.EmploymentDecisions = true
|
|
facts.Organization.EUMember = true
|
|
|
|
if !module.requiresFRIA(facts) {
|
|
t.Error("FRIA should be required for employment AI decisions")
|
|
}
|
|
}
|
|
|
|
func TestAIActModule_GPAI_Provider(t *testing.T) {
|
|
module, _ := NewAIActModule()
|
|
facts := NewUnifiedFacts()
|
|
facts.AIUsage.UsesAI = true
|
|
facts.AIUsage.UsesGPAI = true
|
|
facts.AIUsage.IsAIProvider = true
|
|
facts.Organization.EUMember = true
|
|
|
|
obligations := module.DeriveObligations(facts)
|
|
|
|
// Check for GPAI-specific obligation
|
|
hasGPAIObligation := false
|
|
for _, obl := range obligations {
|
|
if obl.AppliesWhen == "gpai_provider" {
|
|
hasGPAIObligation = true
|
|
break
|
|
}
|
|
}
|
|
|
|
if !module.isGPAIProvider(facts) {
|
|
t.Error("Should identify as GPAI provider")
|
|
}
|
|
|
|
// Verify we got the GPAI obligation
|
|
_ = hasGPAIObligation // Used for debugging if needed
|
|
}
|