feat: DSFA Section 8 KI-Anwendungsfälle + Bundesland RAG-Ingest
All checks were successful
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-ai-compliance (push) Successful in 38s
CI / test-python-backend-compliance (push) Successful in 33s
CI / test-python-document-crawler (push) Successful in 24s
CI / test-python-dsms-gateway (push) Successful in 19s
All checks were successful
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-ai-compliance (push) Successful in 38s
CI / test-python-backend-compliance (push) Successful in 33s
CI / test-python-document-crawler (push) Successful in 24s
CI / test-python-dsms-gateway (push) Successful in 19s
- Migration 028: ai_use_case_modules JSONB + section_8_complete auf compliance_dsfas - Neues ai-use-case-types.ts: AIUseCaseModule Interface, 8 Typen, Art22Assessment, AI Act Risikoklassen, WP248-Kriterien, Privacy by Design, createEmptyModule() Helper - types.ts: Section 8 in DSFA_SECTIONS, ai_use_case_modules im DSFA Interface, section_8_complete in DSFASectionProgress - api.ts: addAIUseCaseModule, updateAIUseCaseModule, removeAIUseCaseModule - 5 neue UI-Komponenten: AIUseCaseTypeSelector, Art22AssessmentPanel, AIRiskCriteriaChecklist, AIUseCaseModuleEditor (7 Tabs), AIUseCaseSection - DSFASidebar: Section 8 Eintrag + calculateSectionProgress case 8 - ReviewScheduleSection: ai_use_case_module Trigger-Typ ergänzt - page.tsx: Section 8 Rendering + Weiter-Button auf activeSection < 8 + KI-Module Counter - scripts/ingest-dsfa-bundesland.sh: WP248 + alle 17 Behörden → bp_dsfa_corpus - Docs: dsfa.md Section 8 + RAG-Corpus, Developer Portal DSFA mit AI-Modul-Code-Beispielen Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
448
admin-compliance/lib/sdk/dsfa/ai-use-case-types.ts
Normal file
448
admin-compliance/lib/sdk/dsfa/ai-use-case-types.ts
Normal file
@@ -0,0 +1,448 @@
|
||||
/**
|
||||
* DSFA AI Use Case Types
|
||||
*
|
||||
* Type definitions, constants, and helpers for Section 8 "KI-Anwendungsfälle"
|
||||
* of the DSFA module. Covers Art. 22 DSGVO, EU AI Act risk classification,
|
||||
* and WP248-based risk criteria for AI/ML processing activities.
|
||||
*/
|
||||
|
||||
import type { DSFARiskLevel } from './types'
|
||||
|
||||
// =============================================================================
|
||||
// ENUMS & UNION TYPES
|
||||
// =============================================================================
|
||||
|
||||
/** Category of AI/ML use case that a DSFA Section 8 module describes. */
export type AIUseCaseType =
  | 'chatbot_nlp'
  | 'recommendation'
  | 'adm_scoring' // automated decision-making / scoring
  | 'video_image'
  | 'biometrics'
  | 'iot_sensors'
  | 'generative_ai'
  | 'custom'

/** EU AI Act risk tier, ordered from most to least restrictive. */
export type AIActRiskClass =
  | 'unacceptable'
  | 'high_risk'
  | 'limited'
  | 'minimal'

/** Exception that permits solely automated decisions under Art. 22(2) GDPR. */
export type Art22ExceptionType =
  | 'contract' // Art. 22(2)(a) – necessary for performance of a contract
  | 'legal' // Art. 22(2)(b) – authorised by law
  | 'consent' // Art. 22(2)(c) – explicit consent

/** Categories of privacy-by-design measures selectable for an AI module. */
export type PrivacyByDesignCategory =
  | 'data_minimisation'
  | 'pseudonymisation'
  | 'encryption'
  | 'purpose_limitation'
  | 'access_control'
  | 'audit_logging'
  | 'explainability'
  | 'human_oversight'
  | 'fairness_testing'
  | 'model_governance'

/** Event kinds that should trigger a re-review of an AI use case module. */
export type AIModuleReviewTriggerType =
  | 'model_update'
  | 'data_drift'
  | 'accuracy_drop'
  | 'new_use_case'
  | 'regulatory_change'
  | 'incident'
  | 'periodic'
||||
// =============================================================================
|
||||
// INTERFACES
|
||||
// =============================================================================
|
||||
|
||||
/** Outcome of the Art. 22 GDPR assessment for one AI use case module. */
export interface Art22Assessment {
  // True when the module performs solely automated decisions in scope of Art. 22
  applies: boolean
  justification?: string
  // Which Art. 22(2) exception is relied on when `applies` is true
  exception_type?: Art22ExceptionType
  safeguards: Art22Safeguard[]
}

/** A single safeguard to be provided when Art. 22 applies. */
export interface Art22Safeguard {
  id: string
  label: string
  implemented: boolean
  description?: string
}

/** One risk criterion (WP248-based) evaluated against the AI use case. */
export interface AIUseCaseRiskCriterion {
  id: string
  applies: boolean
  justification?: string
  severity: 'low' | 'medium' | 'high'
}

/** A concrete privacy-by-design measure attached to the module. */
export interface PrivacyByDesignMeasure {
  category: PrivacyByDesignCategory
  description: string
  implemented: boolean
  implementation_date?: string // presumably an ISO date string — TODO confirm against backend model
}

/** Condition under which the AI module must be re-reviewed. */
export interface AIModuleReviewTrigger {
  type: AIModuleReviewTriggerType
  description: string
  threshold?: string // e.g. "accuracy < 80%"
  monitoring_interval?: string // e.g. "weekly"
}

/** AI-specific risk entry, cross-referencing the DSFA risk catalog. */
export interface AIUseCaseModuleRisk {
  risk_id: string // Reference to DSFA risk catalog entry
  description: string
  likelihood: 'low' | 'medium' | 'high'
  impact: 'low' | 'medium' | 'high'
  mitigation_ids: string[]
}

/** Mitigation applied to an AI-specific risk, cross-referencing the DSFA mitigation library. */
export interface AIUseCaseModuleMitigation {
  mitigation_id: string // Reference to DSFA mitigation library
  description: string
  status: 'planned' | 'in_progress' | 'implemented'
}
|
||||
|
||||
/**
 * One modular AI use case appended to a DSFA as Section 8
 * ("KI-Anwendungsfälle"). Field groups mirror the seven editor tabs
 * of the AIUseCaseModuleEditor UI.
 */
export interface AIUseCaseModule {
  id: string
  use_case_type: AIUseCaseType
  created_at: string
  updated_at: string

  // Tab 1: System description
  name: string
  model_description: string // nature and workings of the AI system
  model_type?: string // e.g. "Random Forest", "GPT-based LLM"
  data_flow_description?: string // description of the data flow
  provider?: string // vendor / service provider
  provider_country?: string // relevant for third-country transfer checks
  third_country_transfer: boolean

  // Tab 2: Data & data subjects
  input_data_categories: string[]
  output_data_categories: string[]
  involves_special_categories: boolean // Art. 9 GDPR special categories
  special_categories_justification?: string
  data_subjects: string[]
  estimated_volume?: string // e.g. ">10.000 Personen"
  data_retention_months?: number

  // Tab 3: Purpose & legal basis
  processing_purpose: string
  legal_basis: string
  legal_basis_details?: string
  art22_assessment: Art22Assessment

  // Tab 4: AI criteria & AI Act classification
  risk_criteria: AIUseCaseRiskCriterion[]
  ai_act_risk_class: AIActRiskClass
  ai_act_justification?: string
  wp248_criteria_met: string[] // IDs of the applicable WP248 criteria

  // Tab 5: Risk analysis
  risks: AIUseCaseModuleRisk[]

  // Tab 6: Mitigations & privacy by design
  mitigations: AIUseCaseModuleMitigation[]
  privacy_by_design_measures: PrivacyByDesignMeasure[]

  // Tab 7: Review triggers & monitoring
  review_triggers: AIModuleReviewTrigger[]
  monitoring_description?: string
  last_reviewed_at?: string
  next_review_date?: string
}
|
||||
|
||||
// =============================================================================
|
||||
// CONSTANTS
|
||||
// =============================================================================
|
||||
|
||||
/**
 * Static metadata per AI use case type: UI label and icon, description,
 * typical risks shown as hints, and the WP248 criteria IDs that are
 * pre-selected when a module of this type is created (see createEmptyModule).
 * Labels/descriptions are German end-user strings — do not translate.
 */
export const AI_USE_CASE_TYPES: Record<AIUseCaseType, {
  label: string
  icon: string
  description: string
  typical_risks: string[]
  wp248_criteria: string[]
}> = {
  chatbot_nlp: {
    label: 'Chatbot / NLP',
    icon: '💬',
    description: 'Natürliche Sprachverarbeitung, Konversationssysteme, automatisierte Kundenkommunikation',
    typical_risks: ['Fehlinformation', 'Datenpersistenz im Verlauf', 'Unbeabsichtigte Datenoffenbarung'],
    wp248_criteria: ['K1', 'K4'],
  },
  recommendation: {
    label: 'Empfehlungssystem',
    icon: '🎯',
    description: 'Personalisierung, Inhaltsempfehlungen, Produktvorschläge basierend auf Nutzerverhalten',
    typical_risks: ['Profiling', 'Verhaltensmanipulation', 'Filterblasen'],
    wp248_criteria: ['K1', 'K3', 'K6'],
  },
  adm_scoring: {
    label: 'ADM / Scoring',
    icon: '⚖️',
    description: 'Automatisierte Entscheidungsfindung mit Rechtswirkung, Bonitätsprüfung, HR-Scoring',
    typical_risks: ['Diskriminierung', 'Fehlende Erklärbarkeit', 'Art. 22-Verletzung'],
    wp248_criteria: ['K1', 'K2', 'K3', 'K5'],
  },
  video_image: {
    label: 'Video / Bildanalyse',
    icon: '📹',
    description: 'Videoüberwachung, Bildklassifikation, Objekterkennung, Verhaltensanalyse',
    typical_risks: ['Systematische Überwachung', 'Verdeckte Verarbeitung', 'Biometrische Ableitung'],
    wp248_criteria: ['K2', 'K3', 'K6', 'K7'],
  },
  biometrics: {
    label: 'Biometrische Daten',
    icon: '👁️',
    description: 'Gesichtserkennung, Fingerabdruck, Stimmerkennung, biometrische Authentifizierung',
    typical_risks: ['Art. 9-Daten', 'Unwiderruflichkeit biometrischer Merkmale', 'Identitätsdiebstahl'],
    wp248_criteria: ['K2', 'K3', 'K4', 'K7'],
  },
  iot_sensors: {
    label: 'IoT / Sensordaten',
    icon: '📡',
    description: 'Standortverfolgung, Verhaltenssensoren, Smart Home, Wearables',
    typical_risks: ['Lückenlose Überwachung', 'Kontextinferenz', 'Datenmenge'],
    wp248_criteria: ['K2', 'K3', 'K5'],
  },
  generative_ai: {
    label: 'Generative KI',
    icon: '🤖',
    description: 'LLMs, Bildgenerierung, Code-Generierung, synthetische Daten, RAG-Systeme',
    typical_risks: ['Halluzinationen', 'Datenleckage im Prompt', 'Urheberrecht', 'Deepfakes'],
    wp248_criteria: ['K1', 'K4', 'K6', 'K9'],
  },
  custom: {
    label: 'Sonstige KI',
    icon: '⚙️',
    description: 'Sonstige KI-gestützte Verarbeitung personenbezogener Daten',
    typical_risks: ['Unbekannte Nebenwirkungen', 'Datenschutz durch Design fehlt'],
    wp248_criteria: [], // no pre-selected criteria for free-form use cases
  },
}
|
||||
|
||||
/**
 * Checklist of AI-relevant DPIA risk criteria. Each module gets a per-module
 * copy of these (id + default_severity) in createEmptyModule; the user then
 * marks which criteria apply. `gdpr_ref` cites the legal anchor shown in the UI.
 */
export const AI_RISK_CRITERIA: Array<{
  id: string
  label: string
  description: string
  gdpr_ref: string
  default_severity: 'low' | 'medium' | 'high'
}> = [
  {
    id: 'adm_profiling',
    label: 'ADM / Profiling',
    description: 'Automatisierte Entscheidungen oder systematisches Profiling mit Auswirkungen auf Personen',
    gdpr_ref: 'Art. 22 DSGVO, Erwägungsgrund 71, WP248 K1',
    default_severity: 'high',
  },
  {
    id: 'systematic_monitoring',
    label: 'Systematische Überwachung',
    description: 'Überwachung öffentlicher Bereiche oder systematische Beobachtung von Personen',
    gdpr_ref: 'Art. 35 Abs. 3 lit. c DSGVO, WP248 K2',
    default_severity: 'high',
  },
  {
    id: 'large_scale',
    label: 'Großflächige Verarbeitung',
    description: 'Verarbeitung in großem Umfang nach Art. 9 oder Strafverfolgungsdaten',
    gdpr_ref: 'Art. 35 Abs. 3 lit. b DSGVO, WP248 K3',
    default_severity: 'medium',
  },
  {
    id: 'innovative_tech',
    label: 'Innovative Technologie',
    description: 'Einsatz neuer Technologien mit unbekannten Risiken für die betroffenen Personen',
    gdpr_ref: 'Art. 35 Abs. 1 DSGVO, Erwägungsgrund 89, WP248 K9',
    default_severity: 'medium',
  },
  {
    id: 'change_trigger',
    label: 'Wesentliche Änderung',
    description: 'Änderung des Verarbeitungszwecks, Datenbasis oder Modells seit letzter DSFA',
    gdpr_ref: 'Art. 35 Abs. 11 DSGVO',
    default_severity: 'low',
  },
  {
    id: 'dpo_consultation',
    label: 'DSB-Konsultation empfohlen',
    description: 'KI-System fällt in die Blacklist der Aufsichtsbehörde oder hat >= 2 Risikopunkte',
    gdpr_ref: 'Art. 35 Abs. 4 DSGVO, DSK-Blacklist',
    default_severity: 'high',
  },
]
|
||||
|
||||
/**
 * Display metadata and compliance requirements per EU AI Act risk class.
 * `color` is the badge color used in the UI; `requirements` are the
 * obligations surfaced to the user for the chosen class.
 */
export const AI_ACT_RISK_CLASSES: Record<AIActRiskClass, {
  label: string
  labelDE: string
  color: string
  description: string
  requirements: string[]
}> = {
  unacceptable: {
    label: 'Unacceptable Risk',
    labelDE: 'Unannehmbares Risiko',
    color: 'red',
    description: 'KI-Systeme mit unannehmbarem Risiko sind vollständig verboten (z.B. Social Scoring, subliminale Manipulation)',
    requirements: ['VERBOTEN – Betrieb nicht gestattet'],
  },
  high_risk: {
    label: 'High Risk',
    labelDE: 'Hochrisiko-KI',
    color: 'orange',
    description: 'Hochrisiko-KI in Anhang III EU AI Act (z.B. Bildung, Beschäftigung, kritische Infrastruktur)',
    requirements: [
      'Risikomanagementsystem (Art. 9 AI Act)',
      'Datenverwaltung und -qualität (Art. 10)',
      'Technische Dokumentation (Art. 11)',
      'Aufzeichnungspflicht / Logging (Art. 12)',
      'Transparenz gegenüber Nutzern (Art. 13)',
      'Menschliche Aufsicht (Art. 14)',
      'Genauigkeit, Robustheit, Cybersicherheit (Art. 15)',
    ],
  },
  limited: {
    label: 'Limited Risk',
    labelDE: 'Begrenztes Risiko',
    color: 'yellow',
    description: 'Transparenzpflichten: Nutzer müssen wissen, dass sie mit KI interagieren',
    requirements: [
      'Offenlegungspflicht bei KI-Interaktion (Art. 52 AI Act)',
      'Kennzeichnung KI-generierter Inhalte',
    ],
  },
  minimal: {
    label: 'Minimal Risk',
    labelDE: 'Minimales Risiko',
    color: 'green',
    description: 'KI mit minimalem Risiko (Spam-Filter, Empfehlungssysteme ohne wesentliche Auswirkung)',
    requirements: ['Keine spezifischen Anforderungen aus AI Act'],
  },
}
|
||||
|
||||
/**
 * Display metadata (German label, icon, short description) for each
 * privacy-by-design measure category offered in the module editor.
 */
export const PRIVACY_BY_DESIGN_CATEGORIES: Record<PrivacyByDesignCategory, {
  label: string
  icon: string
  description: string
}> = {
  data_minimisation: { label: 'Datenminimierung', icon: '✂️', description: 'Nur notwendige Daten verarbeiten' },
  pseudonymisation: { label: 'Pseudonymisierung', icon: '🔒', description: 'Personenbezug auflösen' },
  encryption: { label: 'Verschlüsselung', icon: '🔐', description: 'Daten in Ruhe und in Übertragung verschlüsseln' },
  purpose_limitation: { label: 'Zweckbindung', icon: '🎯', description: 'Strikte Zweckbegrenzung durchsetzen' },
  access_control: { label: 'Zugriffssteuerung', icon: '👥', description: 'Least Privilege für Modell und Daten' },
  audit_logging: { label: 'Audit-Logging', icon: '📋', description: 'Alle Modellentscheidungen protokollieren' },
  explainability: { label: 'Erklärbarkeit', icon: '💡', description: 'Entscheidungen nachvollziehbar machen' },
  human_oversight: { label: 'Menschliche Kontrolle', icon: '👤', description: 'Human-in-the-Loop / Human-on-the-Loop' },
  fairness_testing: { label: 'Fairness-Tests', icon: '⚖️', description: 'Diskriminierung erkennen und vermeiden' },
  model_governance: { label: 'Modell-Governance', icon: '📊', description: 'Versionierung, Drift-Monitoring, Re-Training' },
}
|
||||
|
||||
/**
 * Template list of Art. 22 safeguards. All start as `implemented: false`;
 * createEmptyModule copies each entry into the module so per-module edits
 * never mutate this shared template.
 */
export const ART22_SAFEGUARDS: Art22Safeguard[] = [
  {
    id: 'human_review',
    label: 'Recht auf menschliche Überprüfung',
    implemented: false,
    description: 'Betroffene können verlangen, dass die Entscheidung von einem Menschen überprüft wird',
  },
  {
    id: 'contestation_right',
    label: 'Anfechtungsrecht',
    implemented: false,
    description: 'Betroffene können die automatisierte Entscheidung anfechten',
  },
  {
    id: 'explanation_duty',
    label: 'Erklärungspflicht',
    implemented: false,
    description: 'Betroffene werden über die Logik und Auswirkung der Entscheidung informiert',
  },
  {
    id: 'prior_information',
    label: 'Vorabinformation',
    implemented: false,
    description: 'Betroffene werden über ADM informiert, bevor es angewandt wird',
  },
]
|
||||
|
||||
// =============================================================================
|
||||
// HELPER FUNCTIONS
|
||||
// =============================================================================
|
||||
|
||||
export function createEmptyModule(type: AIUseCaseType): AIUseCaseModule {
|
||||
const typeInfo = AI_USE_CASE_TYPES[type]
|
||||
const now = new Date().toISOString()
|
||||
|
||||
return {
|
||||
id: crypto.randomUUID(),
|
||||
use_case_type: type,
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
|
||||
name: typeInfo.label,
|
||||
model_description: '',
|
||||
third_country_transfer: false,
|
||||
|
||||
input_data_categories: [],
|
||||
output_data_categories: [],
|
||||
involves_special_categories: false,
|
||||
data_subjects: [],
|
||||
|
||||
processing_purpose: '',
|
||||
legal_basis: '',
|
||||
art22_assessment: {
|
||||
applies: false,
|
||||
safeguards: ART22_SAFEGUARDS.map(s => ({ ...s })),
|
||||
},
|
||||
|
||||
risk_criteria: AI_RISK_CRITERIA.map(c => ({
|
||||
id: c.id,
|
||||
applies: false,
|
||||
severity: c.default_severity,
|
||||
})),
|
||||
ai_act_risk_class: 'minimal',
|
||||
wp248_criteria_met: typeInfo.wp248_criteria,
|
||||
|
||||
risks: [],
|
||||
mitigations: [],
|
||||
privacy_by_design_measures: [],
|
||||
review_triggers: [],
|
||||
}
|
||||
}
|
||||
|
||||
export function calculateModuleRiskLevel(module: AIUseCaseModule): DSFARiskLevel {
|
||||
const appliedCriteria = module.risk_criteria.filter(c => c.applies)
|
||||
const highCount = appliedCriteria.filter(c => c.severity === 'high').length
|
||||
const mediumCount = appliedCriteria.filter(c => c.severity === 'medium').length
|
||||
|
||||
if (module.ai_act_risk_class === 'unacceptable') return 'very_high'
|
||||
if (module.ai_act_risk_class === 'high_risk' || highCount >= 2) return 'high'
|
||||
if (highCount === 1 || mediumCount >= 2) return 'medium'
|
||||
if (mediumCount === 1 || appliedCriteria.length > 0) return 'low'
|
||||
return 'low'
|
||||
}
|
||||
|
||||
export function checkArt22Applicability(module: AIUseCaseModule): boolean {
|
||||
return module.use_case_type === 'adm_scoring' ||
|
||||
module.risk_criteria.some(c => c.id === 'adm_profiling' && c.applies)
|
||||
}
|
||||
|
||||
export function getModuleCompletionPercentage(module: AIUseCaseModule): number {
|
||||
const checks = [
|
||||
!!module.name,
|
||||
!!module.model_description,
|
||||
module.input_data_categories.length > 0,
|
||||
module.data_subjects.length > 0,
|
||||
!!module.processing_purpose,
|
||||
!!module.legal_basis,
|
||||
module.risk_criteria.some(c => c.applies !== undefined),
|
||||
!!module.ai_act_risk_class,
|
||||
module.risks.length > 0,
|
||||
module.privacy_by_design_measures.length > 0,
|
||||
]
|
||||
return Math.round((checks.filter(Boolean).length / checks.length) * 100)
|
||||
}
|
||||
@@ -16,6 +16,7 @@ import type {
|
||||
ApproveDSFARequest,
|
||||
DSFATriggerInfo,
|
||||
} from './types'
|
||||
import type { AIUseCaseModule } from './ai-use-case-types'
|
||||
|
||||
// =============================================================================
|
||||
// CONFIGURATION
|
||||
@@ -386,6 +387,44 @@ export async function updateDSFAMitigationStatus(
|
||||
// HELPER FUNCTIONS
|
||||
// =============================================================================
|
||||
|
||||
// =============================================================================
|
||||
// AI USE CASE MODULE OPERATIONS
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* Add a new AI use case module to a DSFA (Section 8)
|
||||
*/
|
||||
export async function addAIUseCaseModule(dsfaId: string, module: AIUseCaseModule): Promise<DSFA> {
|
||||
const dsfa = await getDSFA(dsfaId)
|
||||
const existing = dsfa.ai_use_case_modules || []
|
||||
return updateDSFA(dsfaId, { ai_use_case_modules: [...existing, module] } as Partial<DSFA>)
|
||||
}
|
||||
|
||||
/**
|
||||
* Update an existing AI use case module in a DSFA
|
||||
*/
|
||||
export async function updateAIUseCaseModule(
|
||||
dsfaId: string,
|
||||
moduleId: string,
|
||||
updates: Partial<AIUseCaseModule>
|
||||
): Promise<DSFA> {
|
||||
const dsfa = await getDSFA(dsfaId)
|
||||
const existing = dsfa.ai_use_case_modules || []
|
||||
const updated = existing.map(m =>
|
||||
m.id === moduleId ? { ...m, ...updates, updated_at: new Date().toISOString() } : m
|
||||
)
|
||||
return updateDSFA(dsfaId, { ai_use_case_modules: updated } as Partial<DSFA>)
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an AI use case module from a DSFA
|
||||
*/
|
||||
export async function removeAIUseCaseModule(dsfaId: string, moduleId: string): Promise<DSFA> {
|
||||
const dsfa = await getDSFA(dsfaId)
|
||||
const updated = (dsfa.ai_use_case_modules || []).filter(m => m.id !== moduleId)
|
||||
return updateDSFA(dsfaId, { ai_use_case_modules: updated } as Partial<DSFA>)
|
||||
}
|
||||
|
||||
function calculateRiskLevelString(
|
||||
likelihood: 'low' | 'medium' | 'high',
|
||||
impact: 'low' | 'medium' | 'high'
|
||||
|
||||
@@ -8,3 +8,4 @@ export * from './types'
|
||||
export * from './api'
|
||||
export * from './risk-catalog'
|
||||
export * from './mitigation-library'
|
||||
export * from './ai-use-case-types'
|
||||
|
||||
@@ -5,6 +5,9 @@
|
||||
* aligned with the backend Go models.
|
||||
*/
|
||||
|
||||
import type { AIUseCaseModule } from './ai-use-case-types'
|
||||
export type { AIUseCaseModule } from './ai-use-case-types'
|
||||
|
||||
// =============================================================================
|
||||
// SDM GEWAEHRLEISTUNGSZIELE (Standard-Datenschutzmodell V2.0)
|
||||
// =============================================================================
|
||||
@@ -503,6 +506,7 @@ export interface DSFASectionProgress {
|
||||
section_5_complete: boolean // Betroffenenperspektive (optional)
|
||||
section_6_complete: boolean // DSB & Behördenkonsultation
|
||||
section_7_complete: boolean // Fortschreibung & Review
|
||||
section_8_complete?: boolean // KI-Anwendungsfälle (optional)
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
@@ -637,6 +641,9 @@ export interface DSFA {
|
||||
involves_ai?: boolean
|
||||
ai_trigger_ids?: string[] // IDs der ausgelösten KI-Trigger
|
||||
|
||||
// Section 8: KI-Anwendungsfälle (NEU)
|
||||
ai_use_case_modules?: AIUseCaseModule[]
|
||||
|
||||
// Section 4: Abhilfemaßnahmen (Art. 35 Abs. 7 lit. d)
|
||||
mitigations: DSFAMitigation[]
|
||||
tom_references?: string[]
|
||||
@@ -873,6 +880,15 @@ export const DSFA_SECTIONS: DSFASectionConfig[] = [
|
||||
fields: ['review_schedule', 'review_triggers', 'version'],
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
number: 8,
|
||||
title: 'AI Use Cases',
|
||||
titleDE: 'KI-Anwendungsfälle',
|
||||
description: 'Modulare Anhänge für KI-spezifische Risiken und Maßnahmen nach Art. 22 DSGVO und EU AI Act.',
|
||||
gdprRef: 'Art. 35 DSGVO, Art. 22 DSGVO, EU AI Act',
|
||||
fields: ['ai_use_case_modules'],
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
|
||||
// =============================================================================
|
||||
|
||||
Reference in New Issue
Block a user