fix(quality): Ruff/CVE/TS-Fixes, 104 neue Tests, Complexity-Refactoring
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-ai-compliance (push) Failing after 30s
CI / test-python-backend-compliance (push) Successful in 30s
CI / test-python-document-crawler (push) Successful in 21s
CI / test-python-dsms-gateway (push) Successful in 17s

- Ruff: 144 auto-fixes (unused imports, == None → is None), F821/F811/F841 manuell
- CVEs: python-multipart>=0.0.22, weasyprint>=68.0, pillow>=12.1.1, npm audit fix (0 vulns)
- TS: 5 tote Drafting-Engine-Dateien entfernt, allowed-facts/sanitizer/StepHeader/context fixes
- Tests: +104 (ISMS 58, Evidence 18, VVT 14, Generation 14) → 1449 passed
- Refactoring: collect_ci_evidence (F→A), row_to_response (E→A), extract_requirements (E→A)
- Dead Code: pca-platform, 7 Go-Handler, dsr_api.py, duplicate Schemas entfernt

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-03-07 19:00:33 +01:00
parent 6509e64dd9
commit 95fcba34cd
124 changed files with 2533 additions and 15709 deletions

View File

@@ -515,7 +515,7 @@ export function setContextPath(ctx: TemplateContext, dotPath: string, value: unk
return {
...ctx,
[section]: {
...(ctx[section] as Record<string, unknown>),
...(ctx[section] as unknown as Record<string, unknown>),
[key]: value,
},
}
@@ -526,6 +526,6 @@ export function setContextPath(ctx: TemplateContext, dotPath: string, value: unk
*/
export function getContextPath(ctx: TemplateContext, dotPath: string): unknown {
const [section, ...rest] = dotPath.split('.') as [keyof TemplateContext, ...string[]]
const sectionObj = ctx[section] as Record<string, unknown>
const sectionObj = ctx[section] as unknown as Record<string, unknown>
return sectionObj?.[rest.join('.')]
}

View File

@@ -313,7 +313,7 @@ function ContextSectionForm({
onChange: (section: keyof TemplateContext, key: string, value: unknown) => void
}) {
const fields = SECTION_FIELDS[section]
const sectionData = context[section] as Record<string, unknown>
const sectionData = context[section] as unknown as Record<string, unknown>
return (
<div className="grid grid-cols-1 md:grid-cols-2 gap-3">
@@ -523,7 +523,7 @@ function GeneratorSection({
}, [template.id]) // eslint-disable-line react-hooks/exhaustive-deps
// Computed flags pills config
const flagPills: { key: keyof typeof ruleResult.computedFlags; label: string; color: string }[] = ruleResult ? [
const flagPills: { key: string; label: string; color: string }[] = ruleResult ? [
{ key: 'IS_B2C', label: 'B2C', color: 'bg-blue-100 text-blue-700' },
{ key: 'SERVICE_IS_SAAS', label: 'SaaS', color: 'bg-green-100 text-green-700' },
{ key: 'HAS_PENALTY', label: 'Vertragsstrafe', color: 'bg-orange-100 text-orange-700' },
@@ -842,7 +842,7 @@ function DocumentGeneratorPageInner() {
useEffect(() => {
if (state?.companyProfile) {
const profile = state.companyProfile
const p = profile as Record<string, string>
const p = profile as unknown as Record<string, string>
setContext((prev) => ({
...prev,
PROVIDER: {
@@ -919,7 +919,7 @@ function DocumentGeneratorPageInner() {
(section: keyof TemplateContext, key: string, value: unknown) => {
setContext((prev) => ({
...prev,
[section]: { ...(prev[section] as Record<string, unknown>), [key]: value },
[section]: { ...(prev[section] as unknown as Record<string, unknown>), [key]: value },
}))
},
[]

View File

@@ -2,7 +2,7 @@
import React, { useState, useEffect, useCallback } from 'react'
import { useSDK } from '@/lib/sdk'
import { StepHeader, STEP_EXPLANATIONS } from '@/components/sdk/StepHeader'
import { StepHeader } from '@/components/sdk/StepHeader'
// =============================================================================
// TYPES
@@ -321,16 +321,7 @@ export default function EmailTemplatesPage() {
return (
<div className="space-y-6">
<StepHeader stepId="email-templates" explanation={STEP_EXPLANATIONS['email-templates'] || {
title: 'E-Mail-Templates',
description: 'Verwalten Sie Vorlagen fuer alle DSGVO-relevanten Benachrichtigungen.',
steps: [
'Template-Typen und Variablen pruefen',
'Inhalte im Editor anpassen',
'Vorschau pruefen und publizieren',
'Branding-Einstellungen konfigurieren',
],
}} />
<StepHeader stepId="email-templates" />
{error && (
<div className="bg-red-50 border border-red-200 rounded-lg p-3 text-sm text-red-700">

View File

@@ -15,7 +15,6 @@ interface Message {
interface ComplianceAdvisorWidgetProps {
currentStep?: string
enableDraftingEngine?: boolean
}
// =============================================================================
@@ -68,13 +67,7 @@ const COUNTRIES: { code: Country; label: string }[] = [
{ code: 'EU', label: 'EU' },
]
export function ComplianceAdvisorWidget({ currentStep = 'default', enableDraftingEngine = false }: ComplianceAdvisorWidgetProps) {
// Feature-flag: If Drafting Engine enabled, render DraftingEngineWidget instead
if (enableDraftingEngine) {
const { DraftingEngineWidget } = require('./DraftingEngineWidget')
return <DraftingEngineWidget currentStep={currentStep} enableDraftingEngine />
}
export function ComplianceAdvisorWidget({ currentStep = 'default' }: ComplianceAdvisorWidgetProps) {
const [isOpen, setIsOpen] = useState(false)
const [isExpanded, setIsExpanded] = useState(false)
const [messages, setMessages] = useState<Message[]>([])

View File

@@ -1,300 +0,0 @@
'use client'
/**
* DraftEditor - Split-Pane Editor fuer Compliance-Dokument-Entwuerfe
*
* Links (2/3): Gerenderter Draft mit Section-Headern
* Rechts (1/3): Chat-Panel fuer iterative Verfeinerung
* Oben: Document-Type Label, Depth-Level Badge, Constraint-Compliance
*/
import { useState, useRef, useCallback } from 'react'
import { DOCUMENT_TYPE_LABELS } from '@/lib/sdk/compliance-scope-types'
import type { ScopeDocumentType } from '@/lib/sdk/compliance-scope-types'
import type {
DraftRevision,
ConstraintCheckResult,
ValidationResult,
} from '@/lib/sdk/drafting-engine/types'
/** Props for the split-pane draft editor modal. All actions are delegated to the parent. */
interface DraftEditorProps {
  /** The draft revision to render; its sections drive navigation and content. */
  draft: DraftRevision
  /** Document type used for the header label; null falls back to a generic label. */
  documentType: ScopeDocumentType | null
  /** Latest constraint-check result; drives the header badge and the adjustment banner. Null = not checked yet. */
  constraintCheck: ConstraintCheckResult | null
  /** Latest validation result; drives the header badge and the right-pane summary. Null = not validated yet. */
  validationResult: ValidationResult | null
  /** True while the agent is generating; disables refine/validate/accept controls. */
  isTyping: boolean
  /** Called when the user accepts the draft. */
  onAccept: () => void
  /** Called when the user requests validation. */
  onValidate: () => void
  /** Called when the modal should close (header X or footer cancel). */
  onClose: () => void
  /** Called with a free-text or quick-suggestion refinement instruction. */
  onRefine: (instruction: string) => void
}
/**
 * DraftEditor — full-screen split-pane modal for reviewing and refining a
 * generated compliance-document draft.
 *
 * Layout (top to bottom):
 *  - Header: document-type label, section count / creation time, constraint
 *    and validation status badges, close button.
 *  - Optional amber banner listing constraint adjustments.
 *  - Left pane (2/3): draft sections with a sticky section-jump navigation bar.
 *  - Right pane (1/3): refinement panel — validation summary, quick-suggestion
 *    buttons, and a free-text instruction input.
 *  - Footer: Validate / Cancel / Accept actions.
 *
 * The component is fully controlled: accept, validate, close and refine are
 * delegated to the parent via callbacks; only transient UI state (input text,
 * highlighted section) lives here.
 */
export function DraftEditor({
  draft,
  documentType,
  constraintCheck,
  validationResult,
  isTyping,
  onAccept,
  onValidate,
  onClose,
  onRefine,
}: DraftEditorProps) {
  // Free-text refinement instruction currently typed by the user.
  const [refineInput, setRefineInput] = useState('')
  // Id of the section highlighted in the navigation and content list (null = none).
  const [activeSection, setActiveSection] = useState<string | null>(null)
  // Ref to the scrollable left content pane. NOTE(review): attached below but
  // never read within this component — possibly reserved for future use; confirm.
  const contentRef = useRef<HTMLDivElement>(null)
  // Send the trimmed instruction to the parent and clear the input;
  // no-op on empty input or while the agent is generating.
  const handleRefine = useCallback(() => {
    if (!refineInput.trim() || isTyping) return
    onRefine(refineInput.trim())
    setRefineInput('')
  }, [refineInput, isTyping, onRefine])
  // Enter submits the instruction; Shift+Enter is deliberately not intercepted.
  const handleRefineKeyDown = (e: React.KeyboardEvent) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault()
      handleRefine()
    }
  }
  // Short human-readable label: text before " (" in the full label, e.g. "VVT"
  // from "VVT (…)"; falls back to the raw type, or a generic label when null.
  const docLabel = documentType
    ? DOCUMENT_TYPE_LABELS[documentType]?.split(' (')[0] || documentType
    : 'Dokument'
  return (
    <div className="fixed inset-0 bg-gray-900/50 z-50 flex items-center justify-center p-4">
      <div className="bg-white rounded-2xl shadow-2xl w-full max-w-6xl h-[85vh] flex flex-col overflow-hidden">
        {/* Header */}
        <div className="bg-gradient-to-r from-blue-600 to-indigo-600 text-white px-6 py-3 flex items-center justify-between shrink-0">
          <div className="flex items-center gap-3">
            <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M11 5H6a2 2 0 00-2 2v11a2 2 0 002 2h11a2 2 0 002-2v-5m-1.414-9.414a2 2 0 112.828 2.828L11.828 15H9v-2.828l8.586-8.586z" />
            </svg>
            <div>
              <div className="font-semibold text-sm">{docLabel} - Entwurf</div>
              <div className="text-xs text-white/70">
                {draft.sections.length} Sections | Erstellt {new Date(draft.createdAt).toLocaleTimeString('de-DE', { hour: '2-digit', minute: '2-digit' })}
              </div>
            </div>
          </div>
          <div className="flex items-center gap-2">
            {/* Constraint Badge: green when allowed, red on violation */}
            {constraintCheck && (
              <span className={`px-2 py-0.5 rounded-full text-xs font-medium ${
                constraintCheck.allowed
                  ? 'bg-green-500/20 text-green-100'
                  : 'bg-red-500/20 text-red-100'
              }`}>
                {constraintCheck.allowed ? 'Constraints OK' : 'Constraint-Verletzung'}
              </span>
            )}
            {/* Validation Badge: green when passed, amber with error count otherwise */}
            {validationResult && (
              <span className={`px-2 py-0.5 rounded-full text-xs font-medium ${
                validationResult.passed
                  ? 'bg-green-500/20 text-green-100'
                  : 'bg-amber-500/20 text-amber-100'
              }`}>
                {validationResult.passed ? 'Validiert' : `${validationResult.errors.length} Fehler`}
              </span>
            )}
            <button
              onClick={onClose}
              className="text-white/80 hover:text-white transition-colors p-1"
              aria-label="Editor schliessen"
            >
              <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
              </svg>
            </button>
          </div>
        </div>
        {/* Adjustment Warnings: shown only when the constraint check reported adjustments */}
        {constraintCheck && constraintCheck.adjustments.length > 0 && (
          <div className="px-6 py-2 bg-amber-50 border-b border-amber-200 shrink-0">
            {constraintCheck.adjustments.map((adj, i) => (
              <p key={i} className="text-xs text-amber-700 flex items-start gap-1">
                <svg className="w-3.5 h-3.5 mt-0.5 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                  <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-2.5L13.732 4c-.77-.833-1.964-.833-2.732 0L3.732 16.5c-.77.833.192 2.5 1.732 2.5z" />
                </svg>
                {adj}
              </p>
            ))}
          </div>
        )}
        {/* Main Content: 2/3 Editor + 1/3 Chat */}
        <div className="flex-1 flex overflow-hidden">
          {/* Left: Draft Content (2/3) */}
          <div className="w-2/3 border-r border-gray-200 overflow-y-auto" ref={contentRef}>
            {/* Section Navigation — sticky pill bar; clicking scrolls the section into view */}
            <div className="sticky top-0 bg-white border-b border-gray-100 px-4 py-2 flex items-center gap-1 overflow-x-auto z-10">
              {draft.sections.map((section) => (
                <button
                  key={section.id}
                  onClick={() => {
                    setActiveSection(section.id)
                    document.getElementById(`section-${section.id}`)?.scrollIntoView({ behavior: 'smooth' })
                  }}
                  className={`px-2.5 py-1 rounded-md text-xs font-medium whitespace-nowrap transition-colors ${
                    activeSection === section.id
                      ? 'bg-blue-100 text-blue-700'
                      : 'text-gray-500 hover:bg-gray-100'
                  }`}
                >
                  {section.title}
                </button>
              ))}
            </div>
            {/* Sections — each card is the scroll target for the matching nav pill */}
            <div className="p-6 space-y-6">
              {draft.sections.map((section) => (
                <div
                  key={section.id}
                  id={`section-${section.id}`}
                  className={`rounded-lg border transition-colors ${
                    activeSection === section.id
                      ? 'border-blue-300 bg-blue-50/30'
                      : 'border-gray-200 bg-white'
                  }`}
                >
                  <div className="px-4 py-2.5 border-b border-gray-100 flex items-center justify-between">
                    <h3 className="text-sm font-semibold text-gray-900">{section.title}</h3>
                    {section.schemaField && (
                      <span className="text-xs text-gray-400 font-mono">{section.schemaField}</span>
                    )}
                  </div>
                  <div className="px-4 py-3">
                    <div className="text-sm text-gray-700 whitespace-pre-wrap leading-relaxed">
                      {section.content}
                    </div>
                  </div>
                </div>
              ))}
            </div>
          </div>
          {/* Right: Refinement Chat (1/3) */}
          <div className="w-1/3 flex flex-col bg-gray-50">
            <div className="px-4 py-3 border-b border-gray-200 bg-white">
              <h3 className="text-sm font-semibold text-gray-900">Verfeinerung</h3>
              <p className="text-xs text-gray-500">Geben Sie Anweisungen zur Verbesserung</p>
            </div>
            {/* Validation Summary (if present): error / warning / suggestion counts */}
            {validationResult && (
              <div className="px-4 py-3 border-b border-gray-100">
                <div className="space-y-1.5">
                  {validationResult.errors.length > 0 && (
                    <div className="flex items-center gap-1.5 text-xs text-red-600">
                      <span className="w-2 h-2 rounded-full bg-red-500" />
                      {validationResult.errors.length} Fehler
                    </div>
                  )}
                  {validationResult.warnings.length > 0 && (
                    <div className="flex items-center gap-1.5 text-xs text-amber-600">
                      <span className="w-2 h-2 rounded-full bg-amber-500" />
                      {validationResult.warnings.length} Warnungen
                    </div>
                  )}
                  {validationResult.suggestions.length > 0 && (
                    <div className="flex items-center gap-1.5 text-xs text-blue-600">
                      <span className="w-2 h-2 rounded-full bg-blue-500" />
                      {validationResult.suggestions.length} Vorschlaege
                    </div>
                  )}
                </div>
              </div>
            )}
            {/* Refinement Area */}
            <div className="flex-1 p-4 overflow-y-auto">
              <div className="space-y-3">
                <p className="text-xs text-gray-500">
                  Beschreiben Sie, was geaendert werden soll. Der Agent erstellt eine ueberarbeitete Version unter Beachtung der Scope-Constraints.
                </p>
                {/* Quick Refinement Buttons — canned instructions sent directly via onRefine */}
                <div className="space-y-1.5">
                  {[
                    'Mehr Details hinzufuegen',
                    'Platzhalter ausfuellen',
                    'Rechtliche Referenzen ergaenzen',
                    'Sprache vereinfachen',
                  ].map((suggestion) => (
                    <button
                      key={suggestion}
                      onClick={() => onRefine(suggestion)}
                      disabled={isTyping}
                      className="w-full text-left px-3 py-1.5 text-xs bg-white hover:bg-blue-50 border border-gray-200 rounded-md transition-colors text-gray-600 disabled:opacity-50"
                    >
                      {suggestion}
                    </button>
                  ))}
                </div>
              </div>
            </div>
            {/* Refinement Input */}
            <div className="border-t border-gray-200 p-3 bg-white">
              <div className="flex gap-2">
                <input
                  type="text"
                  value={refineInput}
                  onChange={(e) => setRefineInput(e.target.value)}
                  onKeyDown={handleRefineKeyDown}
                  placeholder="Anweisung eingeben..."
                  disabled={isTyping}
                  className="flex-1 px-3 py-2 text-sm border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent disabled:opacity-50"
                />
                <button
                  onClick={handleRefine}
                  disabled={!refineInput.trim() || isTyping}
                  className="px-3 py-2 bg-blue-600 text-white rounded-lg hover:bg-blue-700 disabled:opacity-50 disabled:cursor-not-allowed transition-colors"
                >
                  <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 19l9 2-9-18-9 18 9-2zm0 0v-8" />
                  </svg>
                </button>
              </div>
            </div>
          </div>
        </div>
        {/* Footer Actions */}
        <div className="border-t border-gray-200 px-6 py-3 bg-white flex items-center justify-between shrink-0">
          <div className="flex items-center gap-2">
            <button
              onClick={onValidate}
              disabled={isTyping}
              className="px-4 py-2 text-sm font-medium text-green-700 bg-green-50 border border-green-200 rounded-lg hover:bg-green-100 disabled:opacity-50 transition-colors"
            >
              Validieren
            </button>
          </div>
          <div className="flex items-center gap-2">
            <button
              onClick={onClose}
              className="px-4 py-2 text-sm font-medium text-gray-600 bg-gray-50 border border-gray-200 rounded-lg hover:bg-gray-100 transition-colors"
            >
              Abbrechen
            </button>
            <button
              onClick={onAccept}
              disabled={isTyping}
              className="px-4 py-2 text-sm font-medium text-white bg-blue-600 rounded-lg hover:bg-blue-700 disabled:opacity-50 transition-colors"
            >
              Draft akzeptieren
            </button>
          </div>
        </div>
      </div>
    </div>
  )
}

View File

@@ -1,443 +0,0 @@
'use client'
/**
* DraftingEngineWidget - Erweitert den ComplianceAdvisor um 4 Modi
*
* Mode-Indicator Pills: Explain / Ask / Draft / Validate
* Document-Type Selector aus requiredDocuments der ScopeDecision
* Feature-Flag enableDraftingEngine fuer schrittweises Rollout
*/
import { useState, useEffect, useRef, useCallback } from 'react'
import { useSDK } from '@/lib/sdk/context'
import { useDraftingEngine } from '@/lib/sdk/drafting-engine/use-drafting-engine'
import { DOCUMENT_TYPE_LABELS } from '@/lib/sdk/compliance-scope-types'
import type { AgentMode } from '@/lib/sdk/drafting-engine/types'
import type { ScopeDocumentType } from '@/lib/sdk/compliance-scope-types'
import { DraftEditor } from './DraftEditor'
import { ValidationReport } from './ValidationReport'
/** Props for the drafting-engine chat widget. */
interface DraftingEngineWidgetProps {
  /** Current wizard step id (default 'default'). NOTE(review): accepted but not read inside this component — confirm intended use. */
  currentStep?: string
  /** Feature flag for staged rollout (default true). NOTE(review): not read inside this component; the caller gates rendering on it — confirm. */
  enableDraftingEngine?: boolean
}
// Visual configuration per agent mode: pill label, inactive/active Tailwind
// classes, and an SVG path ("d" attribute) used as the mode icon.
const MODE_CONFIG: Record<AgentMode, { label: string; color: string; activeColor: string; icon: string }> = {
  explain: { label: 'Explain', color: 'bg-gray-100 text-gray-600', activeColor: 'bg-purple-100 text-purple-700 ring-1 ring-purple-300', icon: 'M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z' },
  ask: { label: 'Ask', color: 'bg-gray-100 text-gray-600', activeColor: 'bg-amber-100 text-amber-700 ring-1 ring-amber-300', icon: 'M8.228 9c.549-1.165 2.03-2 3.772-2 2.21 0 4 1.343 4 3 0 1.4-1.278 2.575-3.006 2.907-.542.104-.994.54-.994 1.093m0 3h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z' },
  draft: { label: 'Draft', color: 'bg-gray-100 text-gray-600', activeColor: 'bg-blue-100 text-blue-700 ring-1 ring-blue-300', icon: 'M11 5H6a2 2 0 00-2 2v11a2 2 0 002 2h11a2 2 0 002-2v-5m-1.414-9.414a2 2 0 112.828 2.828L11.828 15H9v-2.828l8.586-8.586z' },
  validate: { label: 'Validate', color: 'bg-gray-100 text-gray-600', activeColor: 'bg-green-100 text-green-700 ring-1 ring-green-300', icon: 'M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z' },
}
// Starter prompts shown in the empty chat state, keyed by agent mode.
// Clicking one sends it as a regular message.
const EXAMPLE_QUESTIONS: Record<AgentMode, string[]> = {
  explain: [
    'Was ist ein Verarbeitungsverzeichnis?',
    'Wann brauche ich eine DSFA?',
    'Was sind TOM nach Art. 32 DSGVO?',
  ],
  ask: [
    'Welche Luecken hat mein Compliance-Profil?',
    'Was fehlt noch fuer die Zertifizierung?',
    'Welche Dokumente muss ich noch erstellen?',
  ],
  draft: [
    'Erstelle einen VVT-Eintrag fuer unseren Hauptprozess',
    'Erstelle TOM fuer unsere Cloud-Infrastruktur',
    'Erstelle eine Datenschutzerklaerung',
  ],
  validate: [
    'Pruefe die Konsistenz meiner Dokumente',
    'Stimmen VVT und TOM ueberein?',
    'Gibt es Luecken bei den Loeschfristen?',
  ],
}
/**
 * DraftingEngineWidget — floating chat widget that extends the Compliance
 * Advisor with four agent modes (Explain / Ask / Draft / Validate).
 *
 * Render states (in order of precedence):
 *  1. Collapsed: a floating launcher button (when !isOpen).
 *  2. Draft editor: full-screen DraftEditor overlay (when a draft is open).
 *  3. Chat panel: mode pills, optional document-type selector, gap/error
 *     banners, inline validation report, message list, and input area.
 *
 * All engine interaction (messages, drafts, validation) goes through the
 * useDraftingEngine() hook; this component manages only UI state.
 */
export function DraftingEngineWidget({
  currentStep = 'default',
  enableDraftingEngine = true,
}: DraftingEngineWidgetProps) {
  const { state } = useSDK()
  const engine = useDraftingEngine()
  // Widget visibility and size.
  const [isOpen, setIsOpen] = useState(false)
  const [isExpanded, setIsExpanded] = useState(false)
  // Current chat input text.
  const [inputValue, setInputValue] = useState('')
  // Overlay toggles for the draft editor and the inline validation report.
  const [showDraftEditor, setShowDraftEditor] = useState(false)
  const [showValidationReport, setShowValidationReport] = useState(false)
  const messagesEndRef = useRef<HTMLDivElement>(null)
  // Available document types from scope decision; falls back to a default
  // trio when no scope decision exists yet.
  const availableDocumentTypes: ScopeDocumentType[] =
    state.complianceScope?.decision?.requiredDocuments
      ?.filter(d => d.required)
      .map(d => d.documentType as ScopeDocumentType) ?? ['vvt', 'tom', 'lf']
  // Auto-scroll to the newest message.
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
  }, [engine.messages])
  // Open draft editor when a new draft arrives.
  useEffect(() => {
    if (engine.currentDraft) {
      setShowDraftEditor(true)
    }
  }, [engine.currentDraft])
  // Open validation report when new results arrive.
  useEffect(() => {
    if (engine.validationResult) {
      setShowValidationReport(true)
    }
  }, [engine.validationResult])
  // Send a chat message; empty/whitespace-only input is ignored.
  const handleSendMessage = useCallback(
    (content: string) => {
      if (!content.trim()) return
      setInputValue('')
      engine.sendMessage(content)
    },
    [engine]
  )
  // Enter sends; Shift+Enter is deliberately not intercepted.
  const handleKeyDown = (e: React.KeyboardEvent) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault()
      handleSendMessage(inputValue)
    }
  }
  const exampleQuestions = EXAMPLE_QUESTIONS[engine.currentMode]
  // Collapsed state: floating launcher button only.
  if (!isOpen) {
    return (
      <button
        onClick={() => setIsOpen(true)}
        className="fixed bottom-6 right-[5.5rem] w-14 h-14 bg-indigo-600 hover:bg-indigo-700 text-white rounded-full shadow-lg flex items-center justify-center transition-all duration-200 hover:scale-110 z-50"
        aria-label="Drafting Engine oeffnen"
      >
        <svg className="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
          <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M11 5H6a2 2 0 00-2 2v11a2 2 0 002 2h11a2 2 0 002-2v-5m-1.414-9.414a2 2 0 112.828 2.828L11.828 15H9v-2.828l8.586-8.586z" />
        </svg>
      </button>
    )
  }
  // Draft Editor full-screen overlay (replaces the chat panel entirely).
  if (showDraftEditor && engine.currentDraft) {
    return (
      <DraftEditor
        draft={engine.currentDraft}
        documentType={engine.activeDocumentType}
        constraintCheck={engine.constraintCheck}
        onAccept={() => {
          engine.acceptDraft()
          setShowDraftEditor(false)
        }}
        onValidate={() => {
          engine.validateDraft()
        }}
        onClose={() => setShowDraftEditor(false)}
        onRefine={(instruction: string) => {
          engine.requestDraft(instruction)
        }}
        validationResult={engine.validationResult}
        isTyping={engine.isTyping}
      />
    )
  }
  return (
    <div className={`fixed bottom-6 right-6 ${isExpanded ? 'w-[700px] h-[80vh]' : 'w-[420px] h-[560px]'} max-h-screen bg-white rounded-2xl shadow-2xl flex flex-col z-50 border border-gray-200 transition-all duration-200`}>
      {/* Header */}
      <div className="bg-gradient-to-r from-purple-600 to-indigo-600 text-white px-4 py-3 rounded-t-2xl flex items-center justify-between">
        <div className="flex items-center gap-2">
          <div className="w-8 h-8 bg-white/20 rounded-full flex items-center justify-center">
            <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M11 5H6a2 2 0 00-2 2v11a2 2 0 002 2h11a2 2 0 002-2v-5m-1.414-9.414a2 2 0 112.828 2.828L11.828 15H9v-2.828l8.586-8.586z" />
            </svg>
          </div>
          <div>
            <div className="font-semibold text-sm">Drafting Engine</div>
            <div className="text-xs text-white/80">Compliance-Dokumententwurf</div>
          </div>
        </div>
        <div className="flex items-center gap-1">
          {/* Expand/collapse toggle */}
          <button
            onClick={() => setIsExpanded(!isExpanded)}
            className="text-white/80 hover:text-white transition-colors p-1"
            aria-label={isExpanded ? 'Verkleinern' : 'Vergroessern'}
          >
            <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              {isExpanded ? (
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 9L4 4m0 0v4m0-4h4m6 6l5 5m0 0v-4m0 4h-4" />
              ) : (
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 8V4m0 0h4M4 4l5 5m11-1V4m0 0h-4m4 0l-5 5M4 16v4m0 0h4m-4 0l5-5m11 5v-4m0 4h-4m4 0l-5-5" />
              )}
            </svg>
          </button>
          {/* Close: clears the conversation before hiding the widget */}
          <button
            onClick={() => {
              engine.clearMessages()
              setIsOpen(false)
            }}
            className="text-white/80 hover:text-white transition-colors p-1"
            aria-label="Schliessen"
          >
            <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
            </svg>
          </button>
        </div>
      </div>
      {/* Mode Pills — one per AgentMode, highlighted when active */}
      <div className="flex items-center gap-1 px-3 py-2 border-b border-gray-100 bg-white">
        {(Object.keys(MODE_CONFIG) as AgentMode[]).map((mode) => {
          const config = MODE_CONFIG[mode]
          const isActive = engine.currentMode === mode
          return (
            <button
              key={mode}
              onClick={() => engine.setMode(mode)}
              className={`flex items-center gap-1 px-2.5 py-1 rounded-full text-xs font-medium transition-all ${isActive ? config.activeColor : config.color} hover:opacity-80`}
            >
              <svg className="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d={config.icon} />
              </svg>
              {config.label}
            </button>
          )
        })}
      </div>
      {/* Document Type Selector (visible in draft/validate mode) */}
      {(engine.currentMode === 'draft' || engine.currentMode === 'validate') && (
        <div className="px-3 py-2 border-b border-gray-100 bg-gray-50/50">
          <div className="flex items-center gap-2">
            <span className="text-xs text-gray-500 shrink-0">Dokument:</span>
            <select
              value={engine.activeDocumentType || ''}
              onChange={(e) => engine.setDocumentType(e.target.value as ScopeDocumentType)}
              className="flex-1 text-xs border border-gray-200 rounded-md px-2 py-1 bg-white focus:outline-none focus:ring-1 focus:ring-purple-400"
            >
              <option value="">Dokumenttyp waehlen...</option>
              {availableDocumentTypes.map((dt) => (
                <option key={dt} value={dt}>
                  {DOCUMENT_TYPE_LABELS[dt] || dt}
                </option>
              ))}
            </select>
          </div>
        </div>
      )}
      {/* Gap Banner — shown when the scope decision contains HIGH/CRITICAL gaps */}
      {(() => {
        const gaps = state.complianceScope?.decision?.gaps?.filter(
          (g: { severity: string }) => g.severity === 'HIGH' || g.severity === 'CRITICAL'
        ) ?? []
        if (gaps.length > 0) {
          return (
            <div className="mx-3 mt-2 px-3 py-2 bg-amber-50 border border-amber-200 rounded-lg flex items-center justify-between">
              <div className="flex items-center gap-2 text-xs text-amber-800">
                <svg className="w-4 h-4 text-amber-500 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                  <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-2.5L13.732 4c-.77-.833-1.964-.833-2.732 0L4.082 16.5c-.77.833.192 2.5 1.732 2.5z" />
                </svg>
                <span className="font-medium">{gaps.length} kritische Luecke{gaps.length !== 1 ? 'n' : ''} erkannt</span>
              </div>
              <button
                onClick={() => handleSendMessage('Was fehlt noch in meinem Compliance-Profil?')}
                className="text-xs font-medium text-amber-700 hover:text-amber-900 px-2 py-0.5 rounded hover:bg-amber-100 transition-colors"
              >
                Analysieren
              </button>
            </div>
          )
        }
        return null
      })()}
      {/* Error Banner — dismiss clears the whole conversation via clearMessages */}
      {engine.error && (
        <div className="mx-3 mt-2 px-3 py-2 bg-red-50 border border-red-200 rounded-lg text-xs text-red-700 flex items-center justify-between">
          <span>{engine.error}</span>
          <button onClick={() => engine.clearMessages()} className="text-red-500 hover:text-red-700 ml-2">
            <svg className="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
            </svg>
          </button>
        </div>
      )}
      {/* Validation Report Inline */}
      {showValidationReport && engine.validationResult && (
        <div className="mx-3 mt-2 max-h-48 overflow-y-auto">
          <ValidationReport
            result={engine.validationResult}
            onClose={() => setShowValidationReport(false)}
            compact
          />
        </div>
      )}
      {/* Messages Area — empty state with examples, or the conversation */}
      <div className="flex-1 overflow-y-auto p-4 space-y-3 bg-gray-50">
        {engine.messages.length === 0 ? (
          <div className="text-center py-6">
            <div className="w-14 h-14 bg-purple-100 rounded-full flex items-center justify-center mx-auto mb-3">
              <svg className="w-7 h-7 text-purple-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d={MODE_CONFIG[engine.currentMode].icon} />
              </svg>
            </div>
            <h3 className="text-sm font-medium text-gray-900 mb-1">
              {engine.currentMode === 'explain' && 'Fragen beantworten'}
              {engine.currentMode === 'ask' && 'Luecken erkennen'}
              {engine.currentMode === 'draft' && 'Dokumente entwerfen'}
              {engine.currentMode === 'validate' && 'Konsistenz pruefen'}
            </h3>
            <p className="text-xs text-gray-500 mb-4">
              {engine.currentMode === 'explain' && 'Stellen Sie Fragen zu DSGVO, AI Act und Compliance.'}
              {engine.currentMode === 'ask' && 'Identifiziert Luecken in Ihrem Compliance-Profil.'}
              {engine.currentMode === 'draft' && 'Erstellt strukturierte Compliance-Dokumente.'}
              {engine.currentMode === 'validate' && 'Prueft Cross-Dokument-Konsistenz.'}
            </p>
            <div className="text-left space-y-2">
              <p className="text-xs font-medium text-gray-700 mb-2">Beispiele:</p>
              {exampleQuestions.map((q, idx) => (
                <button
                  key={idx}
                  onClick={() => handleSendMessage(q)}
                  className="w-full text-left px-3 py-2 text-xs bg-white hover:bg-purple-50 border border-gray-200 rounded-lg transition-colors text-gray-700"
                >
                  {q}
                </button>
              ))}
            </div>
            {/* Quick Actions for Draft/Validate */}
            {engine.currentMode === 'draft' && engine.activeDocumentType && (
              <button
                onClick={() => engine.requestDraft()}
                className="mt-4 px-4 py-2 bg-blue-600 text-white text-xs font-medium rounded-lg hover:bg-blue-700 transition-colors"
              >
                Draft fuer {DOCUMENT_TYPE_LABELS[engine.activeDocumentType]?.split(' (')[0] || engine.activeDocumentType} erstellen
              </button>
            )}
            {engine.currentMode === 'validate' && (
              <button
                onClick={() => engine.validateDraft()}
                className="mt-4 px-4 py-2 bg-green-600 text-white text-xs font-medium rounded-lg hover:bg-green-700 transition-colors"
              >
                Validierung starten
              </button>
            )}
          </div>
        ) : (
          <>
            {engine.messages.map((message, idx) => (
              <div
                key={idx}
                className={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`}
              >
                <div
                  className={`max-w-[85%] rounded-lg px-3 py-2 ${
                    message.role === 'user'
                      ? 'bg-indigo-600 text-white'
                      : 'bg-white border border-gray-200 text-gray-800'
                  }`}
                >
                  <p className={`text-sm ${message.role === 'assistant' ? 'whitespace-pre-wrap' : ''}`}>
                    {message.content}
                  </p>
                  {/* Draft ready indicator — reopens the draft editor */}
                  {message.metadata?.hasDraft && engine.currentDraft && (
                    <button
                      onClick={() => setShowDraftEditor(true)}
                      className="mt-2 flex items-center gap-1.5 px-3 py-1.5 bg-blue-50 border border-blue-200 rounded-md text-xs text-blue-700 hover:bg-blue-100 transition-colors"
                    >
                      <svg className="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                        <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M10 6H6a2 2 0 00-2 2v10a2 2 0 002 2h10a2 2 0 002-2v-4M14 4h6m0 0v6m0-6L10 14" />
                      </svg>
                      Im Editor oeffnen
                    </button>
                  )}
                  {/* Validation ready indicator — reopens the inline report */}
                  {message.metadata?.hasValidation && engine.validationResult && (
                    <button
                      onClick={() => setShowValidationReport(true)}
                      className="mt-2 flex items-center gap-1.5 px-3 py-1.5 bg-green-50 border border-green-200 rounded-md text-xs text-green-700 hover:bg-green-100 transition-colors"
                    >
                      <svg className="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                        <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z" />
                      </svg>
                      Validierungsbericht anzeigen
                    </button>
                  )}
                </div>
              </div>
            ))}
            {/* Typing indicator (three bouncing dots) */}
            {engine.isTyping && (
              <div className="flex justify-start">
                <div className="bg-white border border-gray-200 rounded-lg px-3 py-2">
                  <div className="flex space-x-1">
                    <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce" />
                    <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce" style={{ animationDelay: '0.1s' }} />
                    <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce" style={{ animationDelay: '0.2s' }} />
                  </div>
                </div>
              </div>
            )}
            {/* Scroll anchor for the auto-scroll effect */}
            <div ref={messagesEndRef} />
          </>
        )}
      </div>
      {/* Input Area — send button swaps to a stop button while generating */}
      <div className="border-t border-gray-200 p-3 bg-white rounded-b-2xl">
        <div className="flex gap-2">
          <input
            type="text"
            value={inputValue}
            onChange={(e) => setInputValue(e.target.value)}
            onKeyDown={handleKeyDown}
            placeholder={
              engine.currentMode === 'draft'
                ? 'Anweisung fuer den Entwurf...'
                : engine.currentMode === 'validate'
                ? 'Validierungsfrage...'
                : 'Frage eingeben...'
            }
            disabled={engine.isTyping}
            className="flex-1 px-3 py-2 text-sm border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-purple-500 focus:border-transparent disabled:opacity-50"
          />
          {engine.isTyping ? (
            <button
              onClick={engine.stopGeneration}
              className="px-3 py-2 bg-red-500 text-white rounded-lg hover:bg-red-600 transition-colors"
              title="Generierung stoppen"
            >
              <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 6h12v12H6z" />
              </svg>
            </button>
          ) : (
            <button
              onClick={() => handleSendMessage(inputValue)}
              disabled={!inputValue.trim()}
              className="px-3 py-2 bg-indigo-600 text-white rounded-lg hover:bg-indigo-700 disabled:opacity-50 disabled:cursor-not-allowed transition-colors"
            >
              <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 19l9 2-9-18-9 18 9-2zm0 0v-8" />
              </svg>
            </button>
          )}
        </div>
      </div>
    </div>
  )
}

View File

@@ -17,9 +17,9 @@ export interface StepTip {
interface StepHeaderProps {
stepId: string
title: string
description: string
explanation: string
title?: string
description?: string
explanation?: string
tips?: StepTip[]
showNavigation?: boolean
showProgress?: boolean
@@ -95,10 +95,10 @@ const tipIconColors = {
export function StepHeader({
stepId,
title,
description,
explanation,
tips = [],
title: titleProp,
description: descriptionProp,
explanation: explanationProp,
tips: tipsProp,
showNavigation = true,
showProgress = true,
onComplete,
@@ -109,6 +109,13 @@ export function StepHeader({
const { state, dispatch } = useSDK()
const [showHelp, setShowHelp] = useState(false)
// Look up defaults from STEP_EXPLANATIONS when props are not provided
const preset = STEP_EXPLANATIONS[stepId as keyof typeof STEP_EXPLANATIONS]
const title = titleProp ?? preset?.title ?? stepId
const description = descriptionProp ?? preset?.description ?? ''
const explanation = explanationProp ?? preset?.explanation ?? ''
const tips = tipsProp ?? preset?.tips ?? []
const currentStep = getStepById(stepId)
const prevStep = getPreviousStep(stepId)
const nextStep = getNextStep(stepId)
@@ -996,6 +1003,50 @@ export const STEP_EXPLANATIONS = {
},
],
},
}
'email-templates': {
title: 'E-Mail-Templates',
description: 'Verwalten Sie Vorlagen fuer alle DSGVO-relevanten Benachrichtigungen',
explanation: 'E-Mail-Templates definieren die Texte und das Layout fuer automatisierte DSGVO-Benachrichtigungen: Einwilligungsbestaetigung, Widerrufsbestaetigung, Auskunftsantwort, Loeschbestaetigung und weitere Lifecycle-E-Mails. Alle 16 Template-Typen koennen individuell angepasst und mit Variablen personalisiert werden.',
tips: [
{
icon: 'info' as const,
title: '16 Lifecycle-E-Mails',
description: 'Von der Registrierungsbestaetigung bis zur Kontoloeschung — alle relevanten Touchpoints sind mit Vorlagen abgedeckt.',
},
{
icon: 'warning' as const,
title: 'Pflichtangaben',
description: 'Stellen Sie sicher, dass jede E-Mail die gesetzlich vorgeschriebenen Angaben enthaelt: Impressum, Datenschutzhinweis und Widerrufsmoeglichkeit.',
},
{
icon: 'lightbulb' as const,
title: 'Variablen',
description: 'Nutzen Sie Platzhalter wie {{name}}, {{email}} und {{company}} fuer automatische Personalisierung.',
},
],
},
'use-case-workshop': {
title: 'Use Case Workshop',
description: 'Erfassen und bewerten Sie Ihre KI-Anwendungsfaelle im Workshop-Format',
explanation: 'Im Use Case Workshop erfassen Sie Ihre KI-Anwendungsfaelle strukturiert in einem gefuehrten Prozess. Der Workshop leitet Sie durch Identifikation, Beschreibung, Datenkategorien, Risikobewertung und Stakeholder-Analyse. Die Ergebnisse fliessen direkt in die Compliance-Bewertung ein.',
tips: [
{
icon: 'lightbulb' as const,
title: 'Vollstaendigkeit',
description: 'Erfassen Sie alle KI-Anwendungsfaelle — auch solche, die nur intern genutzt werden oder sich noch in der Planungsphase befinden.',
},
{
icon: 'info' as const,
title: 'Stakeholder einbeziehen',
description: 'Beziehen Sie Fachbereiche und IT in den Workshop ein, um alle Anwendungsfaelle zu identifizieren.',
},
{
icon: 'warning' as const,
title: 'Risikobewertung',
description: 'Jeder Anwendungsfall wird nach EU AI Act Risikostufen klassifiziert. Hochrisiko-Systeme erfordern zusaetzliche Dokumentation.',
},
],
},
} satisfies Record<string, { title: string; description: string; explanation: string; tips: StepTip[] }>
export default StepHeader

View File

@@ -1,220 +0,0 @@
'use client'
/**
* ValidationReport - Strukturierte Anzeige von Validierungsergebnissen
*
* Errors (Scope-Violations) in Rot
* Warnings (Inkonsistenzen) in Amber
* Suggestions in Blau
*/
import { DOCUMENT_TYPE_LABELS } from '@/lib/sdk/compliance-scope-types'
import type { ValidationResult, ValidationFinding } from '@/lib/sdk/drafting-engine/types'
/**
 * Props for {@link ValidationReport}.
 */
interface ValidationReportProps {
/** Validation outcome to render: pass/fail flag plus errors, warnings and suggestions. */
result: ValidationResult
/** Called when the user dismisses the report (X button). */
onClose: () => void
/** Compact mode for inline display in widget */
compact?: boolean
}
/** Visual treatment for one finding severity. */
type SeverityStyle = {
  /** Tailwind background class for the card/strip. */
  bg: string
  /** Tailwind border class. */
  border: string
  /** Tailwind text-colour class for title and icon. */
  text: string
  /** SVG path data for the severity icon. */
  icon: string
  /** German section heading for this severity group. */
  label: string
  /** Tailwind class for the small status dot. */
  dotColor: string
}

/**
 * Per-severity styling used by FindingCard: errors in red, warnings in
 * amber, suggestions in blue. `satisfies` keeps the literal key/value
 * inference while checking the shape of every entry.
 */
const SEVERITY_CONFIG = {
  error: {
    bg: 'bg-red-50',
    border: 'border-red-200',
    text: 'text-red-700',
    icon: 'M10 14l2-2m0 0l2-2m-2 2l-2-2m2 2l2 2m7-2a9 9 0 11-18 0 9 9 0 0118 0z',
    label: 'Fehler',
    dotColor: 'bg-red-500',
  },
  warning: {
    bg: 'bg-amber-50',
    border: 'border-amber-200',
    text: 'text-amber-700',
    icon: 'M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-2.5L13.732 4c-.77-.833-1.964-.833-2.732 0L3.732 16.5c-.77.833.192 2.5 1.732 2.5z',
    label: 'Warnungen',
    dotColor: 'bg-amber-500',
  },
  suggestion: {
    bg: 'bg-blue-50',
    border: 'border-blue-200',
    text: 'text-blue-700',
    icon: 'M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z',
    label: 'Vorschlaege',
    dotColor: 'bg-blue-500',
  },
} satisfies Record<'error' | 'warning' | 'suggestion', SeverityStyle>
/**
 * Card for a single validation finding.
 *
 * Styling (colours, icon path, status dot) comes from SEVERITY_CONFIG,
 * keyed by `finding.severity`. The document badge uses the short label
 * from DOCUMENT_TYPE_LABELS — everything from ' (' onwards is cut —
 * falling back to the raw `finding.documentType` id.
 *
 * With `compact` set, renders a one-line dot + title + truncated
 * description strip; otherwise the full card with optional
 * cross-reference, legal reference and suggestion sections.
 */
function FindingCard({ finding, compact }: { finding: ValidationFinding; compact?: boolean }) {
const config = SEVERITY_CONFIG[finding.severity]
// Short badge text: strip the parenthesised suffix of the label, fall back to the type id.
const docLabel = DOCUMENT_TYPE_LABELS[finding.documentType]?.split(' (')[0] || finding.documentType
if (compact) {
// Compact variant for the inline widget: severity dot, title, one truncated line.
return (
<div className={`flex items-start gap-2 px-2.5 py-1.5 ${config.bg} rounded-md border ${config.border}`}>
<span className={`w-1.5 h-1.5 rounded-full mt-1.5 shrink-0 ${config.dotColor}`} />
<div className="min-w-0">
<p className={`text-xs font-medium ${config.text}`}>{finding.title}</p>
<p className="text-xs text-gray-500 truncate">{finding.description}</p>
</div>
</div>
)
}
// Full card: icon, title + document badge, description, then optional details.
return (
<div className={`${config.bg} rounded-lg border ${config.border} p-3`}>
<div className="flex items-start gap-2">
<svg className={`w-4 h-4 mt-0.5 shrink-0 ${config.text}`} fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d={config.icon} />
</svg>
<div className="flex-1 min-w-0">
<div className="flex items-center gap-2">
<h4 className={`text-sm font-medium ${config.text}`}>{finding.title}</h4>
<span className="text-xs text-gray-400 bg-gray-100 px-1.5 py-0.5 rounded">{docLabel}</span>
</div>
<p className="text-xs text-gray-600 mt-1">{finding.description}</p>
{/* Cross-referenced document, shown with the same shortened label scheme */}
{finding.crossReferenceType && (
<p className="text-xs text-gray-500 mt-1">
Cross-Referenz: {DOCUMENT_TYPE_LABELS[finding.crossReferenceType]?.split(' (')[0] || finding.crossReferenceType}
</p>
)}
{/* Legal citation, monospaced */}
{finding.legalReference && (
<p className="text-xs text-gray-500 mt-1 font-mono">{finding.legalReference}</p>
)}
{/* Remediation suggestion in a light inset box */}
{finding.suggestion && (
<div className="mt-2 flex items-start gap-1.5 px-2.5 py-1.5 bg-white/60 rounded border border-gray-100">
<svg className="w-3.5 h-3.5 mt-0.5 text-gray-400 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M13 7h8m0 0v8m0-8l-8 8-4-4-6 6" />
</svg>
<p className="text-xs text-gray-600">{finding.suggestion}</p>
</div>
)}
</div>
</div>
</div>
)
}
/**
 * Structured display of a validation run.
 *
 * Shows a pass/fail summary (green/red) with per-severity counts, then
 * the findings grouped as Fehler / Warnungen / Vorschlaege, each
 * rendered via FindingCard. `compact` switches to a small bordered
 * widget with a scrollable finding list. `onClose` is wired to the X
 * button in both variants.
 */
export function ValidationReport({ result, onClose, compact }: ValidationReportProps) {
// Total across all three severity buckets, used for the compact count text.
const totalFindings = result.errors.length + result.warnings.length + result.suggestions.length
if (compact) {
// Compact widget: status row + scrollable list of compact finding cards.
return (
<div className="rounded-lg border border-gray-200 bg-white overflow-hidden">
<div className="flex items-center justify-between px-3 py-2 bg-gray-50 border-b border-gray-100">
<div className="flex items-center gap-2">
<span className={`w-2 h-2 rounded-full ${result.passed ? 'bg-green-500' : 'bg-red-500'}`} />
<span className="text-xs font-medium text-gray-700">
{result.passed ? 'Validierung bestanden' : 'Validierung fehlgeschlagen'}
</span>
<span className="text-xs text-gray-400">
({totalFindings} {totalFindings === 1 ? 'Fund' : 'Funde'})
</span>
</div>
<button onClick={onClose} className="text-gray-400 hover:text-gray-600">
<svg className="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
</svg>
</button>
</div>
{/* Findings in severity order: errors, then warnings, then suggestions */}
<div className="p-2 space-y-1.5 max-h-36 overflow-y-auto">
{result.errors.map((f) => <FindingCard key={f.id} finding={f} compact />)}
{result.warnings.map((f) => <FindingCard key={f.id} finding={f} compact />)}
{result.suggestions.map((f) => <FindingCard key={f.id} finding={f} compact />)}
</div>
</div>
)
}
// Full report: summary header with stats, then one section per severity.
return (
<div className="space-y-4">
{/* Summary Header */}
<div className={`rounded-lg border p-4 ${result.passed ? 'bg-green-50 border-green-200' : 'bg-red-50 border-red-200'}`}>
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<div className={`w-10 h-10 rounded-full flex items-center justify-center ${result.passed ? 'bg-green-100' : 'bg-red-100'}`}>
<svg className={`w-5 h-5 ${result.passed ? 'text-green-600' : 'text-red-600'}`} fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d={result.passed ? 'M5 13l4 4L19 7' : 'M6 18L18 6M6 6l12 12'} />
</svg>
</div>
<div>
<h3 className={`text-sm font-semibold ${result.passed ? 'text-green-800' : 'text-red-800'}`}>
{result.passed ? 'Validierung bestanden' : 'Validierung fehlgeschlagen'}
</h3>
{/* Scope level plus the run timestamp in German locale format */}
<p className="text-xs text-gray-500">
Level {result.scopeLevel} | {new Date(result.timestamp).toLocaleString('de-DE')}
</p>
</div>
</div>
{/* Stats */}
<div className="flex items-center gap-3">
{result.errors.length > 0 && (
<div className="flex items-center gap-1">
<span className="w-2 h-2 rounded-full bg-red-500" />
<span className="text-xs font-medium text-red-700">{result.errors.length}</span>
</div>
)}
{result.warnings.length > 0 && (
<div className="flex items-center gap-1">
<span className="w-2 h-2 rounded-full bg-amber-500" />
<span className="text-xs font-medium text-amber-700">{result.warnings.length}</span>
</div>
)}
{result.suggestions.length > 0 && (
<div className="flex items-center gap-1">
<span className="w-2 h-2 rounded-full bg-blue-500" />
<span className="text-xs font-medium text-blue-700">{result.suggestions.length}</span>
</div>
)}
<button onClick={onClose} className="text-gray-400 hover:text-gray-600 ml-2">
<svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
</svg>
</button>
</div>
</div>
</div>
{/* Errors */}
{result.errors.length > 0 && (
<div>
<h4 className="text-xs font-semibold text-red-700 uppercase tracking-wide mb-2">
Fehler ({result.errors.length})
</h4>
<div className="space-y-2">
{result.errors.map((f) => <FindingCard key={f.id} finding={f} />)}
</div>
</div>
)}
{/* Warnings */}
{result.warnings.length > 0 && (
<div>
<h4 className="text-xs font-semibold text-amber-700 uppercase tracking-wide mb-2">
Warnungen ({result.warnings.length})
</h4>
<div className="space-y-2">
{result.warnings.map((f) => <FindingCard key={f.id} finding={f} />)}
</div>
</div>
)}
{/* Suggestions */}
{result.suggestions.length > 0 && (
<div>
<h4 className="text-xs font-semibold text-blue-700 uppercase tracking-wide mb-2">
Vorschlaege ({result.suggestions.length})
</h4>
<div className="space-y-2">
{result.suggestions.map((f) => <FindingCard key={f.id} finding={f} />)}
</div>
</div>
)}
</div>
)
}

View File

@@ -1,12 +1,14 @@
import { describe, it, expect } from 'vitest'
import { STEP_EXPLANATIONS } from '../StepHeader'
type StepExplanationKey = keyof typeof STEP_EXPLANATIONS
// Focus on testing the STEP_EXPLANATIONS data structure
// Component tests require more complex SDK context mocking
describe('STEP_EXPLANATIONS', () => {
it('should have explanations for all Phase 1 steps', () => {
const phase1Steps = [
const phase1Steps: StepExplanationKey[] = [
'use-case-workshop',
'screening',
'modules',
@@ -29,7 +31,7 @@ describe('STEP_EXPLANATIONS', () => {
})
it('should have explanations for all Phase 2 steps', () => {
const phase2Steps = [
const phase2Steps: StepExplanationKey[] = [
'ai-act',
'obligations',
'dsfa',
@@ -93,8 +95,8 @@ describe('STEP_EXPLANATIONS', () => {
expect(dsfa.explanation.length).toBeGreaterThan(50)
})
it('should cover all 19 SDK steps', () => {
const allStepIds = [
it('should cover all core SDK steps', () => {
const coreStepIds: StepExplanationKey[] = [
// Phase 1
'use-case-workshop',
'screening',
@@ -118,10 +120,11 @@ describe('STEP_EXPLANATIONS', () => {
'escalations',
]
expect(Object.keys(STEP_EXPLANATIONS).length).toBe(allStepIds.length)
allStepIds.forEach(stepId => {
coreStepIds.forEach(stepId => {
expect(STEP_EXPLANATIONS[stepId]).toBeDefined()
})
// Ensure we have at least the core steps plus additional module explanations
expect(Object.keys(STEP_EXPLANATIONS).length).toBeGreaterThanOrEqual(coreStepIds.length)
})
})

View File

@@ -1,722 +0,0 @@
import type { ScopeProfilingAnswer, ComplianceDepthLevel, ScopeDocumentType } from './compliance-scope-types'
/**
 * One golden test case for the scope-profiling engine: a fixed set of
 * questionnaire answers plus the expected classification outcome.
 */
export interface GoldenTest {
/** Stable case id, e.g. 'GT-01'. */
id: string
/** Short human-readable scenario name. */
name: string
/** One-line description of the scenario being modelled. */
description: string
/** Questionnaire answers fed into the profiling engine. */
answers: ScopeProfilingAnswer[]
expectedLevel: ComplianceDepthLevel | null // null for prefill tests
/** Documents that must at least appear in the result — presumably a subset check, not an exact match; verify against the runner. */
expectedMinDocuments?: ScopeDocumentType[]
/** Hard-trigger ids (e.g. 'HT-B01') expected to fire; empty array means none may fire. */
expectedHardTriggerIds?: string[]
/** Whether a DSFA (data protection impact assessment) is expected to be required. */
expectedDsfaRequired?: boolean
/** Free-form labels for grouping/filtering cases (e.g. 'baseline', 'hard-trigger'). */
tags: string[]
}
export const GOLDEN_TESTS: GoldenTest[] = [
// GT-01: 2-Person Freelancer, nur B2B, DE-Hosting → L1
{
id: 'GT-01',
name: '2-Person Freelancer B2B',
description: 'Kleinstes Setup ohne besondere Risiken',
answers: [
{ questionId: 'org_employee_count', value: '2' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'consulting' },
{ questionId: 'data_health', value: false },
{ questionId: 'data_genetic', value: false },
{ questionId: 'data_biometric', value: false },
{ questionId: 'data_racial_ethnic', value: false },
{ questionId: 'data_political_opinion', value: false },
{ questionId: 'data_religious', value: false },
{ questionId: 'data_union_membership', value: false },
{ questionId: 'data_sexual_orientation', value: false },
{ questionId: 'data_criminal', value: false },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
{ questionId: 'process_has_dsfa', value: true },
{ questionId: 'process_has_incident_plan', value: true },
{ questionId: 'data_volume', value: '<1000' },
{ questionId: 'org_customer_count', value: '<100' },
],
expectedLevel: 'L1',
expectedMinDocuments: ['VVT', 'TOM', 'COOKIE_BANNER'],
expectedHardTriggerIds: [],
expectedDsfaRequired: false,
tags: ['baseline', 'freelancer', 'b2b'],
},
// GT-02: Solo IT-Berater → L1
{
id: 'GT-02',
name: 'Solo IT-Berater',
description: 'Einzelperson, minimale Datenverarbeitung',
answers: [
{ questionId: 'org_employee_count', value: '1' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'it_services' },
{ questionId: 'data_health', value: false },
{ questionId: 'data_genetic', value: false },
{ questionId: 'data_biometric', value: false },
{ questionId: 'data_volume', value: '<1000' },
{ questionId: 'org_customer_count', value: '<50' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L1',
expectedHardTriggerIds: [],
tags: ['baseline', 'solo', 'minimal'],
},
// GT-03: 5-Person Agentur, Website, kein Tracking → L1
{
id: 'GT-03',
name: '5-Person Agentur ohne Tracking',
description: 'Kleine Agentur, einfache Website ohne Analytics',
answers: [
{ questionId: 'org_employee_count', value: '5' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'marketing' },
{ questionId: 'tech_has_website', value: true },
{ questionId: 'tech_has_tracking', value: false },
{ questionId: 'data_volume', value: '1000-10000' },
{ questionId: 'org_customer_count', value: '100-1000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L1',
expectedMinDocuments: ['VVT', 'TOM', 'COOKIE_BANNER'],
tags: ['baseline', 'agency', 'simple'],
},
// GT-04: 30-Person SaaS B2B, EU-Cloud → L2 (scale trigger)
{
id: 'GT-04',
name: '30-Person SaaS B2B',
description: 'Scale-Trigger durch Mitarbeiterzahl',
answers: [
{ questionId: 'org_employee_count', value: '30' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'software' },
{ questionId: 'tech_has_cloud', value: true },
{ questionId: 'data_volume', value: '10000-100000' },
{ questionId: 'org_customer_count', value: '1000-10000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
{ questionId: 'process_has_dsfa', value: false },
],
expectedLevel: 'L2',
expectedMinDocuments: ['VVT', 'TOM', 'AVV', 'COOKIE_BANNER'],
tags: ['scale', 'saas', 'growth'],
},
// GT-05: 50-Person Handel B2C, Webshop → L2 (B2C+Webshop)
{
id: 'GT-05',
name: '50-Person E-Commerce B2C',
description: 'B2C mit Webshop erhöht Anforderungen',
answers: [
{ questionId: 'org_employee_count', value: '50' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'retail' },
{ questionId: 'tech_has_webshop', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'org_customer_count', value: '10000-100000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L2',
expectedHardTriggerIds: ['HT-H01'],
expectedMinDocuments: ['VVT', 'TOM', 'AVV', 'COOKIE_BANNER', 'EINWILLIGUNG'],
tags: ['b2c', 'webshop', 'retail'],
},
// GT-06: 80-Person Dienstleister, Cloud → L2 (scale)
{
id: 'GT-06',
name: '80-Person Dienstleister',
description: 'Größerer Betrieb mit Cloud-Services',
answers: [
{ questionId: 'org_employee_count', value: '80' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'professional_services' },
{ questionId: 'tech_has_cloud', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'org_customer_count', value: '1000-10000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L2',
expectedMinDocuments: ['VVT', 'TOM', 'AVV'],
tags: ['scale', 'services'],
},
// GT-07: 20-Person Startup mit GA4 Tracking → L2 (tracking)
{
id: 'GT-07',
name: 'Startup mit Google Analytics',
description: 'Tracking-Tools erhöhen Compliance-Anforderungen',
answers: [
{ questionId: 'org_employee_count', value: '20' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'technology' },
{ questionId: 'tech_has_website', value: true },
{ questionId: 'tech_has_tracking', value: true },
{ questionId: 'tech_tracking_tools', value: 'google_analytics' },
{ questionId: 'data_volume', value: '10000-100000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L2',
expectedMinDocuments: ['VVT', 'TOM', 'COOKIE_BANNER', 'EINWILLIGUNG'],
tags: ['tracking', 'analytics', 'startup'],
},
// GT-08: Kita-App (Minderjaehrige) → L3 (HT-B01)
{
id: 'GT-08',
name: 'Kita-App für Eltern',
description: 'Datenverarbeitung von Minderjährigen unter 16',
answers: [
{ questionId: 'org_employee_count', value: '15' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'education' },
{ questionId: 'data_subjects_minors', value: true },
{ questionId: 'data_subjects_minors_age', value: '<16' },
{ questionId: 'data_volume', value: '1000-10000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-B01'],
expectedDsfaRequired: true,
expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'EINWILLIGUNG', 'AVV'],
tags: ['hard-trigger', 'minors', 'education'],
},
// GT-09: Krankenhaus-Software → L3 (HT-A01)
{
id: 'GT-09',
name: 'Krankenhaus-Verwaltungssoftware',
description: 'Gesundheitsdaten Art. 9 DSGVO',
answers: [
{ questionId: 'org_employee_count', value: '200' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'healthcare' },
{ questionId: 'data_health', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'org_customer_count', value: '10-50' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-A01'],
expectedDsfaRequired: true,
expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV'],
tags: ['hard-trigger', 'health', 'art9'],
},
// GT-10: HR-Scoring-Plattform → L3 (HT-C01)
{
id: 'GT-10',
name: 'HR-Scoring für Bewerbungen',
description: 'Automatisierte Entscheidungen im HR-Bereich',
answers: [
{ questionId: 'org_employee_count', value: '40' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'hr_tech' },
{ questionId: 'tech_has_adm', value: true },
{ questionId: 'tech_adm_type', value: 'profiling' },
{ questionId: 'tech_adm_impact', value: 'employment' },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-C01'],
expectedDsfaRequired: true,
expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV'],
tags: ['hard-trigger', 'adm', 'profiling'],
},
// GT-11: Fintech Kreditscoring → L3 (HT-H05 + C01)
{
id: 'GT-11',
name: 'Fintech Kreditscoring',
description: 'Finanzsektor mit automatisierten Entscheidungen',
answers: [
{ questionId: 'org_employee_count', value: '120' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'finance' },
{ questionId: 'tech_has_adm', value: true },
{ questionId: 'tech_adm_type', value: 'scoring' },
{ questionId: 'tech_adm_impact', value: 'credit' },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-H05', 'HT-C01'],
expectedDsfaRequired: true,
expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV'],
tags: ['hard-trigger', 'finance', 'scoring'],
},
// GT-12: Bildungsplattform Minderjaehrige → L3 (HT-B01)
{
id: 'GT-12',
name: 'Online-Lernplattform für Schüler',
description: 'Bildungssektor mit minderjährigen Nutzern',
answers: [
{ questionId: 'org_employee_count', value: '35' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'education' },
{ questionId: 'data_subjects_minors', value: true },
{ questionId: 'data_subjects_minors_age', value: '<16' },
{ questionId: 'tech_has_tracking', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-B01'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'education', 'minors'],
},
// GT-13: Datenbroker → L3 (HT-H02)
{
id: 'GT-13',
name: 'Datenbroker / Adresshandel',
description: 'Geschäftsmodell basiert auf Datenhandel',
answers: [
{ questionId: 'org_employee_count', value: '25' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'data_broker' },
{ questionId: 'data_is_core_business', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'org_customer_count', value: '100-1000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-H02'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'data-broker'],
},
// GT-14: Video + ADM → L3 (HT-D05)
{
id: 'GT-14',
name: 'Videoüberwachung mit Gesichtserkennung',
description: 'Biometrische Daten mit automatisierter Verarbeitung',
answers: [
{ questionId: 'org_employee_count', value: '60' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'security' },
{ questionId: 'data_biometric', value: true },
{ questionId: 'tech_has_video_surveillance', value: true },
{ questionId: 'tech_has_adm', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-D05'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'biometric', 'video'],
},
// GT-15: 500-MA Konzern ohne Zert → L3 (HT-G04)
{
id: 'GT-15',
name: 'Großunternehmen ohne Zertifizierung',
description: 'Scale-Trigger durch Unternehmensgröße',
answers: [
{ questionId: 'org_employee_count', value: '500' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'manufacturing' },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'org_customer_count', value: '>100000' },
{ questionId: 'cert_has_iso27001', value: false },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-G04'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'scale', 'enterprise'],
},
// GT-16: ISO 27001 Anbieter → L4 (HT-F01)
{
id: 'GT-16',
name: 'ISO 27001 zertifizierter Cloud-Provider',
description: 'Zertifizierung erfordert höchste Compliance',
answers: [
{ questionId: 'org_employee_count', value: '150' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'cloud_services' },
{ questionId: 'cert_has_iso27001', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
{ questionId: 'process_has_dsfa', value: true },
],
expectedLevel: 'L4',
expectedHardTriggerIds: ['HT-F01'],
expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV', 'CERT_ISO27001'],
tags: ['hard-trigger', 'certification', 'iso'],
},
// GT-17: TISAX Automobilzulieferer → L4 (HT-F04)
{
id: 'GT-17',
name: 'TISAX-zertifizierter Automobilzulieferer',
description: 'Automotive-Branche mit TISAX-Anforderungen',
answers: [
{ questionId: 'org_employee_count', value: '300' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'automotive' },
{ questionId: 'cert_has_tisax', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'org_customer_count', value: '10-50' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L4',
expectedHardTriggerIds: ['HT-F04'],
tags: ['hard-trigger', 'certification', 'tisax'],
},
// GT-18: ISO 27701 Cloud-Provider → L4 (HT-F02)
{
id: 'GT-18',
name: 'ISO 27701 Privacy-zertifiziert',
description: 'Privacy-spezifische Zertifizierung',
answers: [
{ questionId: 'org_employee_count', value: '200' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'cloud_services' },
{ questionId: 'cert_has_iso27701', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
{ questionId: 'process_has_dsfa', value: true },
],
expectedLevel: 'L4',
expectedHardTriggerIds: ['HT-F02'],
tags: ['hard-trigger', 'certification', 'privacy'],
},
// GT-19: Grosskonzern + Art.9 + >1M DS → L4 (HT-G05)
{
id: 'GT-19',
name: 'Konzern mit sensiblen Massendaten',
description: 'Kombination aus Scale und Art. 9 Daten',
answers: [
{ questionId: 'org_employee_count', value: '2000' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'insurance' },
{ questionId: 'data_health', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'org_customer_count', value: '>100000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
],
expectedLevel: 'L4',
expectedHardTriggerIds: ['HT-G05'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'scale', 'art9'],
},
// GT-20: Nur B2C Webshop → L2 (HT-H01)
{
id: 'GT-20',
name: 'Reiner B2C Webshop',
description: 'B2C-Trigger ohne weitere Risiken',
answers: [
{ questionId: 'org_employee_count', value: '12' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'retail' },
{ questionId: 'tech_has_webshop', value: true },
{ questionId: 'data_volume', value: '10000-100000' },
{ questionId: 'org_customer_count', value: '1000-10000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L2',
expectedHardTriggerIds: ['HT-H01'],
tags: ['b2c', 'webshop'],
},
// GT-21: Keine Daten, keine MA → L1
{
id: 'GT-21',
name: 'Minimale Datenverarbeitung',
description: 'Absolute Baseline ohne Risiken',
answers: [
{ questionId: 'org_employee_count', value: '1' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'consulting' },
{ questionId: 'data_volume', value: '<1000' },
{ questionId: 'org_customer_count', value: '<50' },
{ questionId: 'tech_has_website', value: false },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L1',
expectedHardTriggerIds: [],
tags: ['baseline', 'minimal'],
},
// GT-22: Alle Art.9 Kategorien → L3 (HT-A09)
{
id: 'GT-22',
name: 'Alle Art. 9 Kategorien',
description: 'Multiple sensible Datenkategorien',
answers: [
{ questionId: 'org_employee_count', value: '50' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'research' },
{ questionId: 'data_health', value: true },
{ questionId: 'data_genetic', value: true },
{ questionId: 'data_biometric', value: true },
{ questionId: 'data_racial_ethnic', value: true },
{ questionId: 'data_political_opinion', value: true },
{ questionId: 'data_religious', value: true },
{ questionId: 'data_union_membership', value: true },
{ questionId: 'data_sexual_orientation', value: true },
{ questionId: 'data_criminal', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-A09'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'art9', 'multiple-categories'],
},
// GT-23: Drittland + Art.9 → L3 (HT-E04)
{
id: 'GT-23',
name: 'Drittlandtransfer mit Art. 9 Daten',
description: 'Kombination aus Drittland und sensiblen Daten',
answers: [
{ questionId: 'org_employee_count', value: '45' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'us' },
{ questionId: 'org_industry', value: 'healthcare' },
{ questionId: 'data_health', value: true },
{ questionId: 'tech_has_third_country_transfer', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-E04'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'third-country', 'art9'],
},
// GT-24: Minderjaehrige + Art.9 → L4 (HT-B02)
{
id: 'GT-24',
name: 'Minderjährige mit Gesundheitsdaten',
description: 'Kombination aus vulnerabler Gruppe und Art. 9',
answers: [
{ questionId: 'org_employee_count', value: '30' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'healthcare' },
{ questionId: 'data_subjects_minors', value: true },
{ questionId: 'data_subjects_minors_age', value: '<16' },
{ questionId: 'data_health', value: true },
{ questionId: 'data_volume', value: '10000-100000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L4',
expectedHardTriggerIds: ['HT-B02'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'minors', 'health', 'combined-risk'],
},
// GT-25: KI autonome Entscheidungen → L3 (HT-C02)
{
id: 'GT-25',
name: 'KI mit autonomen Entscheidungen',
description: 'AI Act relevante autonome Systeme',
answers: [
{ questionId: 'org_employee_count', value: '70' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'ai_services' },
{ questionId: 'tech_has_adm', value: true },
{ questionId: 'tech_adm_type', value: 'autonomous_decision' },
{ questionId: 'tech_has_ai', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-C02'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'ai', 'adm'],
},
// GT-26: Multiple Zertifizierungen → L4 (HT-F01-05)
{
id: 'GT-26',
name: 'Multiple Zertifizierungen',
description: 'Mehrere Zertifizierungen kombiniert',
answers: [
{ questionId: 'org_employee_count', value: '250' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'cloud_services' },
{ questionId: 'cert_has_iso27001', value: true },
{ questionId: 'cert_has_iso27701', value: true },
{ questionId: 'cert_has_soc2', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
{ questionId: 'process_has_dsfa', value: true },
],
expectedLevel: 'L4',
expectedHardTriggerIds: ['HT-F01', 'HT-F02', 'HT-F03'],
tags: ['hard-trigger', 'certification', 'multiple'],
},
// GT-27: Oeffentlicher Sektor + Gesundheit → L3 (HT-H07 + A01)
{
id: 'GT-27',
name: 'Öffentlicher Sektor mit Gesundheitsdaten',
description: 'Behörde mit Art. 9 Datenverarbeitung',
answers: [
{ questionId: 'org_employee_count', value: '120' },
{ questionId: 'org_business_model', value: 'b2g' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'public_sector' },
{ questionId: 'org_is_public_sector', value: true },
{ questionId: 'data_health', value: true },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-H07', 'HT-A01'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'public-sector', 'health'],
},
// GT-28: Bildung + KI + Minderjaehrige → L4 (HT-B03)
{
id: 'GT-28',
name: 'EdTech mit KI für Minderjährige',
description: 'Triple-Risiko: Bildung, KI, vulnerable Gruppe',
answers: [
{ questionId: 'org_employee_count', value: '55' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'education' },
{ questionId: 'data_subjects_minors', value: true },
{ questionId: 'data_subjects_minors_age', value: '<16' },
{ questionId: 'tech_has_ai', value: true },
{ questionId: 'tech_has_adm', value: true },
{ questionId: 'data_volume', value: '100000-1000000' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L4',
expectedHardTriggerIds: ['HT-B03'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'education', 'ai', 'minors', 'triple-risk'],
},
// GT-29: Freelancer mit 1 Art.9 → L3 (hard trigger override despite low score)
{
id: 'GT-29',
name: 'Freelancer mit Gesundheitsdaten',
description: 'Hard Trigger überschreibt niedrige Score-Bewertung',
answers: [
{ questionId: 'org_employee_count', value: '1' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'de' },
{ questionId: 'org_industry', value: 'healthcare' },
{ questionId: 'data_health', value: true },
{ questionId: 'data_volume', value: '<1000' },
{ questionId: 'org_customer_count', value: '<50' },
{ questionId: 'process_has_vvt', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-A01'],
expectedDsfaRequired: true,
tags: ['hard-trigger', 'override', 'art9', 'freelancer'],
},
// GT-30: Enterprise, alle Prozesse vorhanden → L3 (good process maturity)
{
id: 'GT-30',
name: 'Enterprise mit reifer Prozesslandschaft',
description: 'Große Organisation mit allen Compliance-Prozessen',
answers: [
{ questionId: 'org_employee_count', value: '450' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'tech_hosting_location', value: 'eu' },
{ questionId: 'org_industry', value: 'manufacturing' },
{ questionId: 'data_volume', value: '>1000000' },
{ questionId: 'org_customer_count', value: '10000-100000' },
{ questionId: 'process_has_vvt', value: true },
{ questionId: 'process_has_tom', value: true },
{ questionId: 'process_has_dsfa', value: true },
{ questionId: 'process_has_incident_plan', value: true },
{ questionId: 'process_has_dsb', value: true },
{ questionId: 'process_has_training', value: true },
],
expectedLevel: 'L3',
expectedHardTriggerIds: ['HT-G04'],
tags: ['enterprise', 'mature', 'all-processes'],
},
// GT-31: SMB, nur 1 Block beantwortet → L1 (graceful degradation)
{
id: 'GT-31',
name: 'Unvollständige Profilerstellung',
description: 'Test für graceful degradation bei unvollständigen Antworten',
answers: [
{ questionId: 'org_employee_count', value: '8' },
{ questionId: 'org_business_model', value: 'b2b' },
{ questionId: 'org_industry', value: 'consulting' },
// Nur Block 1 (Organization) beantwortet, Rest fehlt
],
expectedLevel: 'L1',
expectedHardTriggerIds: [],
tags: ['incomplete', 'degradation', 'edge-case'],
},
// GT-32: CompanyProfile Prefill Konsistenz → null (prefill test, no expected level)
{
id: 'GT-32',
name: 'CompanyProfile Prefill Test',
description: 'Prüft ob CompanyProfile-Daten korrekt in ScopeProfile übernommen werden',
answers: [
{ questionId: 'org_employee_count', value: '25' },
{ questionId: 'org_business_model', value: 'b2c' },
{ questionId: 'org_industry', value: 'retail' },
{ questionId: 'tech_hosting_location', value: 'eu' },
// Diese Werte sollten mit CompanyProfile-Prefill übereinstimmen
],
expectedLevel: null,
tags: ['prefill', 'integration', 'consistency'],
},
]

View File

@@ -1,153 +0,0 @@
import { IntentClassifier } from '../intent-classifier'
// Unit tests for the pattern-based IntentClassifier.
// The classifier maps free-text user input to an agent mode
// (draft / validate / ask / explain) plus an optional document type,
// using regex pattern matching only — no LLM call is involved.
describe('IntentClassifier', () => {
  const classifier = new IntentClassifier()

  // "draft" = user wants a document created/generated (German + English verbs).
  describe('classify - Draft mode', () => {
    it.each([
      ['Erstelle ein VVT fuer unseren Hauptprozess', 'draft'],
      ['Generiere eine TOM-Dokumentation', 'draft'],
      ['Schreibe eine Datenschutzerklaerung', 'draft'],
      ['Verfasse einen Entwurf fuer das Loeschkonzept', 'draft'],
      ['Create a DSFA document', 'draft'],
      ['Draft a privacy policy for us', 'draft'],
      ['Neues VVT anlegen', 'draft'],
    ])('"%s" should classify as %s', (input, expectedMode) => {
      const result = classifier.classify(input)
      expect(result.mode).toBe(expectedMode)
      expect(result.confidence).toBeGreaterThan(0.7)
    })
  })

  // "validate" = user asks for a correctness / consistency check.
  describe('classify - Validate mode', () => {
    it.each([
      ['Pruefe die Konsistenz meiner Dokumente', 'validate'],
      ['Ist mein VVT korrekt?', 'validate'],
      ['Validiere die TOM gegen das VVT', 'validate'],
      ['Check die Vollstaendigkeit', 'validate'],
      ['Stimmt das mit der DSFA ueberein?', 'validate'],
      ['Cross-Check VVT und TOM', 'validate'],
    ])('"%s" should classify as %s', (input, expectedMode) => {
      const result = classifier.classify(input)
      expect(result.mode).toBe(expectedMode)
      expect(result.confidence).toBeGreaterThan(0.7)
    })
  })

  // "ask" = user asks what is missing / what to do next (gap analysis).
  describe('classify - Ask mode', () => {
    it.each([
      ['Was fehlt noch in meinem Profil?', 'ask'],
      ['Zeige mir die Luecken', 'ask'],
      ['Welche Dokumente fehlen noch?', 'ask'],
      ['Was ist der naechste Schritt?', 'ask'],
      ['Welche Informationen brauche ich noch?', 'ask'],
    ])('"%s" should classify as %s', (input, expectedMode) => {
      const result = classifier.classify(input)
      expect(result.mode).toBe(expectedMode)
      expect(result.confidence).toBeGreaterThan(0.6)
    })
  })

  // "explain" is the low-confidence fallback for everything else.
  describe('classify - Explain mode (fallback)', () => {
    it.each([
      ['Was ist DSGVO?', 'explain'],
      ['Erklaere mir Art. 30', 'explain'],
      ['Hallo', 'explain'],
      ['Danke fuer die Hilfe', 'explain'],
    ])('"%s" should classify as %s (fallback)', (input, expectedMode) => {
      const result = classifier.classify(input)
      expect(result.mode).toBe(expectedMode)
    })
  })

  // Confidence heuristics: per-mode base confidence, boosted by
  // document-type detection and by multiple pattern matches.
  describe('classify - confidence thresholds', () => {
    it('should have high confidence for clear draft intents', () => {
      const result = classifier.classify('Erstelle ein neues VVT')
      expect(result.confidence).toBeGreaterThanOrEqual(0.85)
    })
    it('should have lower confidence for ambiguous inputs', () => {
      const result = classifier.classify('Hallo')
      expect(result.confidence).toBeLessThan(0.6)
    })
    it('should boost confidence with document type detection', () => {
      const withDoc = classifier.classify('Erstelle VVT')
      const withoutDoc = classifier.classify('Erstelle etwas')
      expect(withDoc.confidence).toBeGreaterThanOrEqual(withoutDoc.confidence)
    })
    it('should boost confidence with multiple pattern matches', () => {
      const single = classifier.classify('Erstelle Dokument')
      const multi = classifier.classify('Erstelle und generiere ein neues Dokument')
      expect(multi.confidence).toBeGreaterThanOrEqual(single.confidence)
    })
  })

  // Document-type detection over German/English keywords and GDPR
  // article references (e.g. Art. 30 → vvt, Art. 32 → tom, Art. 35 → dsfa).
  describe('detectDocumentType', () => {
    it.each([
      ['VVT erstellen', 'vvt'],
      ['Verarbeitungsverzeichnis', 'vvt'],
      ['Art. 30 Dokumentation', 'vvt'],
      ['TOM definieren', 'tom'],
      ['technisch organisatorische Massnahmen', 'tom'],
      ['Art. 32 Massnahmen', 'tom'],
      ['DSFA durchfuehren', 'dsfa'],
      ['Datenschutz-Folgenabschaetzung', 'dsfa'],
      ['Art. 35 Pruefung', 'dsfa'],
      ['DPIA erstellen', 'dsfa'],
      ['Datenschutzerklaerung', 'dsi'],
      ['Privacy Policy', 'dsi'],
      ['Art. 13 Information', 'dsi'],
      ['Loeschfristen definieren', 'lf'],
      ['Loeschkonzept erstellen', 'lf'],
      ['Retention Policy', 'lf'],
      ['Auftragsverarbeitung', 'av_vertrag'],
      ['AVV erstellen', 'av_vertrag'],
      ['Art. 28 Vertrag', 'av_vertrag'],
      ['Einwilligung einholen', 'einwilligung'],
      ['Consent Management', 'einwilligung'],
      ['Cookie Banner', 'einwilligung'],
    ])('"%s" should detect document type %s', (input, expectedType) => {
      const result = classifier.detectDocumentType(input)
      expect(result).toBe(expectedType)
    })
    it('should return undefined for unrecognized types', () => {
      expect(classifier.detectDocumentType('Hallo Welt')).toBeUndefined()
      expect(classifier.detectDocumentType('Was kostet das?')).toBeUndefined()
    })
  })

  // Inputs with real umlauts and with ae/oe/ue substitutions must
  // classify identically (the classifier normalizes before matching).
  describe('classify - Umlaut handling', () => {
    it('should handle German umlauts correctly', () => {
      // With actual umlauts (ä, ö, ü)
      const result1 = classifier.classify('Prüfe die Vollständigkeit')
      expect(result1.mode).toBe('validate')
      // With ae/oe/ue substitution
      const result2 = classifier.classify('Pruefe die Vollstaendigkeit')
      expect(result2.mode).toBe('validate')
    })
    it('should handle ß correctly', () => {
      const result = classifier.classify('Schließe Lücken')
      // Should still detect via normalized patterns
      expect(result).toBeDefined()
    })
  })

  // Mode and document type are detected independently from one input.
  describe('classify - combined mode + document type', () => {
    it('should detect both mode and document type', () => {
      const result = classifier.classify('Erstelle ein VVT fuer unsere Firma')
      expect(result.mode).toBe('draft')
      expect(result.detectedDocumentType).toBe('vvt')
    })
    it('should detect validate + document type', () => {
      const result = classifier.classify('Pruefe mein TOM auf Konsistenz')
      expect(result.mode).toBe('validate')
      expect(result.detectedDocumentType).toBe('tom')
    })
  })
})

View File

@@ -1,312 +0,0 @@
import { StateProjector } from '../state-projector'
import type { SDKState } from '../../types'
// Unit tests for StateProjector: verifies that the token-budgeted
// projections (draft / ask / validate) extract the correct slices from a
// full SDKState and degrade gracefully when the state is empty.
describe('StateProjector', () => {
  const projector = new StateProjector()

  // Helper: minimal SDKState
  function makeState(overrides: Partial<SDKState> = {}): SDKState {
    return {
      version: '1.0.0',
      lastModified: new Date(),
      tenantId: 'test',
      userId: 'user1',
      subscription: 'PROFESSIONAL',
      customerType: null,
      companyProfile: null,
      complianceScope: null,
      sourcePolicy: null,
      currentPhase: 1,
      currentStep: 'company-profile',
      completedSteps: [],
      checkpoints: {},
      importedDocuments: [],
      gapAnalysis: null,
      useCases: [],
      activeUseCase: null,
      screening: null,
      modules: [],
      requirements: [],
      controls: [],
      evidence: [],
      checklist: [],
      risks: [],
      aiActClassification: null,
      obligations: [],
      dsfa: null,
      toms: [],
      retentionPolicies: [],
      vvt: [],
      documents: [],
      cookieBanner: null,
      consents: [],
      dsrConfig: null,
      escalationWorkflows: [],
      preferences: {
        language: 'de',
        theme: 'light',
        compactMode: false,
        showHints: true,
        autoSave: true,
        autoValidate: true,
        allowParallelWork: true,
      },
      ...overrides,
    } as SDKState
  }

  // Helper: state after a completed scoping run — has a scope decision
  // (level configurable), three required documents, one risk flag, one
  // open gap, and a single existing VVT entry.
  function makeDecisionState(level: string = 'L2'): SDKState {
    return makeState({
      companyProfile: {
        companyName: 'Test GmbH',
        industry: 'IT-Dienstleistung',
        employeeCount: 50,
        businessModel: 'SaaS',
        isPublicSector: false,
      } as any,
      complianceScope: {
        decision: {
          id: 'dec-1',
          determinedLevel: level,
          scores: { risk_score: 60, complexity_score: 50, assurance_need: 55, composite_score: 55 },
          triggeredHardTriggers: [],
          requiredDocuments: [
            { documentType: 'vvt', label: 'VVT', required: true, depth: 'Standard', detailItems: ['Bezeichnung', 'Zweck'], estimatedEffort: '2h', triggeredBy: [] },
            { documentType: 'tom', label: 'TOM', required: true, depth: 'Standard', detailItems: ['Verschluesselung'], estimatedEffort: '3h', triggeredBy: [] },
            { documentType: 'lf', label: 'LF', required: true, depth: 'Basis', detailItems: [], estimatedEffort: '1h', triggeredBy: [] },
          ],
          riskFlags: [
            { id: 'rf-1', severity: 'MEDIUM', title: 'Cloud-Nutzung', description: '', recommendation: 'AVV pruefen' },
          ],
          gaps: [
            { id: 'gap-1', severity: 'high', title: 'TOM fehlt', description: 'Keine TOM definiert', relatedDocuments: ['tom'] },
          ],
          nextActions: [],
          reasoning: [],
          createdAt: new Date().toISOString(),
          updatedAt: new Date().toISOString(),
        },
        answers: [],
      } as any,
      vvt: [{ id: 'vvt-1', name: 'Kundenverwaltung' }] as any[],
      toms: [],
      retentionPolicies: [],
    })
  }

  // Draft projection: scope decision + company profile + constraints.
  describe('projectForDraft', () => {
    it('should return a DraftContext with correct structure', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result).toHaveProperty('decisions')
      expect(result).toHaveProperty('companyProfile')
      expect(result).toHaveProperty('constraints')
      expect(result.decisions.level).toBe('L2')
    })
    it('should project company profile', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result.companyProfile.name).toBe('Test GmbH')
      expect(result.companyProfile.industry).toBe('IT-Dienstleistung')
      expect(result.companyProfile.employeeCount).toBe(50)
    })
    it('should provide defaults when no company profile', () => {
      const state = makeState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result.companyProfile.name).toBe('Unbekannt')
      expect(result.companyProfile.industry).toBe('Unbekannt')
      expect(result.companyProfile.employeeCount).toBe(0)
    })
    it('should extract constraints and depth requirements', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result.constraints.depthRequirements).toBeDefined()
      expect(result.constraints.boundaries.length).toBeGreaterThan(0)
    })
    it('should extract risk flags', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result.constraints.riskFlags.length).toBe(1)
      expect(result.constraints.riskFlags[0].title).toBe('Cloud-Nutzung')
    })
    it('should include existing document data when available', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result.existingDocumentData).toBeDefined()
      expect((result.existingDocumentData as any).totalCount).toBe(1)
    })
    it('should return undefined existingDocumentData when none exists', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'tom')
      expect(result.existingDocumentData).toBeUndefined()
    })
    it('should filter required documents', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result.decisions.requiredDocuments.length).toBe(3)
      expect(result.decisions.requiredDocuments.every(d => d.documentType)).toBe(true)
    })
    it('should handle empty state gracefully', () => {
      const state = makeState()
      const result = projector.projectForDraft(state, 'vvt')
      expect(result.decisions.level).toBe('L1')
      expect(result.decisions.hardTriggers).toEqual([])
      expect(result.decisions.requiredDocuments).toEqual([])
    })
  })

  // Ask projection: gaps and documents still missing from the state.
  describe('projectForAsk', () => {
    it('should return a GapContext with correct structure', () => {
      const state = makeDecisionState()
      const result = projector.projectForAsk(state)
      expect(result).toHaveProperty('unansweredQuestions')
      expect(result).toHaveProperty('gaps')
      expect(result).toHaveProperty('missingDocuments')
    })
    it('should identify missing documents', () => {
      const state = makeDecisionState()
      // vvt exists, tom and lf are missing
      const result = projector.projectForAsk(state)
      expect(result.missingDocuments.some(d => d.documentType === 'tom')).toBe(true)
      expect(result.missingDocuments.some(d => d.documentType === 'lf')).toBe(true)
    })
    it('should not list existing documents as missing', () => {
      const state = makeDecisionState()
      const result = projector.projectForAsk(state)
      // vvt exists in state
      expect(result.missingDocuments.some(d => d.documentType === 'vvt')).toBe(false)
    })
    it('should include gaps from scope decision', () => {
      const state = makeDecisionState()
      const result = projector.projectForAsk(state)
      expect(result.gaps.length).toBe(1)
      expect(result.gaps[0].title).toBe('TOM fehlt')
    })
    it('should handle empty state', () => {
      const state = makeState()
      const result = projector.projectForAsk(state)
      expect(result.gaps).toEqual([])
      expect(result.missingDocuments).toEqual([])
    })
  })

  // Validate projection: document summaries plus cross-reference data.
  describe('projectForValidate', () => {
    it('should return a ValidationContext with correct structure', () => {
      const state = makeDecisionState()
      const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf'])
      expect(result).toHaveProperty('documents')
      expect(result).toHaveProperty('crossReferences')
      expect(result).toHaveProperty('scopeLevel')
      expect(result).toHaveProperty('depthRequirements')
    })
    it('should include all requested document types', () => {
      const state = makeDecisionState()
      const result = projector.projectForValidate(state, ['vvt', 'tom'])
      expect(result.documents.length).toBe(2)
      expect(result.documents.map(d => d.type)).toContain('vvt')
      expect(result.documents.map(d => d.type)).toContain('tom')
    })
    it('should include cross-references', () => {
      const state = makeDecisionState()
      const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf'])
      expect(result.crossReferences).toHaveProperty('vvtCategories')
      expect(result.crossReferences).toHaveProperty('tomControls')
      expect(result.crossReferences).toHaveProperty('retentionCategories')
      expect(result.crossReferences.vvtCategories.length).toBe(1)
      expect(result.crossReferences.vvtCategories[0]).toBe('Kundenverwaltung')
    })
    it('should include scope level', () => {
      const state = makeDecisionState('L3')
      const result = projector.projectForValidate(state, ['vvt'])
      expect(result.scopeLevel).toBe('L3')
    })
    it('should include depth requirements per document type', () => {
      const state = makeDecisionState()
      const result = projector.projectForValidate(state, ['vvt', 'tom'])
      expect(result.depthRequirements).toHaveProperty('vvt')
      expect(result.depthRequirements).toHaveProperty('tom')
    })
    it('should summarize documents', () => {
      const state = makeDecisionState()
      const result = projector.projectForValidate(state, ['vvt', 'tom'])
      expect(result.documents[0].contentSummary).toContain('1')
      expect(result.documents[1].contentSummary).toContain('Keine TOM')
    })
    it('should handle empty state', () => {
      const state = makeState()
      const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf'])
      expect(result.scopeLevel).toBe('L1')
      expect(result.crossReferences.vvtCategories).toEqual([])
      expect(result.crossReferences.tomControls).toEqual([])
    })
  })

  // Budget smoke tests: serialized projections must stay near the
  // documented token budgets (estimated at ~4 characters per token).
  describe('token budget estimation', () => {
    it('projectForDraft should produce compact output', () => {
      const state = makeDecisionState()
      const result = projector.projectForDraft(state, 'vvt')
      const json = JSON.stringify(result)
      // Rough token estimation: ~4 chars per token
      const estimatedTokens = json.length / 4
      expect(estimatedTokens).toBeLessThan(2000) // Budget is ~1500
    })
    it('projectForAsk should produce very compact output', () => {
      const state = makeDecisionState()
      const result = projector.projectForAsk(state)
      const json = JSON.stringify(result)
      const estimatedTokens = json.length / 4
      expect(estimatedTokens).toBeLessThan(1000) // Budget is ~600
    })
    it('projectForValidate should stay within budget', () => {
      const state = makeDecisionState()
      const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf'])
      const json = JSON.stringify(result)
      const estimatedTokens = json.length / 4
      expect(estimatedTokens).toBeLessThan(3000) // Budget is ~2000
    })
  })
})

View File

@@ -95,11 +95,11 @@ export function buildAllowedFacts(
const scope = state.complianceScope
return {
companyName: profile?.name ?? 'Unbekannt',
companyName: profile?.companyName ?? 'Unbekannt',
legalForm: profile?.legalForm ?? '',
industry: profile?.industry ?? '',
location: profile?.location ?? '',
employeeCount: profile?.employeeCount ?? 0,
location: profile?.headquartersCity ?? '',
employeeCount: parseEmployeeCount(profile?.employeeCount),
teamStructure: deriveTeamStructure(profile),
itLandscape: deriveItLandscape(profile),
@@ -213,11 +213,33 @@ export function checkForDisallowedContent(
// Private Helpers
// ============================================================================
/**
 * Parses an employeeCount string (e.g. "1-9", "50-249", "1000+") into a number.
 * Ranges resolve to their rounded midpoint, "N+" to its lower bound N,
 * plain numerals to themselves; empty or unparsable input yields 0.
 */
function parseEmployeeCount(value: string | undefined | null): number {
  if (!value) return 0

  // Open-ended bucket, e.g. "1000+" → lower bound.
  const openEnded = /^(\d+)\+$/.exec(value)
  if (openEnded) return Number.parseInt(openEnded[1], 10)

  // Bounded range, e.g. "50-249" → rounded midpoint.
  const bounded = /^(\d+)-(\d+)$/.exec(value)
  if (bounded) {
    const lower = Number.parseInt(bounded[1], 10)
    const upper = Number.parseInt(bounded[2], 10)
    return Math.round((lower + upper) / 2)
  }

  // Plain numeral; anything parseInt cannot read becomes 0.
  const parsed = Number.parseInt(value, 10)
  return Number.isNaN(parsed) ? 0 : parsed
}
function deriveTeamStructure(profile: CompanyProfile | null): string {
if (!profile) return ''
// Ableitung aus verfuegbaren Profildaten
if (profile.employeeCount > 500) return 'Konzernstruktur'
if (profile.employeeCount > 50) return 'mittelstaendisch'
const count = parseEmployeeCount(profile.employeeCount)
if (count > 500) return 'Konzernstruktur'
if (count > 50) return 'mittelstaendisch'
return 'Kleinunternehmen'
}
@@ -225,15 +247,15 @@ function deriveItLandscape(profile: CompanyProfile | null): string {
if (!profile) return ''
return profile.businessModel?.includes('SaaS') ? 'Cloud-First' :
profile.businessModel?.includes('Cloud') ? 'Cloud-First' :
profile.isPublicSector ? 'On-Premise' : 'Hybrid'
'Hybrid'
}
function deriveSpecialFeatures(profile: CompanyProfile | null): string[] {
if (!profile) return []
const features: string[] = []
if (profile.isPublicSector) features.push('Oeffentlicher Sektor')
if (profile.employeeCount > 250) features.push('Grossunternehmen')
if (profile.dataProtectionOfficer) features.push('Interner DSB benannt')
const count = parseEmployeeCount(profile.employeeCount)
if (count > 250) features.push('Grossunternehmen')
if (profile.dpoName) features.push('Interner DSB benannt')
return features
}
@@ -253,5 +275,5 @@ function deriveTriggeredRegulations(
function derivePrimaryUseCases(state: SDKState): string[] {
if (!state.useCases || state.useCases.length === 0) return []
return state.useCases.slice(0, 3).map(uc => uc.name || uc.title || 'Unbenannt')
return state.useCases.slice(0, 3).map(uc => uc.name || 'Unbenannt')
}

View File

@@ -1,373 +0,0 @@
/**
* Intent Classifier - Leichtgewichtiger Pattern-Matcher
*
* Erkennt den Agent-Modus anhand des Nutzer-Inputs ohne LLM-Call.
* Deutsche und englische Muster werden unterstuetzt.
*
* Confidence-Schwellen:
* - >0.8: Hohe Sicherheit, automatisch anwenden
* - 0.6-0.8: Mittel, Nutzer kann bestaetigen
* - <0.6: Fallback zu 'explain'
*/
import type { AgentMode, IntentClassification } from './types'
import type { ScopeDocumentType } from '../compliance-scope-types'
// ============================================================================
// Pattern Definitions
// ============================================================================
/** Binds one agent mode to the regex patterns that signal it. */
interface ModePattern {
  // Agent mode selected when one of the patterns matches.
  mode: AgentMode
  // Case-insensitive German/English trigger patterns.
  patterns: RegExp[]
  /** Base confidence when at least one pattern matches. */
  baseConfidence: number
}
// Mode trigger patterns (German and English). The classifier scores every
// entry and keeps the best one; additional pattern hits on the same mode
// add a small bonus on top of baseConfidence.
const MODE_PATTERNS: ModePattern[] = [
  {
    mode: 'draft',
    baseConfidence: 0.85,
    patterns: [
      /\b(erstell|generier|entw[iu]rf|entwer[ft]|schreib|verfass|formulier|anlege)/i,
      /\b(draft|create|generate|write|compose)\b/i,
      /\b(neues?\s+(?:vvt|tom|dsfa|dokument|loeschkonzept|datenschutzerklaerung))\b/i,
      /\b(vorlage|template)\s+(erstell|generier)/i,
      /\bfuer\s+(?:uns|mich|unser)\b.*\b(erstell|schreib)/i,
    ],
  },
  {
    mode: 'validate',
    baseConfidence: 0.80,
    patterns: [
      /\b(pruef|validier|check|kontrollier|ueberpruef)\b/i,
      /\b(korrekt|richtig|vollstaendig|konsistent|komplett)\b.*\?/i,
      /\b(stimmt|passt)\b.*\b(das|mein|unser)\b/i,
      /\b(validate|verify|check|review)\b/i,
      /\b(fehler|luecken?|maengel)\b.*\b(find|such|zeig)\b/i,
      /\bcross[\s-]?check\b/i,
      /\b(vvt|tom|dsfa)\b.*\b(konsisten[tz]|widerspruch|uebereinstimm)/i,
    ],
  },
  {
    mode: 'ask',
    baseConfidence: 0.75,
    patterns: [
      /\bwas\s+fehlt\b/i,
      /\b(luecken?|gaps?)\b.*\b(zeig|find|identifizier|analysier)/i,
      /\b(unvollstaendig|unfertig|offen)\b/i,
      /\bwelche\s+(dokumente?|informationen?|daten)\b.*\b(fehlen?|brauch|benoetig)/i,
      /\b(naechste[rn]?\s+schritt|next\s+step|todo)\b/i,
      /\bworan\s+(muss|soll)\b/i,
    ],
  },
]
/**
 * Document-type detection patterns (German/English keywords plus GDPR
 * article references). Entries are checked in order; the first match wins.
 */
const DOCUMENT_TYPE_PATTERNS: Array<{
  type: ScopeDocumentType
  patterns: RegExp[]
}> = [
  {
    type: 'vvt',
    patterns: [
      /\bv{1,2}t\b/i,
      /\bverarbeitungsverzeichnis\b/i,
      /\bverarbeitungstaetigkeit/i,
      /\bprocessing\s+activit/i,
      /\bart\.?\s*30\b/i,
    ],
  },
  {
    type: 'tom',
    patterns: [
      /\btom\b/i,
      /\btechnisch.*organisatorisch.*massnahm/i,
      /\bart\.?\s*32\b/i,
      /\bsicherheitsmassnahm/i,
    ],
  },
  {
    type: 'dsfa',
    patterns: [
      /\bdsfa\b/i,
      /\bdatenschutz[\s-]?folgenabschaetzung\b/i,
      /\bdpia\b/i,
      /\bart\.?\s*35\b/i,
      /\bimpact\s+assessment\b/i,
    ],
  },
  {
    type: 'dsi',
    patterns: [
      /\bdatenschutzerklaerung\b/i,
      /\bprivacy\s+policy\b/i,
      /\bdsi\b/i,
      /\bart\.?\s*13\b/i,
      /\bart\.?\s*14\b/i,
    ],
  },
  {
    type: 'lf',
    patterns: [
      /\bloeschfrist/i,
      /\bloeschkonzept/i,
      /\bretention/i,
      /\baufbewahr/i,
    ],
  },
  {
    type: 'av_vertrag',
    patterns: [
      /\bavv?\b/i,
      /\bauftragsverarbeit/i,
      /\bdata\s+processing\s+agreement/i,
      /\bart\.?\s*28\b/i,
    ],
  },
  {
    type: 'betroffenenrechte',
    patterns: [
      /\bbetroffenenrecht/i,
      /\bdata\s+subject\s+right/i,
      /\bart\.?\s*15\b/i,
      /\bauskunft/i,
    ],
  },
  {
    type: 'einwilligung',
    patterns: [
      /\beinwillig/i,
      /\bconsent/i,
      /\bcookie/i,
    ],
  },
  {
    type: 'datenpannen',
    patterns: [
      /\bdatenpanne/i,
      /\bdata\s*breach/i,
      /\bart\.?\s*33\b/i,
      /\bsicherheitsvorfall/i,
      /\bincident/i,
      /\bmelde.*vorfall/i,
    ],
  },
  {
    type: 'daten_transfer',
    patterns: [
      /\bdrittland/i,
      /\btransfer/i,
      /\bscc\b/i,
      /\bstandardvertragsklausel/i,
      /\bart\.?\s*44\b/i,
    ],
  },
  {
    type: 'vertragsmanagement',
    patterns: [
      /\bvertragsmanagement/i,
      /\bcontract\s*management/i,
    ],
  },
  {
    type: 'schulung',
    patterns: [
      /\bschulung/i,
      /\btraining/i,
      /\bawareness/i,
      /\bmitarbeiterschulung/i,
    ],
  },
  {
    type: 'audit_log',
    patterns: [
      /\baudit/i,
      /\blogging\b/i,
      /\bprotokollierung/i,
      /\bart\.?\s*5\s*abs\.?\s*2\b/i,
    ],
  },
  {
    type: 'risikoanalyse',
    patterns: [
      /\brisikoanalyse/i,
      /\brisk\s*assessment/i,
      /\brisikobewertung/i,
    ],
  },
  {
    type: 'notfallplan',
    patterns: [
      /\bnotfallplan/i,
      /\bkrisenmanagement/i,
      /\bbusiness\s*continuity/i,
      /\bnotfall/i,
    ],
  },
  {
    type: 'zertifizierung',
    patterns: [
      /\bzertifizierung/i,
      /\biso\s*27001\b/i,
      /\biso\s*27701\b/i,
      /\bart\.?\s*42\b/i,
    ],
  },
  {
    type: 'datenschutzmanagement',
    patterns: [
      /\bdsms\b/i,
      /\bdatenschutzmanagement/i,
      /\bpdca/i,
    ],
  },
  {
    type: 'iace_ce_assessment',
    patterns: [
      /\biace\b/i,
      /\bce[\s-]?kennzeichnung/i,
      /\bai\s*act\b/i,
      /\bki[\s-]?verordnung/i,
    ],
  },
]
// ============================================================================
// Redirect patterns (non-draftable documents → Document Generator)
// ============================================================================
// A match short-circuits classification: the classifier returns 'explain'
// with the canned (German, user-facing) response below instead of drafting.
const REDIRECT_PATTERNS: Array<{
  pattern: RegExp
  response: string
}> = [
  {
    pattern: /\bimpressum\b/i,
    response: 'Impressum-Templates finden Sie unter /sdk/document-generator → Kategorie "Impressum". Der Drafting Agent erstellt keine Impressen, da diese nach DDG §5 unternehmensspezifisch sind.',
  },
  {
    pattern: /\b(agb|allgemeine.?geschaefts)/i,
    response: 'AGB-Vorlagen erstellen Sie im Document Generator unter /sdk/document-generator → Kategorie "AGB". Der Drafting Agent erstellt keine AGB, da diese nach BGB §305ff individuell gestaltet werden muessen.',
  },
  {
    pattern: /\bwiderruf/i,
    response: 'Widerrufs-Templates finden Sie unter /sdk/document-generator → Kategorie "Widerruf".',
  },
  {
    pattern: /\bnda\b/i,
    response: 'NDA-Vorlagen finden Sie unter /sdk/document-generator.',
  },
  {
    pattern: /\bsla\b/i,
    response: 'SLA-Vorlagen finden Sie unter /sdk/document-generator.',
  },
]
// ============================================================================
// Classifier
// ============================================================================
export class IntentClassifier {
/**
* Klassifiziert die Nutzerabsicht anhand des Inputs.
*
* @param input - Die Nutzer-Nachricht
* @returns IntentClassification mit Mode, Confidence, Patterns
*/
classify(input: string): IntentClassification {
const normalized = this.normalize(input)
// Redirect-Check: Nicht-draftbare Dokumente → Document Generator
for (const redirect of REDIRECT_PATTERNS) {
if (redirect.pattern.test(normalized)) {
return {
mode: 'explain',
confidence: 0.90,
matchedPatterns: [redirect.pattern.source],
suggestedResponse: redirect.response,
}
}
}
let bestMatch: IntentClassification = {
mode: 'explain',
confidence: 0.3,
matchedPatterns: [],
}
for (const modePattern of MODE_PATTERNS) {
const matched: string[] = []
for (const pattern of modePattern.patterns) {
if (pattern.test(normalized)) {
matched.push(pattern.source)
}
}
if (matched.length > 0) {
// Mehr Matches = hoehere Confidence (bis zum Maximum)
const matchBonus = Math.min(matched.length - 1, 2) * 0.05
const confidence = Math.min(modePattern.baseConfidence + matchBonus, 0.99)
if (confidence > bestMatch.confidence) {
bestMatch = {
mode: modePattern.mode,
confidence,
matchedPatterns: matched,
}
}
}
}
// Dokumenttyp erkennen
const detectedDocType = this.detectDocumentType(normalized)
if (detectedDocType) {
bestMatch.detectedDocumentType = detectedDocType
// Dokumenttyp-Erkennung erhoeht Confidence leicht
bestMatch.confidence = Math.min(bestMatch.confidence + 0.05, 0.99)
}
// Fallback: Bei Confidence <0.6 immer 'explain'
if (bestMatch.confidence < 0.6) {
bestMatch.mode = 'explain'
}
return bestMatch
}
/**
* Erkennt den Dokumenttyp aus dem Input.
*/
detectDocumentType(input: string): ScopeDocumentType | undefined {
const normalized = this.normalize(input)
for (const docPattern of DOCUMENT_TYPE_PATTERNS) {
for (const pattern of docPattern.patterns) {
if (pattern.test(normalized)) {
return docPattern.type
}
}
}
return undefined
}
/**
* Normalisiert den Input fuer Pattern-Matching.
* Ersetzt Umlaute, entfernt Sonderzeichen.
*/
private normalize(input: string): string {
return input
.replace(/ä/g, 'ae')
.replace(/ö/g, 'oe')
.replace(/ü/g, 'ue')
.replace(/ß/g, 'ss')
.replace(/Ä/g, 'Ae')
.replace(/Ö/g, 'Oe')
.replace(/Ü/g, 'Ue')
}
}
/** Module-level singleton instance of the classifier. */
export const intentClassifier = new IntentClassifier()

View File

@@ -243,7 +243,7 @@ function sanitizeAddress(
*/
export function validateNoRemainingPII(facts: SanitizedFacts): string[] {
const warnings: string[] = []
const allValues = extractAllStringValues(facts)
const allValues = extractAllStringValues(facts as unknown as Record<string, unknown>)
for (const { path, value } of allValues) {
if (path === '__sanitized') continue

View File

@@ -1,342 +0,0 @@
/**
* State Projector - Token-budgetierte Projektion des SDK-State
*
* Extrahiert aus dem vollen SDKState (der ~50k Tokens betragen kann) nur die
* relevanten Slices fuer den jeweiligen Agent-Modus.
*
* Token-Budgets:
* - Draft: ~1500 Tokens
* - Ask: ~600 Tokens
* - Validate: ~2000 Tokens
*/
import type { SDKState, CompanyProfile } from '../types'
import type {
ComplianceScopeState,
ScopeDecision,
ScopeDocumentType,
ScopeGap,
RequiredDocument,
RiskFlag,
DOCUMENT_SCOPE_MATRIX,
DocumentDepthRequirement,
} from '../compliance-scope-types'
import { DOCUMENT_SCOPE_MATRIX as DOC_MATRIX, DOCUMENT_TYPE_LABELS } from '../compliance-scope-types'
import type {
DraftContext,
GapContext,
ValidationContext,
} from './types'
// ============================================================================
// State Projector
// ============================================================================
/**
 * Token-budgeted projection of the SDK state.
 *
 * Extracts from the full SDKState (which can run to ~50k tokens) only the
 * slices relevant to the current agent mode.
 *
 * Approximate token budgets: draft ~1500, ask ~600, validate ~2000.
 */
export class StateProjector {
  /**
   * Projects the SDKState for draft operations.
   * Focus: scope decision, company profile, document-specific constraints.
   *
   * ~1500 tokens
   */
  projectForDraft(
    state: SDKState,
    documentType: ScopeDocumentType
  ): DraftContext {
    const decision = state.complianceScope?.decision ?? null
    const level = decision?.determinedLevel ?? 'L1'
    // Fall back to a non-required baseline when the matrix has no entry
    // for this document type / level combination.
    const depthReq = DOC_MATRIX[documentType]?.[level] ?? {
      required: false,
      depth: 'Basis',
      detailItems: [],
      estimatedEffort: 'N/A',
    }
    return {
      decisions: {
        level,
        scores: decision?.scores ?? {
          risk_score: 0,
          complexity_score: 0,
          assurance_need: 0,
          composite_score: 0,
        },
        hardTriggers: (decision?.triggeredHardTriggers ?? []).map(t => ({
          id: t.rule.id,
          label: t.rule.label,
          legalReference: t.rule.legalReference,
        })),
        requiredDocuments: (decision?.requiredDocuments ?? [])
          .filter(d => d.required)
          .map(d => ({
            documentType: d.documentType,
            depth: d.depth,
            detailItems: d.detailItems,
          })),
      },
      companyProfile: this.projectCompanyProfile(state.companyProfile),
      constraints: {
        depthRequirements: depthReq,
        riskFlags: (decision?.riskFlags ?? []).map(f => ({
          severity: f.severity,
          title: f.title,
          recommendation: f.recommendation,
        })),
        boundaries: this.deriveBoundaries(decision, documentType),
      },
      existingDocumentData: this.extractExistingDocumentData(state, documentType),
    }
  }

  /**
   * Projects the SDKState for ask operations.
   * Focus: gaps, unanswered questions, missing documents.
   *
   * ~600 tokens
   */
  projectForAsk(state: SDKState): GapContext {
    const decision = state.complianceScope?.decision ?? null

    // Required documents that are not yet present in the state.
    const requiredDocs = (decision?.requiredDocuments ?? []).filter(d => d.required)
    const existingDocTypes = this.getExistingDocumentTypes(state)
    const missingDocuments = requiredDocs
      .filter(d => !existingDocTypes.includes(d.documentType))
      .map(d => ({
        documentType: d.documentType,
        label: DOCUMENT_TYPE_LABELS[d.documentType] ?? d.documentType,
        depth: d.depth,
        estimatedEffort: d.estimatedEffort,
      }))

    // Gaps reported by the scope decision.
    const gaps = (decision?.gaps ?? []).map(g => ({
      id: g.id,
      severity: g.severity,
      title: g.title,
      description: g.description,
      relatedDocuments: g.relatedDocuments,
    }))

    return {
      unansweredQuestions: [], // Populated dynamically from question catalog
      gaps,
      missingDocuments,
    }
  }

  /**
   * Projects the SDKState for validate operations.
   * Focus: cross-document consistency, scope compliance.
   *
   * ~2000 tokens
   */
  projectForValidate(
    state: SDKState,
    documentTypes: ScopeDocumentType[]
  ): ValidationContext {
    const decision = state.complianceScope?.decision ?? null
    const level = decision?.determinedLevel ?? 'L1'

    // One summary + structured-data entry per requested document type.
    const documents = documentTypes.map(type => ({
      type,
      contentSummary: this.summarizeDocument(state, type),
      structuredData: this.extractExistingDocumentData(state, type),
    }))

    // Cross-references used for consistency checks between documents.
    const crossReferences = {
      vvtCategories: (state.vvt ?? []).map(v =>
        typeof v === 'object' && v !== null && 'name' in v ? String((v as Record<string, unknown>).name) : ''
      ).filter(Boolean),
      dsfaRisks: state.dsfa
        ? ['DSFA vorhanden']
        : [],
      tomControls: (state.toms ?? []).map(t =>
        typeof t === 'object' && t !== null && 'name' in t ? String((t as Record<string, unknown>).name) : ''
      ).filter(Boolean),
      retentionCategories: (state.retentionPolicies ?? []).map(p =>
        typeof p === 'object' && p !== null && 'name' in p ? String((p as Record<string, unknown>).name) : ''
      ).filter(Boolean),
    }

    // Depth requirements for every requested document type.
    const depthRequirements: Record<string, DocumentDepthRequirement> = {}
    for (const type of documentTypes) {
      depthRequirements[type] = DOC_MATRIX[type]?.[level] ?? {
        required: false,
        depth: 'Basis',
        detailItems: [],
        estimatedEffort: 'N/A',
      }
    }

    return {
      documents,
      crossReferences,
      scopeLevel: level,
      depthRequirements: depthRequirements as Record<ScopeDocumentType, DocumentDepthRequirement>,
    }
  }

  // ==========================================================================
  // Private Helpers
  // ==========================================================================

  /** Maps the (possibly null) company profile onto the draft-context shape. */
  private projectCompanyProfile(
    profile: CompanyProfile | null
  ): DraftContext['companyProfile'] {
    if (!profile) {
      return {
        name: 'Unbekannt',
        industry: 'Unbekannt',
        employeeCount: 0,
        businessModel: 'Unbekannt',
        isPublicSector: false,
      }
    }
    return {
      name: profile.companyName ?? profile.name ?? 'Unbekannt',
      industry: profile.industry ?? 'Unbekannt',
      // employeeCount may arrive as a string — coerce defensively.
      employeeCount: typeof profile.employeeCount === 'number'
        ? profile.employeeCount
        : parseInt(String(profile.employeeCount ?? '0'), 10) || 0,
      businessModel: profile.businessModel ?? 'Unbekannt',
      isPublicSector: profile.isPublicSector ?? false,
      ...(profile.dataProtectionOfficer ? {
        dataProtectionOfficer: {
          name: profile.dataProtectionOfficer.name ?? '',
          email: profile.dataProtectionOfficer.email ?? '',
        },
      } : {}),
    }
  }

  /**
   * Derives boundaries the agent must not cross. The scope engine is
   * authoritative for document depth and requirement status.
   */
  private deriveBoundaries(
    decision: ScopeDecision | null,
    documentType: ScopeDocumentType
  ): string[] {
    const boundaries: string[] = []
    const level = decision?.determinedLevel ?? 'L1'

    // Base rule: maximum document depth comes from the scope matrix.
    boundaries.push(
      `Maximale Dokumenttiefe: ${level} (${DOC_MATRIX[documentType]?.[level]?.depth ?? 'Basis'})`
    )

    // DSFA boundary: only draft a DSFA when a hard trigger requires it
    // (or the scope level is L4).
    if (documentType === 'dsfa') {
      const dsfaRequired = decision?.triggeredHardTriggers?.some(
        t => t.rule.dsfaRequired
      ) ?? false
      if (!dsfaRequired && level !== 'L4') {
        boundaries.push('DSFA ist laut Scope-Engine NICHT erforderlich. Nur auf expliziten Wunsch erstellen.')
      }
    }

    // Flag documents that are not classified as mandatory at this level.
    const isRequired = decision?.requiredDocuments?.some(
      d => d.documentType === documentType && d.required
    ) ?? false
    if (!isRequired) {
      boundaries.push(
        `Dokument "${DOCUMENT_TYPE_LABELS[documentType] ?? documentType}" ist auf Level ${level} nicht als Pflicht eingestuft.`
      )
    }
    return boundaries
  }

  /**
   * Extracts already-existing document data from the SDK state.
   * Lists are truncated (3-5 entries) to respect the token budget; the
   * original count is preserved in totalCount.
   */
  private extractExistingDocumentData(
    state: SDKState,
    documentType: ScopeDocumentType
  ): Record<string, unknown> | undefined {
    switch (documentType) {
      case 'vvt':
        return state.vvt?.length ? { entries: state.vvt.slice(0, 5), totalCount: state.vvt.length } : undefined
      case 'tom':
        return state.toms?.length ? { entries: state.toms.slice(0, 5), totalCount: state.toms.length } : undefined
      case 'lf':
        return state.retentionPolicies?.length
          ? { entries: state.retentionPolicies.slice(0, 5), totalCount: state.retentionPolicies.length }
          : undefined
      case 'dsfa':
        return state.dsfa ? { assessment: state.dsfa } : undefined
      case 'dsi':
        return state.documents?.length
          ? { entries: state.documents.slice(0, 3), totalCount: state.documents.length }
          : undefined
      case 'einwilligung':
        return state.consents?.length
          ? { entries: state.consents.slice(0, 5), totalCount: state.consents.length }
          : undefined
      default:
        return undefined
    }
  }

  /**
   * Determines which document types already have data in the state.
   * Uses a Set so each type appears at most once, even when several state
   * slices map to the same type (e.g. consents and cookie banner both map
   * to 'einwilligung').
   */
  private getExistingDocumentTypes(state: SDKState): ScopeDocumentType[] {
    const types = new Set<ScopeDocumentType>()
    if (state.vvt?.length) types.add('vvt')
    if (state.toms?.length) types.add('tom')
    if (state.retentionPolicies?.length) types.add('lf')
    if (state.dsfa) types.add('dsfa')
    if (state.documents?.length) types.add('dsi')
    if (state.consents?.length) types.add('einwilligung')
    if (state.cookieBanner) types.add('einwilligung')
    if (state.risks?.length) types.add('risikoanalyse')
    if (state.escalationWorkflows?.length) types.add('datenpannen')
    if (state.iaceProjects?.length) types.add('iace_ce_assessment')
    if (state.obligations?.length) types.add('zertifizierung')
    if (state.dsrConfig) types.add('betroffenenrechte')
    return [...types]
  }

  /** Produces a short German summary of a document for validation prompts. */
  private summarizeDocument(
    state: SDKState,
    documentType: ScopeDocumentType
  ): string {
    switch (documentType) {
      case 'vvt':
        return state.vvt?.length
          ? `${state.vvt.length} Verarbeitungstaetigkeiten erfasst`
          : 'Keine VVT-Eintraege vorhanden'
      case 'tom':
        return state.toms?.length
          ? `${state.toms.length} TOM-Massnahmen definiert`
          : 'Keine TOM-Massnahmen vorhanden'
      case 'lf':
        return state.retentionPolicies?.length
          ? `${state.retentionPolicies.length} Loeschfristen definiert`
          : 'Keine Loeschfristen vorhanden'
      case 'dsfa':
        return state.dsfa
          ? 'DSFA vorhanden'
          : 'Keine DSFA vorhanden'
      default:
        return `Dokument ${DOCUMENT_TYPE_LABELS[documentType] ?? documentType}`
    }
  }
}
/** Shared singleton instance of the state projector. */
export const stateProjector = new StateProjector()

View File

@@ -1,343 +0,0 @@
'use client'
/**
* useDraftingEngine - React Hook fuer die Drafting Engine
*
* Managed: currentMode, activeDocumentType, draftSessions, validationState
* Handled: State-Projection, API-Calls, Streaming
* Provides: sendMessage(), requestDraft(), validateDraft(), acceptDraft()
*/
import { useState, useCallback, useRef } from 'react'
import { useSDK } from '../context'
import { stateProjector } from './state-projector'
import { intentClassifier } from './intent-classifier'
import { constraintEnforcer } from './constraint-enforcer'
import type {
AgentMode,
DraftSession,
DraftRevision,
DraftingChatMessage,
ValidationResult,
ConstraintCheckResult,
DraftContext,
GapContext,
ValidationContext,
} from './types'
import type { ScopeDocumentType } from '../compliance-scope-types'
/** Reactive state exposed by the drafting-engine hook. */
export interface DraftingEngineState {
  // Currently selected agent mode.
  currentMode: AgentMode
  // Document type the engine is working on, or null if none chosen yet.
  activeDocumentType: ScopeDocumentType | null
  // Chat history (user and assistant messages).
  messages: DraftingChatMessage[]
  // True while a request or response stream is in flight.
  isTyping: boolean
  // Latest generated draft revision, if any.
  currentDraft: DraftRevision | null
  // Result of the most recent validation run, if any.
  validationResult: ValidationResult | null
  // Result of the most recent constraint check, if any.
  constraintCheck: ConstraintCheckResult | null
  // Last error message, or null when no error occurred.
  error: string | null
}
/** Imperative actions exposed by the drafting-engine hook. */
export interface DraftingEngineActions {
  // Switches the agent mode.
  setMode: (mode: AgentMode) => void
  // Selects the document type to work on.
  setDocumentType: (type: ScopeDocumentType) => void
  // Sends a chat message and streams the assistant reply.
  sendMessage: (content: string) => Promise<void>
  // Requests a document draft for the active document type.
  requestDraft: (instructions?: string) => Promise<void>
  // Validates the current draft against the scope context.
  validateDraft: () => Promise<void>
  // Commits the current draft into the SDK state.
  acceptDraft: () => void
  // Aborts an in-flight generation request.
  stopGeneration: () => void
  // Clears chat history and all draft/validation state.
  clearMessages: () => void
}
export function useDraftingEngine(): DraftingEngineState & DraftingEngineActions {
const { state, dispatch } = useSDK()
const abortControllerRef = useRef<AbortController | null>(null)
const [currentMode, setCurrentMode] = useState<AgentMode>('explain')
const [activeDocumentType, setActiveDocumentType] = useState<ScopeDocumentType | null>(null)
const [messages, setMessages] = useState<DraftingChatMessage[]>([])
const [isTyping, setIsTyping] = useState(false)
const [currentDraft, setCurrentDraft] = useState<DraftRevision | null>(null)
const [validationResult, setValidationResult] = useState<ValidationResult | null>(null)
const [constraintCheck, setConstraintCheck] = useState<ConstraintCheckResult | null>(null)
const [error, setError] = useState<string | null>(null)
// Get state projection based on mode
const getProjection = useCallback(() => {
switch (currentMode) {
case 'draft':
return activeDocumentType
? stateProjector.projectForDraft(state, activeDocumentType)
: null
case 'ask':
return stateProjector.projectForAsk(state)
case 'validate':
return activeDocumentType
? stateProjector.projectForValidate(state, [activeDocumentType])
: stateProjector.projectForValidate(state, ['vvt', 'tom', 'lf'])
default:
return activeDocumentType
? stateProjector.projectForDraft(state, activeDocumentType)
: null
}
}, [state, currentMode, activeDocumentType])
const setMode = useCallback((mode: AgentMode) => {
setCurrentMode(mode)
}, [])
const setDocumentType = useCallback((type: ScopeDocumentType) => {
setActiveDocumentType(type)
}, [])
const sendMessage = useCallback(async (content: string) => {
if (!content.trim() || isTyping) return
setError(null)
// Auto-detect mode if needed
const classification = intentClassifier.classify(content)
if (classification.confidence > 0.7 && classification.mode !== currentMode) {
setCurrentMode(classification.mode)
}
if (classification.detectedDocumentType && !activeDocumentType) {
setActiveDocumentType(classification.detectedDocumentType)
}
const userMessage: DraftingChatMessage = {
role: 'user',
content: content.trim(),
}
setMessages(prev => [...prev, userMessage])
setIsTyping(true)
abortControllerRef.current = new AbortController()
try {
const projection = getProjection()
const response = await fetch('/api/sdk/drafting-engine/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: content.trim(),
history: messages.map(m => ({ role: m.role, content: m.content })),
sdkStateProjection: projection,
mode: currentMode,
documentType: activeDocumentType,
}),
signal: abortControllerRef.current.signal,
})
if (!response.ok) {
const errorData = await response.json().catch(() => ({ error: 'Unbekannter Fehler' }))
throw new Error(errorData.error || `Server-Fehler (${response.status})`)
}
const agentMessageId = `msg-${Date.now()}-agent`
setMessages(prev => [...prev, {
role: 'assistant',
content: '',
metadata: { mode: currentMode, documentType: activeDocumentType ?? undefined },
}])
// Stream response
const reader = response.body!.getReader()
const decoder = new TextDecoder()
let accumulated = ''
while (true) {
const { done, value } = await reader.read()
if (done) break
accumulated += decoder.decode(value, { stream: true })
const text = accumulated
setMessages(prev =>
prev.map((m, i) => i === prev.length - 1 ? { ...m, content: text } : m)
)
}
setIsTyping(false)
} catch (err) {
if ((err as Error).name === 'AbortError') {
setIsTyping(false)
return
}
setError((err as Error).message)
setMessages(prev => [...prev, {
role: 'assistant',
content: `Fehler: ${(err as Error).message}`,
}])
setIsTyping(false)
}
}, [isTyping, messages, currentMode, activeDocumentType, getProjection])
const requestDraft = useCallback(async (instructions?: string) => {
if (!activeDocumentType) {
setError('Bitte waehlen Sie zuerst einen Dokumenttyp.')
return
}
setError(null)
setIsTyping(true)
try {
const draftContext = stateProjector.projectForDraft(state, activeDocumentType)
const response = await fetch('/api/sdk/drafting-engine/draft', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
documentType: activeDocumentType,
draftContext,
instructions,
existingDraft: currentDraft,
}),
})
const result = await response.json()
if (!response.ok) {
throw new Error(result.error || 'Draft-Generierung fehlgeschlagen')
}
setCurrentDraft(result.draft)
setConstraintCheck(result.constraintCheck)
setMessages(prev => [...prev, {
role: 'assistant',
content: `Draft fuer ${activeDocumentType} erstellt (${result.draft.sections.length} Sections). Oeffnen Sie den Editor zur Bearbeitung.`,
metadata: { mode: 'draft', documentType: activeDocumentType, hasDraft: true },
}])
setIsTyping(false)
} catch (err) {
setError((err as Error).message)
setIsTyping(false)
}
}, [activeDocumentType, state, currentDraft])
const validateDraft = useCallback(async () => {
setError(null)
setIsTyping(true)
try {
const docTypes: ScopeDocumentType[] = activeDocumentType
? [activeDocumentType]
: ['vvt', 'tom', 'lf']
const validationContext = stateProjector.projectForValidate(state, docTypes)
const response = await fetch('/api/sdk/drafting-engine/validate', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
documentType: activeDocumentType || 'vvt',
draftContent: currentDraft?.content || '',
validationContext,
}),
})
const result = await response.json()
if (!response.ok) {
throw new Error(result.error || 'Validierung fehlgeschlagen')
}
setValidationResult(result)
const summary = result.passed
? `Validierung bestanden. ${result.warnings.length} Warnungen, ${result.suggestions.length} Vorschlaege.`
: `Validierung fehlgeschlagen. ${result.errors.length} Fehler, ${result.warnings.length} Warnungen.`
setMessages(prev => [...prev, {
role: 'assistant',
content: summary,
metadata: { mode: 'validate', hasValidation: true },
}])
setIsTyping(false)
} catch (err) {
setError((err as Error).message)
setIsTyping(false)
}
}, [activeDocumentType, state, currentDraft])
const acceptDraft = useCallback(() => {
if (!currentDraft || !activeDocumentType) return
// Dispatch the draft data into SDK state
switch (activeDocumentType) {
case 'vvt':
dispatch({
type: 'ADD_PROCESSING_ACTIVITY',
payload: {
id: `draft-vvt-${Date.now()}`,
name: currentDraft.sections.find(s => s.schemaField === 'name')?.content || 'Neuer VVT-Eintrag',
...Object.fromEntries(
currentDraft.sections
.filter(s => s.schemaField)
.map(s => [s.schemaField!, s.content])
),
},
})
break
case 'tom':
dispatch({
type: 'ADD_TOM',
payload: {
id: `draft-tom-${Date.now()}`,
name: 'TOM-Entwurf',
...Object.fromEntries(
currentDraft.sections
.filter(s => s.schemaField)
.map(s => [s.schemaField!, s.content])
),
},
})
break
default:
dispatch({
type: 'ADD_DOCUMENT',
payload: {
id: `draft-${activeDocumentType}-${Date.now()}`,
type: activeDocumentType,
content: currentDraft.content,
sections: currentDraft.sections,
},
})
}
setMessages(prev => [...prev, {
role: 'assistant',
content: `Draft wurde in den SDK-State uebernommen.`,
}])
setCurrentDraft(null)
}, [currentDraft, activeDocumentType, dispatch])
const stopGeneration = useCallback(() => {
abortControllerRef.current?.abort()
setIsTyping(false)
}, [])
const clearMessages = useCallback(() => {
setMessages([])
setCurrentDraft(null)
setValidationResult(null)
setConstraintCheck(null)
setError(null)
}, [])
return {
currentMode,
activeDocumentType,
messages,
isTyping,
currentDraft,
validationResult,
constraintCheck,
error,
setMode,
setDocumentType,
sendMessage,
requestDraft,
validateDraft,
acceptDraft,
stopGeneration,
clearMessages,
}
}

View File

@@ -199,11 +199,14 @@ describe('DSFAMitigation type', () => {
describe('DSFASectionProgress type', () => {
it('should track completion for all 5 sections', () => {
const progress: DSFASectionProgress = {
section_0_complete: false,
section_1_complete: true,
section_2_complete: true,
section_3_complete: false,
section_4_complete: false,
section_5_complete: false,
section_6_complete: false,
section_7_complete: false,
}
expect(progress.section_1_complete).toBe(true)

View File

@@ -554,6 +554,15 @@ export function TOMGeneratorProvider({
[]
)
// Applies a batch of partial updates to derived TOMs, one dispatch per entry.
// NOTE(review): the empty dependency array assumes `dispatch` is referentially
// stable (e.g. from useReducer/context) — TODO confirm against the provider.
const bulkUpdateTOMs = useCallback(
  (updates: Array<{ id: string; data: Partial<DerivedTOM> }>) => {
    for (const { id, data } of updates) {
      dispatch({ type: 'UPDATE_DERIVED_TOM', payload: { id, data } })
    }
  },
  []
)
// Gap analysis
const runGapAnalysis = useCallback(() => {
if (!rulesEngineRef.current) return
@@ -666,6 +675,7 @@ export function TOMGeneratorProvider({
deriveTOMs,
updateDerivedTOM,
bulkUpdateTOMs,
runGapAnalysis,

View File

@@ -1,11 +1,11 @@
{
"name": "breakpilot-admin-v2",
"name": "breakpilot-compliance-sdk-admin",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "breakpilot-admin-v2",
"name": "breakpilot-compliance-sdk-admin",
"version": "1.0.0",
"dependencies": {
"bpmn-js": "^18.0.1",
@@ -1560,15 +1560,15 @@
}
},
"node_modules/@next/env": {
"version": "15.5.9",
"resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.9.tgz",
"integrity": "sha512-4GlTZ+EJM7WaW2HEZcyU317tIQDjkQIyENDLxYJfSWlfqguN+dHkZgyQTV/7ykvobU7yEH5gKvreNrH4B6QgIg==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.12.tgz",
"integrity": "sha512-pUvdJN1on574wQHjaBfNGDt9Mz5utDSZFsIIQkMzPgNS8ZvT4H2mwOrOIClwsQOb6EGx5M76/CZr6G8i6pSpLg==",
"license": "MIT"
},
"node_modules/@next/swc-darwin-arm64": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.7.tgz",
"integrity": "sha512-IZwtxCEpI91HVU/rAUOOobWSZv4P2DeTtNaCdHqLcTJU4wdNXgAySvKa/qJCgR5m6KI8UsKDXtO2B31jcaw1Yw==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.12.tgz",
"integrity": "sha512-RnRjBtH8S8eXCpUNkQ+543DUc7ys8y15VxmFU9HRqlo9BG3CcBUiwNtF8SNoi2xvGCVJq1vl2yYq+3oISBS0Zg==",
"cpu": [
"arm64"
],
@@ -1582,9 +1582,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.7.tgz",
"integrity": "sha512-UP6CaDBcqaCBuiq/gfCEJw7sPEoX1aIjZHnBWN9v9qYHQdMKvCKcAVs4OX1vIjeE+tC5EIuwDTVIoXpUes29lg==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.12.tgz",
"integrity": "sha512-nqa9/7iQlboF1EFtNhWxQA0rQstmYRSBGxSM6g3GxvxHxcoeqVXfGNr9stJOme674m2V7r4E3+jEhhGvSQhJRA==",
"cpu": [
"x64"
],
@@ -1598,9 +1598,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.7.tgz",
"integrity": "sha512-NCslw3GrNIw7OgmRBxHtdWFQYhexoUCq+0oS2ccjyYLtcn1SzGzeM54jpTFonIMUjNbHmpKpziXnpxhSWLcmBA==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.12.tgz",
"integrity": "sha512-dCzAjqhDHwmoB2M4eYfVKqXs99QdQxNQVpftvP1eGVppamXh/OkDAwV737Zr0KPXEqRUMN4uCjh6mjO+XtF3Mw==",
"cpu": [
"arm64"
],
@@ -1614,9 +1614,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.7.tgz",
"integrity": "sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.12.tgz",
"integrity": "sha512-+fpGWvQiITgf7PUtbWY1H7qUSnBZsPPLyyq03QuAKpVoTy/QUx1JptEDTQMVvQhvizCEuNLEeghrQUyXQOekuw==",
"cpu": [
"arm64"
],
@@ -1630,9 +1630,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.7.tgz",
"integrity": "sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.12.tgz",
"integrity": "sha512-jSLvgdRRL/hrFAPqEjJf1fFguC719kmcptjNVDJl26BnJIpjL3KH5h6mzR4mAweociLQaqvt4UyzfbFjgAdDcw==",
"cpu": [
"x64"
],
@@ -1646,9 +1646,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.7.tgz",
"integrity": "sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.12.tgz",
"integrity": "sha512-/uaF0WfmYqQgLfPmN6BvULwxY0dufI2mlN2JbOKqqceZh1G4hjREyi7pg03zjfyS6eqNemHAZPSoP84x17vo6w==",
"cpu": [
"x64"
],
@@ -1662,9 +1662,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.7.tgz",
"integrity": "sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.12.tgz",
"integrity": "sha512-xhsL1OvQSfGmlL5RbOmU+FV120urrgFpYLq+6U8C6KIym32gZT6XF/SDE92jKzzlPWskkbjOKCpqk5m4i8PEfg==",
"cpu": [
"arm64"
],
@@ -1678,9 +1678,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
"version": "15.5.7",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.7.tgz",
"integrity": "sha512-gMzgBX164I6DN+9/PGA+9dQiwmTkE4TloBNx8Kv9UiGARsr9Nba7IpcBRA1iTV9vwlYnrE3Uy6I7Aj6qLjQuqw==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.12.tgz",
"integrity": "sha512-Z1Dh6lhFkxvBDH1FoW6OU/L6prYwPSlwjLiZkExIAh8fbP6iI/M7iGTQAJPYJ9YFlWobCZ1PHbchFhFYb2ADkw==",
"cpu": [
"x64"
],
@@ -1857,9 +1857,9 @@
"license": "MIT"
},
"node_modules/@rollup/rollup-android-arm-eabi": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz",
"integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz",
"integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==",
"cpu": [
"arm"
],
@@ -1871,9 +1871,9 @@
]
},
"node_modules/@rollup/rollup-android-arm64": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz",
"integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz",
"integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==",
"cpu": [
"arm64"
],
@@ -1885,9 +1885,9 @@
]
},
"node_modules/@rollup/rollup-darwin-arm64": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz",
"integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz",
"integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==",
"cpu": [
"arm64"
],
@@ -1899,9 +1899,9 @@
]
},
"node_modules/@rollup/rollup-darwin-x64": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz",
"integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz",
"integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==",
"cpu": [
"x64"
],
@@ -1913,9 +1913,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-arm64": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz",
"integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz",
"integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==",
"cpu": [
"arm64"
],
@@ -1927,9 +1927,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-x64": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz",
"integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz",
"integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==",
"cpu": [
"x64"
],
@@ -1941,9 +1941,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz",
"integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz",
"integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==",
"cpu": [
"arm"
],
@@ -1955,9 +1955,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz",
"integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz",
"integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==",
"cpu": [
"arm"
],
@@ -1969,9 +1969,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-gnu": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz",
"integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz",
"integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==",
"cpu": [
"arm64"
],
@@ -1983,9 +1983,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-musl": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz",
"integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz",
"integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==",
"cpu": [
"arm64"
],
@@ -1997,9 +1997,9 @@
]
},
"node_modules/@rollup/rollup-linux-loong64-gnu": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz",
"integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz",
"integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==",
"cpu": [
"loong64"
],
@@ -2011,9 +2011,9 @@
]
},
"node_modules/@rollup/rollup-linux-loong64-musl": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz",
"integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz",
"integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==",
"cpu": [
"loong64"
],
@@ -2025,9 +2025,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz",
"integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz",
"integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==",
"cpu": [
"ppc64"
],
@@ -2039,9 +2039,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-musl": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz",
"integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz",
"integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==",
"cpu": [
"ppc64"
],
@@ -2053,9 +2053,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz",
"integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz",
"integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==",
"cpu": [
"riscv64"
],
@@ -2067,9 +2067,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-musl": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz",
"integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz",
"integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==",
"cpu": [
"riscv64"
],
@@ -2081,9 +2081,9 @@
]
},
"node_modules/@rollup/rollup-linux-s390x-gnu": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz",
"integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz",
"integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==",
"cpu": [
"s390x"
],
@@ -2095,9 +2095,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-gnu": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz",
"integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz",
"integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==",
"cpu": [
"x64"
],
@@ -2109,9 +2109,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-musl": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz",
"integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz",
"integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==",
"cpu": [
"x64"
],
@@ -2123,9 +2123,9 @@
]
},
"node_modules/@rollup/rollup-openbsd-x64": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz",
"integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz",
"integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==",
"cpu": [
"x64"
],
@@ -2137,9 +2137,9 @@
]
},
"node_modules/@rollup/rollup-openharmony-arm64": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz",
"integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz",
"integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==",
"cpu": [
"arm64"
],
@@ -2151,9 +2151,9 @@
]
},
"node_modules/@rollup/rollup-win32-arm64-msvc": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz",
"integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz",
"integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==",
"cpu": [
"arm64"
],
@@ -2165,9 +2165,9 @@
]
},
"node_modules/@rollup/rollup-win32-ia32-msvc": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz",
"integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz",
"integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==",
"cpu": [
"ia32"
],
@@ -2179,9 +2179,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-gnu": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz",
"integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz",
"integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==",
"cpu": [
"x64"
],
@@ -2193,9 +2193,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-msvc": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz",
"integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz",
"integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==",
"cpu": [
"x64"
],
@@ -3661,11 +3661,14 @@
}
},
"node_modules/dompurify": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.1.tgz",
"integrity": "sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==",
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.2.tgz",
"integrity": "sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==",
"license": "(MPL-2.0 OR Apache-2.0)",
"optional": true,
"engines": {
"node": ">=20"
},
"optionalDependencies": {
"@types/trusted-types": "^2.0.7"
}
@@ -4200,12 +4203,12 @@
}
},
"node_modules/jspdf": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/jspdf/-/jspdf-4.1.0.tgz",
"integrity": "sha512-xd1d/XRkwqnsq6FP3zH1Q+Ejqn2ULIJeDZ+FTKpaabVpZREjsJKRJwuokTNgdqOU+fl55KgbvgZ1pRTSWCP2kQ==",
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/jspdf/-/jspdf-4.2.0.tgz",
"integrity": "sha512-hR/hnRevAXXlrjeqU5oahOE+Ln9ORJUB5brLHHqH67A+RBQZuFr5GkbI9XQI8OUFSEezKegsi45QRpc4bGj75Q==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.28.4",
"@babel/runtime": "^7.28.6",
"fast-png": "^6.2.0",
"fflate": "^0.8.1"
},
@@ -4441,12 +4444,12 @@
}
},
"node_modules/next": {
"version": "15.5.9",
"resolved": "https://registry.npmjs.org/next/-/next-15.5.9.tgz",
"integrity": "sha512-agNLK89seZEtC5zUHwtut0+tNrc0Xw4FT/Dg+B/VLEo9pAcS9rtTKpek3V6kVcVwsB2YlqMaHdfZL4eLEVYuCg==",
"version": "15.5.12",
"resolved": "https://registry.npmjs.org/next/-/next-15.5.12.tgz",
"integrity": "sha512-Fi/wQ4Etlrn60rz78bebG1i1SR20QxvV8tVp6iJspjLUSHcZoeUXCt+vmWoEcza85ElZzExK/jJ/F6SvtGktjA==",
"license": "MIT",
"dependencies": {
"@next/env": "15.5.9",
"@next/env": "15.5.12",
"@swc/helpers": "0.5.15",
"caniuse-lite": "^1.0.30001579",
"postcss": "8.4.31",
@@ -4459,14 +4462,14 @@
"node": "^18.18.0 || ^19.8.0 || >= 20.0.0"
},
"optionalDependencies": {
"@next/swc-darwin-arm64": "15.5.7",
"@next/swc-darwin-x64": "15.5.7",
"@next/swc-linux-arm64-gnu": "15.5.7",
"@next/swc-linux-arm64-musl": "15.5.7",
"@next/swc-linux-x64-gnu": "15.5.7",
"@next/swc-linux-x64-musl": "15.5.7",
"@next/swc-win32-arm64-msvc": "15.5.7",
"@next/swc-win32-x64-msvc": "15.5.7",
"@next/swc-darwin-arm64": "15.5.12",
"@next/swc-darwin-x64": "15.5.12",
"@next/swc-linux-arm64-gnu": "15.5.12",
"@next/swc-linux-arm64-musl": "15.5.12",
"@next/swc-linux-x64-gnu": "15.5.12",
"@next/swc-linux-x64-musl": "15.5.12",
"@next/swc-win32-arm64-msvc": "15.5.12",
"@next/swc-win32-x64-msvc": "15.5.12",
"sharp": "^0.34.3"
},
"peerDependencies": {
@@ -5333,9 +5336,9 @@
}
},
"node_modules/rollup": {
"version": "4.57.1",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz",
"integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==",
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz",
"integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -5349,31 +5352,31 @@
"npm": ">=8.0.0"
},
"optionalDependencies": {
"@rollup/rollup-android-arm-eabi": "4.57.1",
"@rollup/rollup-android-arm64": "4.57.1",
"@rollup/rollup-darwin-arm64": "4.57.1",
"@rollup/rollup-darwin-x64": "4.57.1",
"@rollup/rollup-freebsd-arm64": "4.57.1",
"@rollup/rollup-freebsd-x64": "4.57.1",
"@rollup/rollup-linux-arm-gnueabihf": "4.57.1",
"@rollup/rollup-linux-arm-musleabihf": "4.57.1",
"@rollup/rollup-linux-arm64-gnu": "4.57.1",
"@rollup/rollup-linux-arm64-musl": "4.57.1",
"@rollup/rollup-linux-loong64-gnu": "4.57.1",
"@rollup/rollup-linux-loong64-musl": "4.57.1",
"@rollup/rollup-linux-ppc64-gnu": "4.57.1",
"@rollup/rollup-linux-ppc64-musl": "4.57.1",
"@rollup/rollup-linux-riscv64-gnu": "4.57.1",
"@rollup/rollup-linux-riscv64-musl": "4.57.1",
"@rollup/rollup-linux-s390x-gnu": "4.57.1",
"@rollup/rollup-linux-x64-gnu": "4.57.1",
"@rollup/rollup-linux-x64-musl": "4.57.1",
"@rollup/rollup-openbsd-x64": "4.57.1",
"@rollup/rollup-openharmony-arm64": "4.57.1",
"@rollup/rollup-win32-arm64-msvc": "4.57.1",
"@rollup/rollup-win32-ia32-msvc": "4.57.1",
"@rollup/rollup-win32-x64-gnu": "4.57.1",
"@rollup/rollup-win32-x64-msvc": "4.57.1",
"@rollup/rollup-android-arm-eabi": "4.59.0",
"@rollup/rollup-android-arm64": "4.59.0",
"@rollup/rollup-darwin-arm64": "4.59.0",
"@rollup/rollup-darwin-x64": "4.59.0",
"@rollup/rollup-freebsd-arm64": "4.59.0",
"@rollup/rollup-freebsd-x64": "4.59.0",
"@rollup/rollup-linux-arm-gnueabihf": "4.59.0",
"@rollup/rollup-linux-arm-musleabihf": "4.59.0",
"@rollup/rollup-linux-arm64-gnu": "4.59.0",
"@rollup/rollup-linux-arm64-musl": "4.59.0",
"@rollup/rollup-linux-loong64-gnu": "4.59.0",
"@rollup/rollup-linux-loong64-musl": "4.59.0",
"@rollup/rollup-linux-ppc64-gnu": "4.59.0",
"@rollup/rollup-linux-ppc64-musl": "4.59.0",
"@rollup/rollup-linux-riscv64-gnu": "4.59.0",
"@rollup/rollup-linux-riscv64-musl": "4.59.0",
"@rollup/rollup-linux-s390x-gnu": "4.59.0",
"@rollup/rollup-linux-x64-gnu": "4.59.0",
"@rollup/rollup-linux-x64-musl": "4.59.0",
"@rollup/rollup-openbsd-x64": "4.59.0",
"@rollup/rollup-openharmony-arm64": "4.59.0",
"@rollup/rollup-win32-arm64-msvc": "4.59.0",
"@rollup/rollup-win32-ia32-msvc": "4.59.0",
"@rollup/rollup-win32-x64-gnu": "4.59.0",
"@rollup/rollup-win32-x64-msvc": "4.59.0",
"fsevents": "~2.3.2"
}
},

View File

@@ -1,451 +0,0 @@
package handlers
import (
"net/http"
"github.com/breakpilot/ai-compliance-sdk/internal/dsb"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
// DSBHandlers handles DSB-as-a-Service portal HTTP requests.
type DSBHandlers struct {
store *dsb.Store
}
// NewDSBHandlers creates new DSB handlers.
func NewDSBHandlers(store *dsb.Store) *DSBHandlers {
return &DSBHandlers{store: store}
}
// getDSBUserID extracts and parses the X-User-ID header as UUID.
func getDSBUserID(c *gin.Context) (uuid.UUID, bool) {
userIDStr := c.GetHeader("X-User-ID")
if userIDStr == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "X-User-ID header is required"})
return uuid.Nil, false
}
userID, err := uuid.Parse(userIDStr)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid X-User-ID header: must be a valid UUID"})
return uuid.Nil, false
}
return userID, true
}
// ============================================================================
// Dashboard
// ============================================================================
// GetDashboard returns the aggregated DSB dashboard.
// GET /sdk/v1/dsb/dashboard
func (h *DSBHandlers) GetDashboard(c *gin.Context) {
dsbUserID, ok := getDSBUserID(c)
if !ok {
return
}
dashboard, err := h.store.GetDashboard(c.Request.Context(), dsbUserID)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, dashboard)
}
// ============================================================================
// Assignments
// ============================================================================
// CreateAssignment creates a new DSB-to-tenant assignment.
// POST /sdk/v1/dsb/assignments
func (h *DSBHandlers) CreateAssignment(c *gin.Context) {
var req dsb.CreateAssignmentRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
assignment := &dsb.Assignment{
DSBUserID: req.DSBUserID,
TenantID: req.TenantID,
Status: req.Status,
ContractStart: req.ContractStart,
ContractEnd: req.ContractEnd,
MonthlyHoursBudget: req.MonthlyHoursBudget,
Notes: req.Notes,
}
if err := h.store.CreateAssignment(c.Request.Context(), assignment); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusCreated, gin.H{"assignment": assignment})
}
// ListAssignments returns all assignments for the authenticated DSB user.
// GET /sdk/v1/dsb/assignments
func (h *DSBHandlers) ListAssignments(c *gin.Context) {
dsbUserID, ok := getDSBUserID(c)
if !ok {
return
}
assignments, err := h.store.ListAssignments(c.Request.Context(), dsbUserID)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{
"assignments": assignments,
"total": len(assignments),
})
}
// GetAssignment retrieves a single assignment by ID.
// GET /sdk/v1/dsb/assignments/:id
func (h *DSBHandlers) GetAssignment(c *gin.Context) {
id, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
assignment, err := h.store.GetAssignment(c.Request.Context(), id)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{"error": "assignment not found"})
return
}
c.JSON(http.StatusOK, gin.H{"assignment": assignment})
}
// UpdateAssignment updates an existing assignment.
// PUT /sdk/v1/dsb/assignments/:id
func (h *DSBHandlers) UpdateAssignment(c *gin.Context) {
id, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
assignment, err := h.store.GetAssignment(c.Request.Context(), id)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{"error": "assignment not found"})
return
}
var req dsb.UpdateAssignmentRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
// Apply non-nil fields
if req.Status != nil {
assignment.Status = *req.Status
}
if req.ContractEnd != nil {
assignment.ContractEnd = req.ContractEnd
}
if req.MonthlyHoursBudget != nil {
assignment.MonthlyHoursBudget = *req.MonthlyHoursBudget
}
if req.Notes != nil {
assignment.Notes = *req.Notes
}
if err := h.store.UpdateAssignment(c.Request.Context(), assignment); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{"assignment": assignment})
}
// ============================================================================
// Hours
// ============================================================================
// CreateHourEntry creates a new time tracking entry for an assignment.
// POST /sdk/v1/dsb/assignments/:id/hours
func (h *DSBHandlers) CreateHourEntry(c *gin.Context) {
assignmentID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
var req dsb.CreateHourEntryRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
billable := true
if req.Billable != nil {
billable = *req.Billable
}
entry := &dsb.HourEntry{
AssignmentID: assignmentID,
Date: req.Date,
Hours: req.Hours,
Category: req.Category,
Description: req.Description,
Billable: billable,
}
if err := h.store.CreateHourEntry(c.Request.Context(), entry); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusCreated, gin.H{"hour_entry": entry})
}
// ListHours returns time entries for an assignment.
// GET /sdk/v1/dsb/assignments/:id/hours?month=YYYY-MM
func (h *DSBHandlers) ListHours(c *gin.Context) {
assignmentID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
month := c.Query("month")
entries, err := h.store.ListHours(c.Request.Context(), assignmentID, month)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{
"hours": entries,
"total": len(entries),
})
}
// GetHoursSummary returns aggregated hour statistics for an assignment.
// GET /sdk/v1/dsb/assignments/:id/hours/summary?month=YYYY-MM
func (h *DSBHandlers) GetHoursSummary(c *gin.Context) {
assignmentID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
month := c.Query("month")
summary, err := h.store.GetHoursSummary(c.Request.Context(), assignmentID, month)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, summary)
}
// ============================================================================
// Tasks
// ============================================================================
// CreateTask creates a new task for an assignment.
// POST /sdk/v1/dsb/assignments/:id/tasks
func (h *DSBHandlers) CreateTask(c *gin.Context) {
assignmentID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
var req dsb.CreateTaskRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
task := &dsb.Task{
AssignmentID: assignmentID,
Title: req.Title,
Description: req.Description,
Category: req.Category,
Priority: req.Priority,
DueDate: req.DueDate,
}
if err := h.store.CreateTask(c.Request.Context(), task); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusCreated, gin.H{"task": task})
}
// ListTasks returns tasks for an assignment.
// GET /sdk/v1/dsb/assignments/:id/tasks?status=open
func (h *DSBHandlers) ListTasks(c *gin.Context) {
assignmentID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
status := c.Query("status")
tasks, err := h.store.ListTasks(c.Request.Context(), assignmentID, status)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{
"tasks": tasks,
"total": len(tasks),
})
}
// UpdateTask updates an existing task.
// PUT /sdk/v1/dsb/tasks/:taskId
func (h *DSBHandlers) UpdateTask(c *gin.Context) {
taskID, err := uuid.Parse(c.Param("taskId"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid task ID"})
return
}
// We need to fetch the existing task first. Since tasks belong to assignments,
// we query by task ID directly. For now, we do a lightweight approach: bind the
// update request and apply changes via store.
var req dsb.UpdateTaskRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
// Fetch current task by querying all tasks and filtering. Since we don't have
// a GetTask(taskID) method, we build the task from partial data and update.
// The store UpdateTask uses the task ID to locate the row.
task := &dsb.Task{ID: taskID}
// We need to get the current values to apply partial updates correctly.
// Query the task directly.
row := h.store.Pool().QueryRow(c.Request.Context(), `
SELECT id, assignment_id, title, description, category, priority, status, due_date, completed_at, created_at, updated_at
FROM dsb_tasks WHERE id = $1
`, taskID)
if err := row.Scan(
&task.ID, &task.AssignmentID, &task.Title, &task.Description,
&task.Category, &task.Priority, &task.Status, &task.DueDate,
&task.CompletedAt, &task.CreatedAt, &task.UpdatedAt,
); err != nil {
c.JSON(http.StatusNotFound, gin.H{"error": "task not found"})
return
}
// Apply non-nil fields
if req.Title != nil {
task.Title = *req.Title
}
if req.Description != nil {
task.Description = *req.Description
}
if req.Category != nil {
task.Category = *req.Category
}
if req.Priority != nil {
task.Priority = *req.Priority
}
if req.Status != nil {
task.Status = *req.Status
}
if req.DueDate != nil {
task.DueDate = req.DueDate
}
if err := h.store.UpdateTask(c.Request.Context(), task); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{"task": task})
}
// CompleteTask marks a task as completed.
// POST /sdk/v1/dsb/tasks/:taskId/complete
func (h *DSBHandlers) CompleteTask(c *gin.Context) {
taskID, err := uuid.Parse(c.Param("taskId"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid task ID"})
return
}
if err := h.store.CompleteTask(c.Request.Context(), taskID); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{"message": "task completed"})
}
// ============================================================================
// Communications
// ============================================================================
// CreateCommunication creates a new communication log entry.
// POST /sdk/v1/dsb/assignments/:id/communications
func (h *DSBHandlers) CreateCommunication(c *gin.Context) {
assignmentID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
var req dsb.CreateCommunicationRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
comm := &dsb.Communication{
AssignmentID: assignmentID,
Direction: req.Direction,
Channel: req.Channel,
Subject: req.Subject,
Content: req.Content,
Participants: req.Participants,
}
if err := h.store.CreateCommunication(c.Request.Context(), comm); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusCreated, gin.H{"communication": comm})
}
// ListCommunications returns all communications for an assignment.
// GET /sdk/v1/dsb/assignments/:id/communications
func (h *DSBHandlers) ListCommunications(c *gin.Context) {
assignmentID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"})
return
}
comms, err := h.store.ListCommunications(c.Request.Context(), assignmentID)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{
"communications": comms,
"total": len(comms),
})
}

View File

@@ -1,638 +0,0 @@
package handlers
import (
"fmt"
"net/http"
"os"
"time"
"github.com/breakpilot/ai-compliance-sdk/internal/funding"
"github.com/breakpilot/ai-compliance-sdk/internal/llm"
"github.com/breakpilot/ai-compliance-sdk/internal/rbac"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"gopkg.in/yaml.v3"
)
// FundingHandlers handles funding application API endpoints
type FundingHandlers struct {
store funding.Store
providerRegistry *llm.ProviderRegistry
wizardSchema *WizardSchema
bundeslandProfiles map[string]*BundeslandProfile
}
// WizardSchema represents the loaded wizard schema
type WizardSchema struct {
Metadata struct {
Version string `yaml:"version"`
Name string `yaml:"name"`
Description string `yaml:"description"`
TotalSteps int `yaml:"total_steps"`
} `yaml:"metadata"`
Steps []WizardStep `yaml:"steps"`
FundingAssistant struct {
Enabled bool `yaml:"enabled"`
Model string `yaml:"model"`
SystemPrompt string `yaml:"system_prompt"`
StepContexts map[int]string `yaml:"step_contexts"`
QuickPrompts []QuickPrompt `yaml:"quick_prompts"`
} `yaml:"funding_assistant"`
Presets map[string]Preset `yaml:"presets"`
}
// WizardStep represents a step in the wizard
type WizardStep struct {
Number int `yaml:"number" json:"number"`
ID string `yaml:"id" json:"id"`
Title string `yaml:"title" json:"title"`
Subtitle string `yaml:"subtitle" json:"subtitle"`
Description string `yaml:"description" json:"description"`
Icon string `yaml:"icon" json:"icon"`
IsRequired bool `yaml:"is_required" json:"is_required"`
Fields []WizardField `yaml:"fields" json:"fields"`
AssistantContext string `yaml:"assistant_context" json:"assistant_context"`
}
// WizardField represents a field in the wizard
type WizardField struct {
ID string `yaml:"id" json:"id"`
Type string `yaml:"type" json:"type"`
Label string `yaml:"label" json:"label"`
Placeholder string `yaml:"placeholder,omitempty" json:"placeholder,omitempty"`
Required bool `yaml:"required,omitempty" json:"required,omitempty"`
Options []FieldOption `yaml:"options,omitempty" json:"options,omitempty"`
HelpText string `yaml:"help_text,omitempty" json:"help_text,omitempty"`
MaxLength int `yaml:"max_length,omitempty" json:"max_length,omitempty"`
Min *int `yaml:"min,omitempty" json:"min,omitempty"`
Max *int `yaml:"max,omitempty" json:"max,omitempty"`
Default interface{} `yaml:"default,omitempty" json:"default,omitempty"`
Conditional string `yaml:"conditional,omitempty" json:"conditional,omitempty"`
}
// FieldOption represents an option for select fields
type FieldOption struct {
Value string `yaml:"value" json:"value"`
Label string `yaml:"label" json:"label"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
}
// QuickPrompt represents a quick prompt for the assistant
type QuickPrompt struct {
Label string `yaml:"label" json:"label"`
Prompt string `yaml:"prompt" json:"prompt"`
}
// Preset represents a BreakPilot preset
type Preset struct {
ID string `yaml:"id" json:"id"`
Name string `yaml:"name" json:"name"`
Description string `yaml:"description" json:"description"`
BudgetItems []funding.BudgetItem `yaml:"budget_items" json:"budget_items"`
AutoFill map[string]interface{} `yaml:"auto_fill" json:"auto_fill"`
}
// BundeslandProfile represents a federal state profile
type BundeslandProfile struct {
Name string `yaml:"name" json:"name"`
Short string `yaml:"short" json:"short"`
FundingPrograms []string `yaml:"funding_programs" json:"funding_programs"`
DefaultFundingRate float64 `yaml:"default_funding_rate" json:"default_funding_rate"`
RequiresMEP bool `yaml:"requires_mep" json:"requires_mep"`
ContactAuthority ContactAuthority `yaml:"contact_authority" json:"contact_authority"`
SpecialRequirements []string `yaml:"special_requirements" json:"special_requirements"`
}
// ContactAuthority represents a contact authority
type ContactAuthority struct {
Name string `yaml:"name" json:"name"`
Department string `yaml:"department,omitempty" json:"department,omitempty"`
Website string `yaml:"website" json:"website"`
Email string `yaml:"email,omitempty" json:"email,omitempty"`
}
// NewFundingHandlers creates new funding handlers
func NewFundingHandlers(store funding.Store, providerRegistry *llm.ProviderRegistry) *FundingHandlers {
h := &FundingHandlers{
store: store,
providerRegistry: providerRegistry,
}
// Load wizard schema
if err := h.loadWizardSchema(); err != nil {
fmt.Printf("Warning: Could not load wizard schema: %v\n", err)
}
// Load bundesland profiles
if err := h.loadBundeslandProfiles(); err != nil {
fmt.Printf("Warning: Could not load bundesland profiles: %v\n", err)
}
return h
}
func (h *FundingHandlers) loadWizardSchema() error {
data, err := os.ReadFile("policies/funding/foerderantrag_wizard_v1.yaml")
if err != nil {
return err
}
h.wizardSchema = &WizardSchema{}
return yaml.Unmarshal(data, h.wizardSchema)
}
func (h *FundingHandlers) loadBundeslandProfiles() error {
data, err := os.ReadFile("policies/funding/bundesland_profiles.yaml")
if err != nil {
return err
}
var profiles struct {
Bundeslaender map[string]*BundeslandProfile `yaml:"bundeslaender"`
}
if err := yaml.Unmarshal(data, &profiles); err != nil {
return err
}
h.bundeslandProfiles = profiles.Bundeslaender
return nil
}
// ============================================================================
// Application CRUD
// ============================================================================
// CreateApplication creates a new funding application
// POST /sdk/v1/funding/applications
func (h *FundingHandlers) CreateApplication(c *gin.Context) {
tenantID := rbac.GetTenantID(c)
userID := rbac.GetUserID(c)
if tenantID == uuid.Nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"})
return
}
var req funding.CreateApplicationRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
app := &funding.FundingApplication{
TenantID: tenantID,
Title: req.Title,
FundingProgram: req.FundingProgram,
Status: funding.ApplicationStatusDraft,
CurrentStep: 1,
TotalSteps: 8,
WizardData: make(map[string]interface{}),
CreatedBy: userID,
UpdatedBy: userID,
}
// Initialize school profile with federal state
app.SchoolProfile = &funding.SchoolProfile{
FederalState: req.FederalState,
}
// Apply preset if specified
if req.PresetID != "" && h.wizardSchema != nil {
if preset, ok := h.wizardSchema.Presets[req.PresetID]; ok {
app.Budget = &funding.Budget{
BudgetItems: preset.BudgetItems,
}
app.WizardData["preset_id"] = req.PresetID
app.WizardData["preset_applied"] = true
for k, v := range preset.AutoFill {
app.WizardData[k] = v
}
}
}
if err := h.store.CreateApplication(c.Request.Context(), app); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
// Add history entry
h.store.AddHistoryEntry(c.Request.Context(), &funding.ApplicationHistoryEntry{
ApplicationID: app.ID,
Action: "created",
PerformedBy: userID,
Notes: "Antrag erstellt",
})
c.JSON(http.StatusCreated, app)
}
// GetApplication retrieves a funding application
// GET /sdk/v1/funding/applications/:id
func (h *FundingHandlers) GetApplication(c *gin.Context) {
	appID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	application, loadErr := h.store.GetApplication(c.Request.Context(), appID)
	if loadErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": loadErr.Error()})
		return
	}
	c.JSON(http.StatusOK, application)
}

// ListApplications returns a list of funding applications
// GET /sdk/v1/funding/applications
func (h *FundingHandlers) ListApplications(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	if tenantID == uuid.Nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"})
		return
	}
	// Defaults: first page, 20 entries per page.
	filter := funding.ApplicationFilter{Page: 1, PageSize: 20}
	// Optional filters from the query string.
	if raw := c.Query("status"); raw != "" {
		status := funding.ApplicationStatus(raw)
		filter.Status = &status
	}
	if raw := c.Query("program"); raw != "" {
		prog := funding.FundingProgram(raw)
		filter.FundingProgram = &prog
	}
	result, listErr := h.store.ListApplications(c.Request.Context(), tenantID, filter)
	if listErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": listErr.Error()})
		return
	}
	c.JSON(http.StatusOK, result)
}
// UpdateApplication updates a funding application
// PUT /sdk/v1/funding/applications/:id
func (h *FundingHandlers) UpdateApplication(c *gin.Context) {
	userID := rbac.GetUserID(c)
	appID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	app, loadErr := h.store.GetApplication(c.Request.Context(), appID)
	if loadErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": loadErr.Error()})
		return
	}
	var req funding.UpdateApplicationRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	// Partial update: only fields present in the request are applied.
	if req.Title != nil {
		app.Title = *req.Title
	}
	if req.WizardData != nil {
		// Merge incoming wizard data over the stored map, key by key.
		for key, value := range req.WizardData {
			app.WizardData[key] = value
		}
	}
	if req.CurrentStep != nil {
		app.CurrentStep = *req.CurrentStep
	}
	app.UpdatedBy = userID
	if saveErr := h.store.UpdateApplication(c.Request.Context(), app); saveErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": saveErr.Error()})
		return
	}
	c.JSON(http.StatusOK, app)
}

// DeleteApplication deletes a funding application
// DELETE /sdk/v1/funding/applications/:id
func (h *FundingHandlers) DeleteApplication(c *gin.Context) {
	appID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	if delErr := h.store.DeleteApplication(c.Request.Context(), appID); delErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": delErr.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"message": "application archived"})
}
// ============================================================================
// Wizard Endpoints
// ============================================================================

// GetWizardSchema returns the wizard schema
// GET /sdk/v1/funding/wizard/schema
func (h *FundingHandlers) GetWizardSchema(c *gin.Context) {
	schema := h.wizardSchema
	if schema == nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "wizard schema not loaded"})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"metadata": schema.Metadata,
		"steps":    schema.Steps,
		"presets":  schema.Presets,
		"assistant": gin.H{
			"enabled":       schema.FundingAssistant.Enabled,
			"quick_prompts": schema.FundingAssistant.QuickPrompts,
		},
	})
}

// SaveWizardStep saves wizard step data
// POST /sdk/v1/funding/applications/:id/wizard
func (h *FundingHandlers) SaveWizardStep(c *gin.Context) {
	userID := rbac.GetUserID(c)
	appID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	var req funding.SaveWizardStepRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	// Persist the step payload first, then report the refreshed progress.
	if saveErr := h.store.SaveWizardStep(c.Request.Context(), appID, req.Step, req.Data); saveErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": saveErr.Error()})
		return
	}
	progress, progErr := h.store.GetWizardProgress(c.Request.Context(), appID)
	if progErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": progErr.Error()})
		return
	}
	// Record an audit-trail entry (result not checked here).
	h.store.AddHistoryEntry(c.Request.Context(), &funding.ApplicationHistoryEntry{
		ApplicationID: appID,
		Action:        "wizard_step_saved",
		PerformedBy:   userID,
		Notes:         fmt.Sprintf("Schritt %d gespeichert", req.Step),
	})
	c.JSON(http.StatusOK, progress)
}
// AskAssistant handles LLM assistant queries
// POST /sdk/v1/funding/wizard/ask
func (h *FundingHandlers) AskAssistant(c *gin.Context) {
	var req funding.AssistantRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	schema := h.wizardSchema
	if schema == nil || !schema.FundingAssistant.Enabled {
		c.JSON(http.StatusServiceUnavailable, gin.H{"error": "assistant not available"})
		return
	}
	// System prompt = base prompt plus optional per-step context.
	systemPrompt := schema.FundingAssistant.SystemPrompt
	if stepCtx, found := schema.FundingAssistant.StepContexts[req.CurrentStep]; found {
		systemPrompt += "\n\nKontext fuer diesen Schritt:\n" + stepCtx
	}
	// Conversation order: system prompt, prior history, then the new question.
	messages := make([]llm.Message, 0, len(req.History)+2)
	messages = append(messages, llm.Message{Role: "system", Content: systemPrompt})
	for _, turn := range req.History {
		messages = append(messages, llm.Message{Role: turn.Role, Content: turn.Content})
	}
	messages = append(messages, llm.Message{Role: "user", Content: req.Question})
	// Generate the answer via the provider registry.
	chatReq := &llm.ChatRequest{
		Messages:    messages,
		Temperature: 0.3,
		MaxTokens:   1000,
	}
	response, chatErr := h.providerRegistry.Chat(c.Request.Context(), chatReq)
	if chatErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": chatErr.Error()})
		return
	}
	c.JSON(http.StatusOK, funding.AssistantResponse{Answer: response.Message.Content})
}
// ============================================================================
// Status Endpoints
// ============================================================================

// SubmitApplication submits an application for review.
// It verifies that every wizard step is completed, transitions the application
// to the "submitted" status with a submission timestamp, and records an
// audit-trail entry.
// POST /sdk/v1/funding/applications/:id/submit
func (h *FundingHandlers) SubmitApplication(c *gin.Context) {
	userID := rbac.GetUserID(c)
	idStr := c.Param("id")
	id, err := uuid.Parse(idStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	app, err := h.store.GetApplication(c.Request.Context(), id)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
		return
	}
	// Validate that all required steps are completed.
	// Fix: the progress-lookup error was previously discarded (`progress, _`),
	// which made a store failure indistinguishable from "steps missing" and
	// reported it as a client error. Surface it as a server error instead.
	progress, err := h.store.GetWizardProgress(c.Request.Context(), id)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	if progress == nil || len(progress.CompletedSteps) < app.TotalSteps {
		c.JSON(http.StatusBadRequest, gin.H{"error": "not all required steps completed"})
		return
	}
	// Transition to submitted and stamp the submission time.
	app.Status = funding.ApplicationStatusSubmitted
	now := time.Now()
	app.SubmittedAt = &now
	app.UpdatedBy = userID
	if err := h.store.UpdateApplication(c.Request.Context(), app); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	// Audit-trail entry; the result is not checked, matching the other
	// handlers in this file. NOTE(review): confirm best-effort is intended.
	h.store.AddHistoryEntry(c.Request.Context(), &funding.ApplicationHistoryEntry{
		ApplicationID: id,
		Action:        "submitted",
		PerformedBy:   userID,
		Notes:         "Antrag eingereicht",
	})
	c.JSON(http.StatusOK, app)
}
// ============================================================================
// Export Endpoints
// ============================================================================

// ExportApplication exports all documents as ZIP
// GET /sdk/v1/funding/applications/:id/export
func (h *FundingHandlers) ExportApplication(c *gin.Context) {
	appID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	app, loadErr := h.store.GetApplication(c.Request.Context(), appID)
	if loadErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": loadErr.Error()})
		return
	}
	// Placeholder response; actual export generation lives in export.go.
	c.JSON(http.StatusOK, gin.H{
		"message":        "Export generation initiated",
		"application_id": app.ID,
		"status":         "processing",
	})
}

// PreviewApplication generates a PDF preview
// GET /sdk/v1/funding/applications/:id/preview
func (h *FundingHandlers) PreviewApplication(c *gin.Context) {
	appID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	app, loadErr := h.store.GetApplication(c.Request.Context(), appID)
	if loadErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": loadErr.Error()})
		return
	}
	// Placeholder response until PDF preview generation is implemented.
	c.JSON(http.StatusOK, gin.H{
		"message":        "Preview generation initiated",
		"application_id": app.ID,
	})
}
// ============================================================================
// Bundesland Profile Endpoints
// ============================================================================

// GetBundeslandProfiles returns all bundesland profiles
// GET /sdk/v1/funding/bundeslaender
func (h *FundingHandlers) GetBundeslandProfiles(c *gin.Context) {
	profiles := h.bundeslandProfiles
	if profiles == nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "bundesland profiles not loaded"})
		return
	}
	c.JSON(http.StatusOK, profiles)
}

// GetBundeslandProfile returns a specific bundesland profile
// GET /sdk/v1/funding/bundeslaender/:state
func (h *FundingHandlers) GetBundeslandProfile(c *gin.Context) {
	if h.bundeslandProfiles == nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "bundesland profiles not loaded"})
		return
	}
	state := c.Param("state")
	profile, found := h.bundeslandProfiles[state]
	if !found {
		c.JSON(http.StatusNotFound, gin.H{"error": "bundesland not found"})
		return
	}
	c.JSON(http.StatusOK, profile)
}
// ============================================================================
// Statistics Endpoint
// ============================================================================

// GetStatistics returns funding statistics
// GET /sdk/v1/funding/statistics
func (h *FundingHandlers) GetStatistics(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	if tenantID == uuid.Nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"})
		return
	}
	stats, statsErr := h.store.GetStatistics(c.Request.Context(), tenantID)
	if statsErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": statsErr.Error()})
		return
	}
	c.JSON(http.StatusOK, stats)
}

// ============================================================================
// History Endpoint
// ============================================================================

// GetApplicationHistory returns the audit trail
// GET /sdk/v1/funding/applications/:id/history
func (h *FundingHandlers) GetApplicationHistory(c *gin.Context) {
	appID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"})
		return
	}
	entries, histErr := h.store.GetHistory(c.Request.Context(), appID)
	if histErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": histErr.Error()})
		return
	}
	c.JSON(http.StatusOK, entries)
}

View File

@@ -1,188 +0,0 @@
package handlers
import (
"net/http"
"github.com/breakpilot/ai-compliance-sdk/internal/gci"
"github.com/breakpilot/ai-compliance-sdk/internal/rbac"
"github.com/gin-gonic/gin"
)
// GCIHandlers exposes HTTP endpoints for the Governance Compliance Index (GCI)
// engine: scores, breakdowns, history, matrix, NIS2 and ISO 27001 views.
type GCIHandlers struct {
	engine *gci.Engine // scoring engine; every handler delegates to it
}

// NewGCIHandlers creates new GCI handlers backed by the given engine.
func NewGCIHandlers(engine *gci.Engine) *GCIHandlers {
	return &GCIHandlers{engine: engine}
}
// GetScore returns the GCI score for the current tenant
// GET /sdk/v1/gci/score
func (h *GCIHandlers) GetScore(c *gin.Context) {
	tenant := rbac.GetTenantID(c).String()
	profile := c.DefaultQuery("profile", "default")
	c.JSON(http.StatusOK, h.engine.Calculate(tenant, profile))
}

// GetScoreBreakdown returns the detailed 4-level GCI breakdown
// GET /sdk/v1/gci/score/breakdown
func (h *GCIHandlers) GetScoreBreakdown(c *gin.Context) {
	tenant := rbac.GetTenantID(c).String()
	profile := c.DefaultQuery("profile", "default")
	c.JSON(http.StatusOK, h.engine.CalculateBreakdown(tenant, profile))
}

// GetHistory returns historical GCI snapshots for trend analysis
// GET /sdk/v1/gci/score/history
func (h *GCIHandlers) GetHistory(c *gin.Context) {
	tenant := rbac.GetTenantID(c).String()
	snapshots := h.engine.GetHistory(tenant)
	c.JSON(http.StatusOK, gin.H{
		"tenant_id": tenant,
		"snapshots": snapshots,
		"total":     len(snapshots),
	})
}

// GetMatrix returns the compliance matrix (roles x regulations)
// GET /sdk/v1/gci/matrix
func (h *GCIHandlers) GetMatrix(c *gin.Context) {
	tenant := rbac.GetTenantID(c).String()
	c.JSON(http.StatusOK, gin.H{
		"tenant_id": tenant,
		"matrix":    h.engine.GetMatrix(tenant),
	})
}

// GetAuditTrail returns the audit trail for the latest GCI calculation
// GET /sdk/v1/gci/audit-trail
func (h *GCIHandlers) GetAuditTrail(c *gin.Context) {
	tenant := rbac.GetTenantID(c).String()
	profile := c.DefaultQuery("profile", "default")
	// A fresh calculation is performed just to obtain its audit trail.
	result := h.engine.Calculate(tenant, profile)
	c.JSON(http.StatusOK, gin.H{
		"tenant_id":   tenant,
		"gci_score":   result.GCIScore,
		"audit_trail": result.AuditTrail,
	})
}
// GetNIS2Score returns the NIS2-specific compliance score
// GET /sdk/v1/gci/nis2/score
func (h *GCIHandlers) GetNIS2Score(c *gin.Context) {
	tenant := rbac.GetTenantID(c).String()
	c.JSON(http.StatusOK, gci.CalculateNIS2Score(tenant))
}

// ListNIS2Roles returns available NIS2 responsibility roles
// GET /sdk/v1/gci/nis2/roles
func (h *GCIHandlers) ListNIS2Roles(c *gin.Context) {
	roles := gci.ListNIS2Roles()
	c.JSON(http.StatusOK, gin.H{"roles": roles, "total": len(roles)})
}

// AssignNIS2Role assigns a NIS2 role to a user (stub - returns mock)
// POST /sdk/v1/gci/nis2/roles/assign
func (h *GCIHandlers) AssignNIS2Role(c *gin.Context) {
	var req struct {
		RoleID string `json:"role_id" binding:"required"`
		UserID string `json:"user_id" binding:"required"`
	}
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	role, found := gci.GetNIS2Role(req.RoleID)
	if !found {
		c.JSON(http.StatusNotFound, gin.H{"error": "NIS2 role not found"})
		return
	}
	// Stub: nothing is persisted -- the assignment is echoed back.
	c.JSON(http.StatusOK, gin.H{
		"status":  "assigned",
		"role":    role,
		"user_id": req.UserID,
	})
}
// GetISOGapAnalysis returns the ISO 27001 gap analysis
// GET /sdk/v1/gci/iso/gap-analysis
func (h *GCIHandlers) GetISOGapAnalysis(c *gin.Context) {
	tenant := rbac.GetTenantID(c).String()
	c.JSON(http.StatusOK, gci.CalculateISOGapAnalysis(tenant))
}

// ListISOMappings returns all ISO 27001 control mappings
// GET /sdk/v1/gci/iso/mappings
func (h *GCIHandlers) ListISOMappings(c *gin.Context) {
	// With ?category=..., return just that category's controls.
	if category := c.Query("category"); category != "" {
		controls := gci.GetISOControlsByCategory(category)
		c.JSON(http.StatusOK, gin.H{
			"controls": controls,
			"total":    len(controls),
			"category": category,
		})
		return
	}
	// Otherwise group the known Annex A categories, skipping empty ones.
	grouped := make(map[string][]gci.ISOControl)
	total := 0
	for _, cat := range []string{"A.5", "A.6", "A.7", "A.8"} {
		if controls := gci.GetISOControlsByCategory(cat); len(controls) > 0 {
			grouped[cat] = controls
			total += len(controls)
		}
	}
	c.JSON(http.StatusOK, gin.H{
		"categories": grouped,
		"total":      total,
	})
}

// GetISOMapping returns a single ISO control by ID
// GET /sdk/v1/gci/iso/mappings/:controlId
func (h *GCIHandlers) GetISOMapping(c *gin.Context) {
	control, found := gci.GetISOControlByID(c.Param("controlId"))
	if !found {
		c.JSON(http.StatusNotFound, gin.H{"error": "ISO control not found"})
		return
	}
	c.JSON(http.StatusOK, control)
}

// GetWeightProfiles returns available weighting profiles
// GET /sdk/v1/gci/profiles
func (h *GCIHandlers) GetWeightProfiles(c *gin.Context) {
	ids := []string{"default", "nis2_relevant", "ki_nutzer"}
	profiles := make([]gci.WeightProfile, 0, len(ids))
	for _, id := range ids {
		profiles = append(profiles, gci.GetProfile(id))
	}
	c.JSON(http.StatusOK, gin.H{"profiles": profiles})
}

View File

@@ -1,115 +0,0 @@
package handlers
import (
"net/http"
"github.com/breakpilot/ai-compliance-sdk/internal/industry"
"github.com/gin-gonic/gin"
)
// IndustryHandlers handles industry-specific compliance template requests.
// All data is static (embedded Go structs), so no store/database is needed.
// The struct is empty and therefore stateless; a single instance can safely
// serve all requests.
type IndustryHandlers struct{}

// NewIndustryHandlers creates new industry handlers
func NewIndustryHandlers() *IndustryHandlers {
	return &IndustryHandlers{}
}
// ============================================================================
// Industry Template Endpoints
// ============================================================================

// ListIndustries returns a summary list of all available industry templates.
// GET /sdk/v1/industries
func (h *IndustryHandlers) ListIndustries(c *gin.Context) {
	templates := industry.GetAllTemplates()
	summaries := make([]industry.IndustrySummary, 0, len(templates))
	for _, tmpl := range templates {
		summaries = append(summaries, industry.IndustrySummary{
			Slug:            tmpl.Slug,
			Name:            tmpl.Name,
			Description:     tmpl.Description,
			Icon:            tmpl.Icon,
			RegulationCount: len(tmpl.Regulations),
			TemplateCount:   len(tmpl.VVTTemplates),
		})
	}
	c.JSON(http.StatusOK, industry.IndustryListResponse{
		Industries: summaries,
		Total:      len(summaries),
	})
}

// GetIndustry returns the full industry template for a given slug.
// GET /sdk/v1/industries/:slug
func (h *IndustryHandlers) GetIndustry(c *gin.Context) {
	slug := c.Param("slug")
	if tmpl := industry.GetTemplateBySlug(slug); tmpl != nil {
		c.JSON(http.StatusOK, tmpl)
		return
	}
	c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": slug})
}
// GetVVTTemplates returns only the VVT templates for a given industry.
// GET /sdk/v1/industries/:slug/vvt-templates
func (h *IndustryHandlers) GetVVTTemplates(c *gin.Context) {
	requested := c.Param("slug")
	tmpl := industry.GetTemplateBySlug(requested)
	if tmpl == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": requested})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"slug":          tmpl.Slug,
		"industry":      tmpl.Name,
		"vvt_templates": tmpl.VVTTemplates,
		"total":         len(tmpl.VVTTemplates),
	})
}

// GetTOMRecommendations returns only the TOM recommendations for a given industry.
// GET /sdk/v1/industries/:slug/tom-recommendations
func (h *IndustryHandlers) GetTOMRecommendations(c *gin.Context) {
	requested := c.Param("slug")
	tmpl := industry.GetTemplateBySlug(requested)
	if tmpl == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": requested})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"slug":                tmpl.Slug,
		"industry":            tmpl.Name,
		"tom_recommendations": tmpl.TOMRecommendations,
		"total":               len(tmpl.TOMRecommendations),
	})
}

// GetRiskScenarios returns only the risk scenarios for a given industry.
// GET /sdk/v1/industries/:slug/risk-scenarios
func (h *IndustryHandlers) GetRiskScenarios(c *gin.Context) {
	requested := c.Param("slug")
	tmpl := industry.GetTemplateBySlug(requested)
	if tmpl == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": requested})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"slug":           tmpl.Slug,
		"industry":       tmpl.Name,
		"risk_scenarios": tmpl.RiskScenarios,
		"total":          len(tmpl.RiskScenarios),
	})
}

View File

@@ -1,268 +0,0 @@
package handlers
import (
"net/http"
"github.com/breakpilot/ai-compliance-sdk/internal/multitenant"
"github.com/breakpilot/ai-compliance-sdk/internal/rbac"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
// MultiTenantHandlers handles multi-tenant administration endpoints.
type MultiTenantHandlers struct {
	store     *multitenant.Store // tenant overview/detail aggregation
	rbacStore *rbac.Store        // tenant and namespace CRUD
}

// NewMultiTenantHandlers creates new multi-tenant handlers.
func NewMultiTenantHandlers(store *multitenant.Store, rbacStore *rbac.Store) *MultiTenantHandlers {
	return &MultiTenantHandlers{
		store:     store,
		rbacStore: rbacStore,
	}
}
// GetOverview returns all tenants with compliance scores and module highlights.
// GET /sdk/v1/multi-tenant/overview
func (h *MultiTenantHandlers) GetOverview(c *gin.Context) {
	overview, ovErr := h.store.GetOverview(c.Request.Context())
	if ovErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": ovErr.Error()})
		return
	}
	c.JSON(http.StatusOK, overview)
}

// GetTenantDetail returns detailed compliance info for one tenant.
// GET /sdk/v1/multi-tenant/tenants/:id
func (h *MultiTenantHandlers) GetTenantDetail(c *gin.Context) {
	tenantID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"})
		return
	}
	detail, getErr := h.store.GetTenantDetail(c.Request.Context(), tenantID)
	if getErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"})
		return
	}
	c.JSON(http.StatusOK, detail)
}
// CreateTenant creates a new tenant with default setup.
// It creates the tenant via the RBAC store and then creates a default "main" namespace.
// POST /sdk/v1/multi-tenant/tenants
func (h *MultiTenantHandlers) CreateTenant(c *gin.Context) {
	var req multitenant.CreateTenantRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Build the tenant from the request
	tenant := &rbac.Tenant{
		Name:            req.Name,
		Slug:            req.Slug,
		MaxUsers:        req.MaxUsers,
		LLMQuotaMonthly: req.LLMQuotaMonthly,
	}
	// Create tenant via RBAC store (assigns ID, timestamps, defaults)
	if err := h.rbacStore.CreateTenant(c.Request.Context(), tenant); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	// Create default "main" namespace for the new tenant
	defaultNamespace := &rbac.Namespace{
		TenantID: tenant.ID,
		Name:     "Main",
		Slug:     "main",
	}
	if err := h.rbacStore.CreateNamespace(c.Request.Context(), defaultNamespace); err != nil {
		// Tenant was created successfully but namespace creation failed.
		// Log and continue -- the tenant is still usable.
		// Deliberate partial success: the handler still responds 201 (the tenant
		// exists) and attaches a "warning" field instead of an error status.
		// NOTE(review): there is no rollback here -- confirm callers tolerate a
		// tenant without its default namespace.
		c.JSON(http.StatusCreated, gin.H{
			"tenant":  tenant,
			"warning": "tenant created but default namespace creation failed: " + err.Error(),
		})
		return
	}
	c.JSON(http.StatusCreated, gin.H{
		"tenant":    tenant,
		"namespace": defaultNamespace,
	})
}
// UpdateTenant performs a partial update of tenant settings.
// Only non-nil fields in the request body are applied.
// PUT /sdk/v1/multi-tenant/tenants/:id
func (h *MultiTenantHandlers) UpdateTenant(c *gin.Context) {
	tenantID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"})
		return
	}
	var req multitenant.UpdateTenantRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	// Load the current record so untouched fields keep their values.
	tenant, getErr := h.rbacStore.GetTenant(c.Request.Context(), tenantID)
	if getErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"})
		return
	}
	// Copy over only the fields the caller supplied.
	if req.Name != nil {
		tenant.Name = *req.Name
	}
	if req.MaxUsers != nil {
		tenant.MaxUsers = *req.MaxUsers
	}
	if req.LLMQuotaMonthly != nil {
		tenant.LLMQuotaMonthly = *req.LLMQuotaMonthly
	}
	if req.Status != nil {
		tenant.Status = rbac.TenantStatus(*req.Status)
	}
	if saveErr := h.rbacStore.UpdateTenant(c.Request.Context(), tenant); saveErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": saveErr.Error()})
		return
	}
	c.JSON(http.StatusOK, tenant)
}
// ListNamespaces returns all namespaces for a specific tenant.
// GET /sdk/v1/multi-tenant/tenants/:id/namespaces
func (h *MultiTenantHandlers) ListNamespaces(c *gin.Context) {
	tenantID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"})
		return
	}
	namespaces, listErr := h.rbacStore.ListNamespaces(c.Request.Context(), tenantID)
	if listErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": listErr.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"namespaces": namespaces,
		"total":      len(namespaces),
	})
}

// CreateNamespace creates a new namespace within a tenant.
// POST /sdk/v1/multi-tenant/tenants/:id/namespaces
func (h *MultiTenantHandlers) CreateNamespace(c *gin.Context) {
	tenantID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"})
		return
	}
	// Reject early when the parent tenant does not exist.
	if _, getErr := h.rbacStore.GetTenant(c.Request.Context(), tenantID); getErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"})
		return
	}
	var req multitenant.CreateNamespaceRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	namespace := &rbac.Namespace{
		TenantID: tenantID,
		Name:     req.Name,
		Slug:     req.Slug,
	}
	// Optional attributes are applied only when provided.
	if req.IsolationLevel != "" {
		namespace.IsolationLevel = rbac.IsolationLevel(req.IsolationLevel)
	}
	if req.DataClassification != "" {
		namespace.DataClassification = rbac.DataClassification(req.DataClassification)
	}
	if createErr := h.rbacStore.CreateNamespace(c.Request.Context(), namespace); createErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": createErr.Error()})
		return
	}
	c.JSON(http.StatusCreated, namespace)
}
// SwitchTenant returns the tenant info needed for the frontend to switch context.
// The caller provides a tenant_id and receives back the tenant details needed
// to update the frontend's active tenant state.
// POST /sdk/v1/multi-tenant/switch
func (h *MultiTenantHandlers) SwitchTenant(c *gin.Context) {
	var req multitenant.SwitchTenantRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	tenantID, parseErr := uuid.Parse(req.TenantID)
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"})
		return
	}
	tenant, getErr := h.rbacStore.GetTenant(c.Request.Context(), tenantID)
	if getErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"})
		return
	}
	// Only active tenants may be switched to.
	if tenant.Status != rbac.TenantStatusActive {
		c.JSON(http.StatusForbidden, gin.H{
			"error":  "tenant not active",
			"status": string(tenant.Status),
		})
		return
	}
	// Build the tenant payload once; it is shared by both response shapes.
	payload := multitenant.SwitchTenantResponse{
		TenantID:   tenant.ID,
		TenantName: tenant.Name,
		TenantSlug: tenant.Slug,
		Status:     string(tenant.Status),
	}
	// Namespaces let the frontend populate namespace selectors.
	namespaces, nsErr := h.rbacStore.ListNamespaces(c.Request.Context(), tenantID)
	if nsErr != nil {
		// Non-fatal: return tenant info without namespaces.
		c.JSON(http.StatusOK, gin.H{"tenant": payload})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"tenant":     payload,
		"namespaces": namespaces,
	})
}

View File

@@ -1,97 +0,0 @@
package handlers
import (
"net/http"
"github.com/breakpilot/ai-compliance-sdk/internal/rbac"
"github.com/breakpilot/ai-compliance-sdk/internal/reporting"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
// ReportingHandlers serves cross-module reporting endpoints; every handler in
// this file derives its payload from store.GenerateReport.
type ReportingHandlers struct {
	store *reporting.Store // report generator; the only dependency
}

// NewReportingHandlers creates reporting handlers backed by the given store.
func NewReportingHandlers(store *reporting.Store) *ReportingHandlers {
	return &ReportingHandlers{store: store}
}
// GetExecutiveReport generates a comprehensive compliance report
// GET /sdk/v1/reporting/executive
func (h *ReportingHandlers) GetExecutiveReport(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	if tenantID == uuid.Nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"})
		return
	}
	report, genErr := h.store.GenerateReport(c.Request.Context(), tenantID)
	if genErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": genErr.Error()})
		return
	}
	c.JSON(http.StatusOK, report)
}

// GetComplianceScore returns just the overall compliance score
// GET /sdk/v1/reporting/score
func (h *ReportingHandlers) GetComplianceScore(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	if tenantID == uuid.Nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"})
		return
	}
	// The full report is generated and then reduced to the score fields.
	report, genErr := h.store.GenerateReport(c.Request.Context(), tenantID)
	if genErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": genErr.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"compliance_score": report.ComplianceScore,
		"risk_level":       report.RiskOverview.OverallLevel,
		"generated_at":     report.GeneratedAt,
	})
}

// GetUpcomingDeadlines returns deadlines across all modules
// GET /sdk/v1/reporting/deadlines
func (h *ReportingHandlers) GetUpcomingDeadlines(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	if tenantID == uuid.Nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"})
		return
	}
	report, genErr := h.store.GenerateReport(c.Request.Context(), tenantID)
	if genErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": genErr.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"deadlines": report.UpcomingDeadlines,
		"total":     len(report.UpcomingDeadlines),
	})
}

// GetRiskOverview returns the aggregated risk assessment
// GET /sdk/v1/reporting/risks
func (h *ReportingHandlers) GetRiskOverview(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	if tenantID == uuid.Nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"})
		return
	}
	report, genErr := h.store.GenerateReport(c.Request.Context(), tenantID)
	if genErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": genErr.Error()})
		return
	}
	c.JSON(http.StatusOK, report.RiskOverview)
}

View File

@@ -1,631 +0,0 @@
package handlers
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/breakpilot/ai-compliance-sdk/internal/rbac"
"github.com/breakpilot/ai-compliance-sdk/internal/sso"
"github.com/gin-gonic/gin"
"github.com/golang-jwt/jwt/v5"
"github.com/google/uuid"
)
// SSOHandlers handles SSO-related HTTP requests.
type SSOHandlers struct {
	store     *sso.Store // SSO configuration and user persistence
	jwtSecret string     // JWT secret; not used by the CRUD handlers in this excerpt -- presumably consumed by the login flow, verify
}

// NewSSOHandlers creates new SSO handlers.
func NewSSOHandlers(store *sso.Store, jwtSecret string) *SSOHandlers {
	return &SSOHandlers{store: store, jwtSecret: jwtSecret}
}
// ============================================================================
// SSO Configuration CRUD
// ============================================================================

// CreateConfig creates a new SSO configuration for the tenant.
// POST /sdk/v1/sso/configs
func (h *SSOHandlers) CreateConfig(c *gin.Context) {
	var req sso.CreateSSOConfigRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	cfg, createErr := h.store.CreateConfig(c.Request.Context(), rbac.GetTenantID(c), &req)
	if createErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": createErr.Error()})
		return
	}
	c.JSON(http.StatusCreated, gin.H{"config": cfg})
}

// ListConfigs lists all SSO configurations for the tenant.
// GET /sdk/v1/sso/configs
func (h *SSOHandlers) ListConfigs(c *gin.Context) {
	configs, listErr := h.store.ListConfigs(c.Request.Context(), rbac.GetTenantID(c))
	if listErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": listErr.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"configs": configs,
		"total":   len(configs),
	})
}
// GetConfig retrieves an SSO configuration by ID.
// GET /sdk/v1/sso/configs/:id
func (h *SSOHandlers) GetConfig(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	configID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid config ID"})
		return
	}
	cfg, getErr := h.store.GetConfig(c.Request.Context(), tenantID, configID)
	switch {
	case getErr != nil:
		c.JSON(http.StatusInternalServerError, gin.H{"error": getErr.Error()})
	case cfg == nil:
		// The store signals "not found" with a nil config and no error.
		c.JSON(http.StatusNotFound, gin.H{"error": "sso configuration not found"})
	default:
		c.JSON(http.StatusOK, gin.H{"config": cfg})
	}
}
// UpdateConfig updates an SSO configuration.
// PUT /sdk/v1/sso/configs/:id
func (h *SSOHandlers) UpdateConfig(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	configID, err := uuid.Parse(c.Param("id"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid config ID"})
		return
	}
	var req sso.UpdateSSOConfigRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	cfg, err := h.store.UpdateConfig(c.Request.Context(), tenantID, configID, &req)
	if err != nil {
		// NOTE(review): matching on the error *message* is fragile -- any
		// rewording in the store silently breaks the 404 mapping. Prefer a
		// sentinel error exported by the sso package and errors.Is.
		if err.Error() == "sso configuration not found" {
			c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"config": cfg})
}
// DeleteConfig deletes an SSO configuration.
// DELETE /sdk/v1/sso/configs/:id
func (h *SSOHandlers) DeleteConfig(c *gin.Context) {
tenantID := rbac.GetTenantID(c)
configID, err := uuid.Parse(c.Param("id"))
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid config ID"})
return
}
if err := h.store.DeleteConfig(c.Request.Context(), tenantID, configID); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, gin.H{"message": "sso configuration deleted"})
}
// ============================================================================
// SSO Users
// ============================================================================
// ListUsers lists all SSO-provisioned users for the caller's tenant.
// GET /sdk/v1/sso/users
func (h *SSOHandlers) ListUsers(c *gin.Context) {
	tenantID := rbac.GetTenantID(c)
	users, listErr := h.store.ListUsers(c.Request.Context(), tenantID)
	if listErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": listErr.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"users": users,
		"total": len(users),
	})
}
// ============================================================================
// OIDC Flow
// ============================================================================
// InitiateOIDCLogin initiates the OIDC authorization code flow.
// It looks up the enabled SSO config for the tenant, builds the authorization
// URL, sets a state cookie, and redirects the user to the IdP.
//
// The state parameter has the form "<random-32-bytes-b64>.<tenant-uuid>" so
// the callback can recover the tenant without server-side session storage;
// the same value is mirrored in an HttpOnly cookie for CSRF validation.
// GET /sdk/v1/sso/oidc/login
func (h *SSOHandlers) InitiateOIDCLogin(c *gin.Context) {
	// Resolve tenant ID from query param
	tenantIDStr := c.Query("tenant_id")
	if tenantIDStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "tenant_id query parameter is required"})
		return
	}
	tenantID, err := uuid.Parse(tenantIDStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant_id"})
		return
	}
	// Look up the enabled SSO config for this tenant
	cfg, err := h.store.GetEnabledConfig(c.Request.Context(), tenantID)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	if cfg == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "no enabled SSO configuration found for this tenant"})
		return
	}
	if cfg.ProviderType != sso.ProviderTypeOIDC {
		c.JSON(http.StatusBadRequest, gin.H{"error": "SSO configuration is not OIDC"})
		return
	}
	// Discover the authorization endpoint from the issuer's well-known metadata
	discoveryURL := strings.TrimSuffix(cfg.OIDCIssuerURL, "/") + "/.well-known/openid-configuration"
	authEndpoint, _, _, err := discoverOIDCEndpoints(discoveryURL)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("OIDC discovery failed: %v", err)})
		return
	}
	// Generate state parameter (random bytes + tenant_id for correlation)
	stateBytes := make([]byte, 32)
	if _, err := rand.Read(stateBytes); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate state"})
		return
	}
	state := base64.URLEncoding.EncodeToString(stateBytes) + "." + tenantID.String()
	// Generate nonce (sent to the IdP; intended to bind the ID token to this request)
	nonceBytes := make([]byte, 16)
	if _, err := rand.Read(nonceBytes); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate nonce"})
		return
	}
	nonce := base64.URLEncoding.EncodeToString(nonceBytes)
	// Build authorization URL; fall back to standard scopes when the config has none
	scopes := cfg.OIDCScopes
	if len(scopes) == 0 {
		scopes = []string{"openid", "profile", "email"}
	}
	params := url.Values{
		"client_id": {cfg.OIDCClientID},
		"redirect_uri": {cfg.OIDCRedirectURI},
		"response_type": {"code"},
		"scope": {strings.Join(scopes, " ")},
		"state": {state},
		"nonce": {nonce},
	}
	authURL := authEndpoint + "?" + params.Encode()
	// Set state cookie for CSRF protection (HttpOnly, Secure, 10 min expiry)
	c.SetCookie("sso_state", state, 600, "/", "", true, true)
	c.SetCookie("sso_nonce", nonce, 600, "/", "", true, true)
	c.Redirect(http.StatusFound, authURL)
}
// HandleOIDCCallback handles the OIDC authorization code callback from the IdP.
// It validates the state, exchanges the code for tokens, extracts user info,
// performs JIT user provisioning, and issues a JWT.
//
// NOTE(review): the sso_nonce cookie is set during login and cleared here, but
// the nonce claim of the ID token is never compared against it — confirm
// whether nonce validation should be added.
// GET /sdk/v1/sso/oidc/callback
func (h *SSOHandlers) HandleOIDCCallback(c *gin.Context) {
	// Check for errors from the IdP
	if errParam := c.Query("error"); errParam != "" {
		errDesc := c.Query("error_description")
		c.JSON(http.StatusBadRequest, gin.H{
			"error": errParam,
			"description": errDesc,
		})
		return
	}
	code := c.Query("code")
	stateParam := c.Query("state")
	if code == "" || stateParam == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "missing code or state parameter"})
		return
	}
	// Validate state cookie (CSRF: cookie must match the round-tripped state)
	stateCookie, err := c.Cookie("sso_state")
	if err != nil || stateCookie != stateParam {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid state parameter (CSRF check failed)"})
		return
	}
	// Extract tenant ID from state ("<random>.<tenant-uuid>", set at login)
	parts := strings.SplitN(stateParam, ".", 2)
	if len(parts) != 2 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "malformed state parameter"})
		return
	}
	tenantID, err := uuid.Parse(parts[1])
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant_id in state"})
		return
	}
	// Look up the enabled SSO config
	cfg, err := h.store.GetEnabledConfig(c.Request.Context(), tenantID)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	if cfg == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "no enabled SSO configuration found"})
		return
	}
	// Discover OIDC endpoints
	discoveryURL := strings.TrimSuffix(cfg.OIDCIssuerURL, "/") + "/.well-known/openid-configuration"
	_, tokenEndpoint, userInfoEndpoint, err := discoverOIDCEndpoints(discoveryURL)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("OIDC discovery failed: %v", err)})
		return
	}
	// Exchange authorization code for tokens
	tokenResp, err := exchangeCodeForTokens(tokenEndpoint, code, cfg.OIDCClientID, cfg.OIDCClientSecret, cfg.OIDCRedirectURI)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("token exchange failed: %v", err)})
		return
	}
	// Extract user claims from ID token or UserInfo endpoint
	claims, err := extractUserClaims(tokenResp, userInfoEndpoint)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to extract user claims: %v", err)})
		return
	}
	sub := getStringClaim(claims, "sub")
	email := getStringClaim(claims, "email")
	name := getStringClaim(claims, "name")
	groups := getStringSliceClaim(claims, "groups")
	if sub == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "ID token missing 'sub' claim"})
		return
	}
	// Fall back to sub/email for missing optional claims
	if email == "" {
		email = sub
	}
	if name == "" {
		name = email
	}
	// JIT provision the user
	user, err := h.store.UpsertUser(c.Request.Context(), tenantID, cfg.ID, sub, email, name, groups)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("user provisioning failed: %v", err)})
		return
	}
	// Determine roles from role mapping
	roles := resolveRoles(cfg, groups)
	// Generate JWT
	ssoClaims := sso.SSOClaims{
		UserID: user.ID,
		TenantID: tenantID,
		Email: user.Email,
		DisplayName: user.DisplayName,
		Roles: roles,
		SSOConfigID: cfg.ID,
	}
	jwtToken, err := h.generateJWT(ssoClaims)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("JWT generation failed: %v", err)})
		return
	}
	// Clear state cookies (max-age -1 deletes them)
	c.SetCookie("sso_state", "", -1, "/", "", true, true)
	c.SetCookie("sso_nonce", "", -1, "/", "", true, true)
	// Return JWT as JSON (the frontend can also handle redirect)
	c.JSON(http.StatusOK, gin.H{
		"token": jwtToken,
		"user": user,
		"roles": roles,
	})
}
// ============================================================================
// JWT Generation
// ============================================================================
// generateJWT creates an HS256-signed JWT carrying the SSO claims,
// valid for 24 hours from issuance.
func (h *SSOHandlers) generateJWT(claims sso.SSOClaims) (string, error) {
	issuedAt := time.Now().UTC()
	mapClaims := jwt.MapClaims{
		"user_id":       claims.UserID.String(),
		"tenant_id":     claims.TenantID.String(),
		"email":         claims.Email,
		"display_name":  claims.DisplayName,
		"roles":         claims.Roles,
		"sso_config_id": claims.SSOConfigID.String(),
		"iss":           "ai-compliance-sdk",
		"iat":           issuedAt.Unix(),
		"exp":           issuedAt.Add(24 * time.Hour).Unix(),
	}
	signed, signErr := jwt.NewWithClaims(jwt.SigningMethodHS256, mapClaims).SignedString([]byte(h.jwtSecret))
	if signErr != nil {
		return "", fmt.Errorf("failed to sign JWT: %w", signErr)
	}
	return signed, nil
}
// ============================================================================
// OIDC Discovery & Token Exchange (manual HTTP, no external OIDC library)
// ============================================================================
// oidcDiscoveryResponse holds the relevant fields from the OIDC discovery
// document served at /.well-known/openid-configuration.
// NOTE(review): JwksURI is decoded but not referenced in this file — it would
// be needed to verify ID-token signatures.
type oidcDiscoveryResponse struct {
	AuthorizationEndpoint string `json:"authorization_endpoint"`
	TokenEndpoint string `json:"token_endpoint"`
	UserinfoEndpoint string `json:"userinfo_endpoint"`
	JwksURI string `json:"jwks_uri"`
	Issuer string `json:"issuer"`
}
// discoverOIDCEndpoints downloads the OIDC discovery document and returns the
// authorization, token, and (optional) userinfo endpoints. An error is
// returned when the fetch fails, the document is malformed, or either
// mandatory endpoint is missing.
func discoverOIDCEndpoints(discoveryURL string) (authEndpoint, tokenEndpoint, userInfoEndpoint string, err error) {
	httpClient := &http.Client{Timeout: 10 * time.Second}
	resp, getErr := httpClient.Get(discoveryURL)
	if getErr != nil {
		return "", "", "", fmt.Errorf("failed to fetch discovery document: %w", getErr)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return "", "", "", fmt.Errorf("discovery endpoint returned %d: %s", resp.StatusCode, string(body))
	}
	var doc oidcDiscoveryResponse
	if decodeErr := json.NewDecoder(resp.Body).Decode(&doc); decodeErr != nil {
		return "", "", "", fmt.Errorf("failed to decode discovery document: %w", decodeErr)
	}
	// Both endpoints are mandatory for the authorization-code flow; userinfo is optional.
	switch {
	case doc.AuthorizationEndpoint == "":
		return "", "", "", fmt.Errorf("discovery document missing authorization_endpoint")
	case doc.TokenEndpoint == "":
		return "", "", "", fmt.Errorf("discovery document missing token_endpoint")
	}
	return doc.AuthorizationEndpoint, doc.TokenEndpoint, doc.UserinfoEndpoint, nil
}
// oidcTokenResponse holds the response from the OIDC token endpoint.
// ExpiresIn is the access-token lifetime in seconds (per OAuth 2.0).
type oidcTokenResponse struct {
	AccessToken string `json:"access_token"`
	IDToken string `json:"id_token"`
	TokenType string `json:"token_type"`
	ExpiresIn int `json:"expires_in"`
	RefreshToken string `json:"refresh_token,omitempty"`
}
// exchangeCodeForTokens redeems an authorization code at the IdP's token
// endpoint and returns the parsed token response. When a client secret is
// configured, the client authenticates via HTTP Basic (client_secret_basic).
func exchangeCodeForTokens(tokenEndpoint, code, clientID, clientSecret, redirectURI string) (*oidcTokenResponse, error) {
	form := url.Values{
		"grant_type":   {"authorization_code"},
		"code":         {code},
		"client_id":    {clientID},
		"redirect_uri": {redirectURI},
	}
	req, reqErr := http.NewRequest("POST", tokenEndpoint, strings.NewReader(form.Encode()))
	if reqErr != nil {
		return nil, fmt.Errorf("failed to create token request: %w", reqErr)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// Use client_secret_basic if provided
	if clientSecret != "" {
		req.SetBasicAuth(clientID, clientSecret)
	}
	httpClient := &http.Client{Timeout: 10 * time.Second}
	resp, doErr := httpClient.Do(req)
	if doErr != nil {
		return nil, fmt.Errorf("token request failed: %w", doErr)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("token endpoint returned %d: %s", resp.StatusCode, string(body))
	}
	parsed := &oidcTokenResponse{}
	if decodeErr := json.NewDecoder(resp.Body).Decode(parsed); decodeErr != nil {
		return nil, fmt.Errorf("failed to decode token response: %w", decodeErr)
	}
	return parsed, nil
}
// extractUserClaims extracts user claims from the ID token payload.
// If the ID token is unavailable or incomplete, it falls back to the UserInfo endpoint.
//
// NOTE(review): the ID-token payload is base64-decoded WITHOUT signature
// verification; in production the signature must be verified against the
// IdP's JWKS before trusting any claim.
func extractUserClaims(tokenResp *oidcTokenResponse, userInfoEndpoint string) (map[string]interface{}, error) {
	claims := make(map[string]interface{})
	// Try to decode ID token payload (without signature verification for claims extraction;
	// in production, you should verify the signature using the JWKS endpoint)
	if tokenResp.IDToken != "" {
		parts := strings.Split(tokenResp.IDToken, ".")
		if len(parts) == 3 {
			payload, err := base64.RawURLEncoding.DecodeString(parts[1])
			if err == nil {
				if err := json.Unmarshal(payload, &claims); err == nil && claims["sub"] != nil {
					return claims, nil
				}
			}
		}
	}
	// Fallback to UserInfo endpoint
	if userInfoEndpoint != "" && tokenResp.AccessToken != "" {
		userClaims, err := fetchUserInfo(userInfoEndpoint, tokenResp.AccessToken)
		if err == nil && userClaims["sub"] != nil {
			return userClaims, nil
		}
	}
	// A failed Unmarshal above may still have partially populated the claims
	// map; accept it if it at least carries a "sub" claim.
	if claims["sub"] != nil {
		return claims, nil
	}
	return nil, fmt.Errorf("could not extract user claims from ID token or UserInfo endpoint")
}
// fetchUserInfo queries the OIDC UserInfo endpoint using the bearer access
// token and returns the decoded claims map.
func fetchUserInfo(userInfoEndpoint, accessToken string) (map[string]interface{}, error) {
	req, reqErr := http.NewRequest("GET", userInfoEndpoint, nil)
	if reqErr != nil {
		return nil, reqErr
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)
	httpClient := &http.Client{Timeout: 10 * time.Second}
	resp, doErr := httpClient.Do(req)
	if doErr != nil {
		return nil, doErr
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("userinfo endpoint returned %d", resp.StatusCode)
	}
	claims := map[string]interface{}{}
	if decodeErr := json.NewDecoder(resp.Body).Decode(&claims); decodeErr != nil {
		return nil, decodeErr
	}
	return claims, nil
}
// ============================================================================
// Claim Extraction Helpers
// ============================================================================
// getStringClaim returns the claim value for key as a string, or "" when the
// claim is absent or not a string.
func getStringClaim(claims map[string]interface{}, key string) string {
	// A missing key yields a nil interface, for which the type assertion
	// fails and s stays "".
	s, _ := claims[key].(string)
	return s
}
// getStringSliceClaim returns the claim value for key as a []string.
// JSON-decoded arrays arrive as []interface{}; non-string elements are
// silently dropped. Returns nil when the claim is absent or not a slice.
func getStringSliceClaim(claims map[string]interface{}, key string) []string {
	raw, present := claims[key]
	if !present {
		return nil
	}
	if typed, ok := raw.([]string); ok {
		return typed
	}
	items, ok := raw.([]interface{})
	if !ok {
		return nil
	}
	out := make([]string, 0, len(items))
	for _, item := range items {
		if s, ok := item.(string); ok {
			out = append(out, s)
		}
	}
	return out
}
// resolveRoles maps SSO groups to internal roles using the config's role
// mapping. When no mapping is configured, or no group matches, a default is
// returned: the config's DefaultRoleID if set, otherwise "compliance_user".
//
// Note: role order is not deterministic (map iteration), matching the
// original behavior.
func resolveRoles(cfg *sso.SSOConfig, groups []string) []string {
	// Fix: the default-role fallback was duplicated in two branches; it is
	// now computed in one place.
	defaultRoles := func() []string {
		if cfg.DefaultRoleID != nil {
			return []string{cfg.DefaultRoleID.String()}
		}
		return []string{"compliance_user"}
	}
	// Fix (staticcheck S1009): len of a nil map is 0, so the explicit nil
	// check was redundant.
	if len(cfg.RoleMapping) == 0 {
		return defaultRoles()
	}
	roleSet := make(map[string]bool)
	for _, group := range groups {
		if role, ok := cfg.RoleMapping[group]; ok {
			roleSet[role] = true
		}
	}
	if len(roleSet) == 0 {
		return defaultRoles()
	}
	roles := make([]string, 0, len(roleSet))
	for role := range roleSet {
		roles = append(roles, role)
	}
	return roles
}

View File

@@ -1,164 +0,0 @@
package dsb
import (
"time"
"github.com/google/uuid"
)
// ============================================================================
// Core Models
// ============================================================================
// Assignment represents a DSB-to-tenant assignment (the contract under which
// a data protection officer serves a tenant).
type Assignment struct {
	ID uuid.UUID `json:"id"`
	DSBUserID uuid.UUID `json:"dsb_user_id"`
	TenantID uuid.UUID `json:"tenant_id"`
	TenantName string `json:"tenant_name"` // populated via JOIN
	TenantSlug string `json:"tenant_slug"` // populated via JOIN
	Status string `json:"status"` // active, paused, terminated
	ContractStart time.Time `json:"contract_start"`
	ContractEnd *time.Time `json:"contract_end,omitempty"` // nil for open-ended contracts
	MonthlyHoursBudget float64 `json:"monthly_hours_budget"`
	Notes string `json:"notes"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
}
// HourEntry represents a DSB time tracking entry booked against an assignment.
type HourEntry struct {
	ID uuid.UUID `json:"id"`
	AssignmentID uuid.UUID `json:"assignment_id"`
	Date time.Time `json:"date"`
	Hours float64 `json:"hours"`
	Category string `json:"category"` // dsfa_review, consultation, audit, training, incident_response, documentation, meeting, other
	Description string `json:"description"`
	Billable bool `json:"billable"`
	CreatedAt time.Time `json:"created_at"`
}
// Task represents a DSB task/work item tracked per assignment.
type Task struct {
	ID uuid.UUID `json:"id"`
	AssignmentID uuid.UUID `json:"assignment_id"`
	Title string `json:"title"`
	Description string `json:"description"`
	Category string `json:"category"` // dsfa_review, dsr_response, incident_review, audit_preparation, policy_review, training, consultation, other
	Priority string `json:"priority"` // low, medium, high, urgent
	Status string `json:"status"` // open, in_progress, waiting, completed, cancelled
	DueDate *time.Time `json:"due_date,omitempty"`
	CompletedAt *time.Time `json:"completed_at,omitempty"` // set when status becomes completed
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
}
// Communication represents a DSB communication log entry (audit trail of
// contacts between the DSB and the tenant).
type Communication struct {
	ID uuid.UUID `json:"id"`
	AssignmentID uuid.UUID `json:"assignment_id"`
	Direction string `json:"direction"` // inbound, outbound
	Channel string `json:"channel"` // email, phone, meeting, portal, letter
	Subject string `json:"subject"`
	Content string `json:"content"`
	Participants string `json:"participants"`
	CreatedAt time.Time `json:"created_at"`
}
// ============================================================================
// Dashboard Models
// ============================================================================
// DSBDashboard provides the aggregated overview for a DSB user across all of
// their assignments.
type DSBDashboard struct {
	Assignments []AssignmentOverview `json:"assignments"`
	TotalAssignments int `json:"total_assignments"`
	ActiveAssignments int `json:"active_assignments"` // assignments with status "active"
	TotalHoursThisMonth float64 `json:"total_hours_this_month"`
	OpenTasks int `json:"open_tasks"` // open + in-progress tasks across assignments
	UrgentTasks int `json:"urgent_tasks"`
	GeneratedAt time.Time `json:"generated_at"`
}
// AssignmentOverview enriches an Assignment with aggregated metrics for the
// dashboard.
type AssignmentOverview struct {
	Assignment
	ComplianceScore int `json:"compliance_score"`
	HoursThisMonth float64 `json:"hours_this_month"`
	HoursBudget float64 `json:"hours_budget"` // copy of MonthlyHoursBudget
	OpenTaskCount int `json:"open_task_count"`
	UrgentTaskCount int `json:"urgent_task_count"`
	NextDeadline *time.Time `json:"next_deadline,omitempty"` // earliest due date among open tasks
}
// ============================================================================
// Request Models
// ============================================================================
// CreateAssignmentRequest is the request body for creating an assignment.
type CreateAssignmentRequest struct {
	DSBUserID uuid.UUID `json:"dsb_user_id" binding:"required"`
	TenantID uuid.UUID `json:"tenant_id" binding:"required"`
	Status string `json:"status"` // defaults to "active" when empty
	ContractStart time.Time `json:"contract_start" binding:"required"`
	ContractEnd *time.Time `json:"contract_end,omitempty"`
	MonthlyHoursBudget float64 `json:"monthly_hours_budget"`
	Notes string `json:"notes"`
}
// UpdateAssignmentRequest is the request body for updating an assignment.
// All fields are optional pointers; nil means "leave unchanged".
type UpdateAssignmentRequest struct {
	Status *string `json:"status,omitempty"`
	ContractEnd *time.Time `json:"contract_end,omitempty"`
	MonthlyHoursBudget *float64 `json:"monthly_hours_budget,omitempty"`
	Notes *string `json:"notes,omitempty"`
}
// CreateHourEntryRequest is the request body for creating a time entry.
type CreateHourEntryRequest struct {
	Date time.Time `json:"date" binding:"required"`
	Hours float64 `json:"hours" binding:"required"`
	Category string `json:"category" binding:"required"`
	Description string `json:"description" binding:"required"`
	Billable *bool `json:"billable,omitempty"` // pointer so "false" is distinguishable from "unset"
}
// CreateTaskRequest is the request body for creating a task.
type CreateTaskRequest struct {
	Title string `json:"title" binding:"required"`
	Description string `json:"description"`
	Category string `json:"category" binding:"required"`
	Priority string `json:"priority"` // defaults to "medium" when empty
	DueDate *time.Time `json:"due_date,omitempty"`
}
// UpdateTaskRequest is the request body for updating a task.
// All fields are optional pointers; nil means "leave unchanged".
type UpdateTaskRequest struct {
	Title *string `json:"title,omitempty"`
	Description *string `json:"description,omitempty"`
	Category *string `json:"category,omitempty"`
	Priority *string `json:"priority,omitempty"`
	Status *string `json:"status,omitempty"`
	DueDate *time.Time `json:"due_date,omitempty"`
}
// CreateCommunicationRequest is the request body for creating a communication entry.
type CreateCommunicationRequest struct {
	Direction string `json:"direction" binding:"required"`
	Channel string `json:"channel" binding:"required"`
	Subject string `json:"subject" binding:"required"`
	Content string `json:"content"`
	Participants string `json:"participants"`
}
// ============================================================================
// Summary Models
// ============================================================================
// HoursSummary provides aggregated hour statistics for an assignment,
// optionally restricted to a single month.
type HoursSummary struct {
	TotalHours float64 `json:"total_hours"`
	BillableHours float64 `json:"billable_hours"`
	ByCategory map[string]float64 `json:"by_category"`
	Period string `json:"period"` // YYYY-MM or "all"
}

View File

@@ -1,510 +0,0 @@
package dsb
import (
"context"
"fmt"
"time"
"github.com/breakpilot/ai-compliance-sdk/internal/reporting"
"github.com/google/uuid"
"github.com/jackc/pgx/v5/pgxpool"
)
// Store provides database operations for the DSB portal.
// pool is the primary Postgres connection; reportingStore is optional and,
// when non-nil, is used to enrich dashboards with compliance scores.
type Store struct {
	pool *pgxpool.Pool
	reportingStore *reporting.Store
}
// NewStore creates a new DSB store.
// reportingStore may be nil; dashboard compliance enrichment is then skipped.
func NewStore(pool *pgxpool.Pool, reportingStore *reporting.Store) *Store {
	return &Store{
		pool: pool,
		reportingStore: reportingStore,
	}
}
// Pool returns the underlying connection pool for direct queries when needed.
// Exposed for callers that need ad-hoc queries outside the Store's helpers.
func (s *Store) Pool() *pgxpool.Pool {
	return s.pool
}
// ============================================================================
// Dashboard
// ============================================================================
// GetDashboard generates the aggregated DSB dashboard for a given DSB user.
//
// For each assignment the overview is enriched best-effort (enrichment errors
// are tolerated, not propagated): tenant compliance score, hours booked in
// the current month, and open/in-progress task statistics. Only the initial
// assignment listing can fail the call.
func (s *Store) GetDashboard(ctx context.Context, dsbUserID uuid.UUID) (*DSBDashboard, error) {
	assignments, err := s.ListAssignments(ctx, dsbUserID)
	if err != nil {
		return nil, fmt.Errorf("list assignments: %w", err)
	}
	now := time.Now().UTC()
	currentMonth := now.Format("2006-01")
	dashboard := &DSBDashboard{
		Assignments: make([]AssignmentOverview, 0, len(assignments)),
		GeneratedAt: now,
	}
	for _, a := range assignments {
		overview := AssignmentOverview{
			Assignment:  a,
			HoursBudget: a.MonthlyHoursBudget,
		}
		// Enrich with compliance score (error-tolerant; store may be absent)
		if s.reportingStore != nil {
			if report, err := s.reportingStore.GenerateReport(ctx, a.TenantID); err == nil && report != nil {
				overview.ComplianceScore = report.ComplianceScore
			}
		}
		// Hours booked this month (error-tolerant)
		if summary, err := s.GetHoursSummary(ctx, a.ID, currentMonth); err == nil && summary != nil {
			overview.HoursThisMonth = summary.TotalHours
		}
		// Both "open" and "in_progress" tasks count as open on the dashboard.
		// Fix: the two identical accumulation loops were deduplicated into a
		// single helper.
		for _, status := range []string{"open", "in_progress"} {
			if tasks, err := s.ListTasks(ctx, a.ID, status); err == nil {
				accumulateTaskStats(tasks, &overview)
			}
		}
		dashboard.Assignments = append(dashboard.Assignments, overview)
		dashboard.TotalAssignments++
		if a.Status == "active" {
			dashboard.ActiveAssignments++
		}
		dashboard.TotalHoursThisMonth += overview.HoursThisMonth
		dashboard.OpenTasks += overview.OpenTaskCount
		dashboard.UrgentTasks += overview.UrgentTaskCount
	}
	return dashboard, nil
}
// accumulateTaskStats folds a task list into an overview: it bumps the
// open-task count, counts urgent tasks, and tracks the earliest due date.
func accumulateTaskStats(tasks []Task, overview *AssignmentOverview) {
	overview.OpenTaskCount += len(tasks)
	for _, t := range tasks {
		if t.Priority == "urgent" {
			overview.UrgentTaskCount++
		}
		if t.DueDate != nil && (overview.NextDeadline == nil || t.DueDate.Before(*overview.NextDeadline)) {
			overview.NextDeadline = t.DueDate
		}
	}
}
// ============================================================================
// Assignments
// ============================================================================
// CreateAssignment inserts a new DSB assignment, generating its ID and
// timestamps in place. Status defaults to "active" when unset.
func (s *Store) CreateAssignment(ctx context.Context, a *Assignment) error {
	now := time.Now().UTC()
	a.ID = uuid.New()
	a.CreatedAt, a.UpdatedAt = now, now
	if a.Status == "" {
		a.Status = "active"
	}
	const q = `
		INSERT INTO dsb_assignments (id, dsb_user_id, tenant_id, status, contract_start, contract_end, monthly_hours_budget, notes, created_at, updated_at)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
	`
	if _, err := s.pool.Exec(ctx, q,
		a.ID, a.DSBUserID, a.TenantID, a.Status, a.ContractStart, a.ContractEnd,
		a.MonthlyHoursBudget, a.Notes, a.CreatedAt, a.UpdatedAt,
	); err != nil {
		return fmt.Errorf("insert assignment: %w", err)
	}
	return nil
}
// ListAssignments returns all assignments for a given DSB user, joined with
// tenant name/slug, ordered newest-first. The result is never nil (an empty
// slice is returned when there are no rows).
func (s *Store) ListAssignments(ctx context.Context, dsbUserID uuid.UUID) ([]Assignment, error) {
	rows, err := s.pool.Query(ctx, `
		SELECT a.id, a.dsb_user_id, a.tenant_id, ct.name, ct.slug,
		       a.status, a.contract_start, a.contract_end,
		       a.monthly_hours_budget, a.notes, a.created_at, a.updated_at
		FROM dsb_assignments a
		JOIN compliance_tenants ct ON ct.id = a.tenant_id
		WHERE a.dsb_user_id = $1
		ORDER BY a.created_at DESC
	`, dsbUserID)
	if err != nil {
		return nil, fmt.Errorf("query assignments: %w", err)
	}
	defer rows.Close()
	var assignments []Assignment
	for rows.Next() {
		var a Assignment
		if err := rows.Scan(
			&a.ID, &a.DSBUserID, &a.TenantID, &a.TenantName, &a.TenantSlug,
			&a.Status, &a.ContractStart, &a.ContractEnd,
			&a.MonthlyHoursBudget, &a.Notes, &a.CreatedAt, &a.UpdatedAt,
		); err != nil {
			return nil, fmt.Errorf("scan assignment: %w", err)
		}
		assignments = append(assignments, a)
	}
	// Fix: surface errors that occurred during row iteration (pgx requires
	// checking rows.Err() after Next() returns false; previously ignored).
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate assignments: %w", err)
	}
	if assignments == nil {
		assignments = []Assignment{}
	}
	return assignments, nil
}
// GetAssignment retrieves a single assignment (with tenant name/slug) by ID.
// An error (wrapping pgx.ErrNoRows) is returned when no row matches.
func (s *Store) GetAssignment(ctx context.Context, id uuid.UUID) (*Assignment, error) {
	const q = `
		SELECT a.id, a.dsb_user_id, a.tenant_id, ct.name, ct.slug,
		       a.status, a.contract_start, a.contract_end,
		       a.monthly_hours_budget, a.notes, a.created_at, a.updated_at
		FROM dsb_assignments a
		JOIN compliance_tenants ct ON ct.id = a.tenant_id
		WHERE a.id = $1
	`
	a := &Assignment{}
	scanErr := s.pool.QueryRow(ctx, q, id).Scan(
		&a.ID, &a.DSBUserID, &a.TenantID, &a.TenantName, &a.TenantSlug,
		&a.Status, &a.ContractStart, &a.ContractEnd,
		&a.MonthlyHoursBudget, &a.Notes, &a.CreatedAt, &a.UpdatedAt,
	)
	if scanErr != nil {
		return nil, fmt.Errorf("get assignment: %w", scanErr)
	}
	return a, nil
}
// UpdateAssignment persists the mutable fields of an existing assignment and
// refreshes its updated_at timestamp server-side.
func (s *Store) UpdateAssignment(ctx context.Context, a *Assignment) error {
	const q = `
		UPDATE dsb_assignments
		SET status = $2, contract_end = $3, monthly_hours_budget = $4, notes = $5, updated_at = NOW()
		WHERE id = $1
	`
	if _, err := s.pool.Exec(ctx, q, a.ID, a.Status, a.ContractEnd, a.MonthlyHoursBudget, a.Notes); err != nil {
		return fmt.Errorf("update assignment: %w", err)
	}
	return nil
}
// ============================================================================
// Hours
// ============================================================================
// CreateHourEntry inserts a new time tracking entry, generating its ID and
// creation timestamp in place.
func (s *Store) CreateHourEntry(ctx context.Context, h *HourEntry) error {
	h.ID = uuid.New()
	h.CreatedAt = time.Now().UTC()
	const q = `
		INSERT INTO dsb_hours (id, assignment_id, date, hours, category, description, billable, created_at)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
	`
	if _, err := s.pool.Exec(ctx, q,
		h.ID, h.AssignmentID, h.Date, h.Hours, h.Category, h.Description, h.Billable, h.CreatedAt,
	); err != nil {
		return fmt.Errorf("insert hour entry: %w", err)
	}
	return nil
}
// ListHours returns time entries for an assignment, optionally filtered by
// month (YYYY-MM), ordered newest-first. The result is never nil.
func (s *Store) ListHours(ctx context.Context, assignmentID uuid.UUID, month string) ([]HourEntry, error) {
	// Build the query once; the month filter only appends a predicate.
	// (Previously the full query string was duplicated for both branches.)
	query := `
		SELECT id, assignment_id, date, hours, category, description, billable, created_at
		FROM dsb_hours
		WHERE assignment_id = $1`
	args := []interface{}{assignmentID}
	if month != "" {
		query += ` AND to_char(date, 'YYYY-MM') = $2`
		args = append(args, month)
	}
	query += `
		ORDER BY date DESC, created_at DESC`
	rows, err := s.pool.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("query hours: %w", err)
	}
	defer rows.Close()
	var entries []HourEntry
	for rows.Next() {
		var h HourEntry
		if err := rows.Scan(
			&h.ID, &h.AssignmentID, &h.Date, &h.Hours, &h.Category,
			&h.Description, &h.Billable, &h.CreatedAt,
		); err != nil {
			return nil, fmt.Errorf("scan hour entry: %w", err)
		}
		entries = append(entries, h)
	}
	// Fix: check for errors raised during iteration (previously ignored).
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate hours: %w", err)
	}
	if entries == nil {
		entries = []HourEntry{}
	}
	return entries, nil
}
// GetHoursSummary returns aggregated hour statistics for an assignment,
// optionally restricted to a single month (YYYY-MM). Period in the result is
// the month filter or "all".
func (s *Store) GetHoursSummary(ctx context.Context, assignmentID uuid.UUID, month string) (*HoursSummary, error) {
	summary := &HoursSummary{
		ByCategory: make(map[string]float64),
		Period:     "all",
	}
	// Shared WHERE clause for both queries; optionally restrict to a month.
	// (Previously both queries and their args were built twice.)
	where := `WHERE assignment_id = $1`
	args := []interface{}{assignmentID}
	if month != "" {
		summary.Period = month
		where += ` AND to_char(date, 'YYYY-MM') = $2`
		args = append(args, month)
	}
	// Total and billable hours
	totalQuery := `
		SELECT COALESCE(SUM(hours), 0), COALESCE(SUM(CASE WHEN billable THEN hours ELSE 0 END), 0)
		FROM dsb_hours
		` + where
	if err := s.pool.QueryRow(ctx, totalQuery, args...).Scan(&summary.TotalHours, &summary.BillableHours); err != nil {
		return nil, fmt.Errorf("query hours summary totals: %w", err)
	}
	// Hours by category
	catQuery := `
		SELECT category, COALESCE(SUM(hours), 0)
		FROM dsb_hours
		` + where + `
		GROUP BY category`
	rows, err := s.pool.Query(ctx, catQuery, args...)
	if err != nil {
		return nil, fmt.Errorf("query hours by category: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var cat string
		var hours float64
		if err := rows.Scan(&cat, &hours); err != nil {
			return nil, fmt.Errorf("scan category hours: %w", err)
		}
		summary.ByCategory[cat] = hours
	}
	// Fix: check for errors raised during iteration (previously ignored).
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate category hours: %w", err)
	}
	return summary, nil
}
// ============================================================================
// Tasks
// ============================================================================
// CreateTask inserts a new DSB task, generating its ID and timestamps in
// place. Status defaults to "open", priority to "medium".
func (s *Store) CreateTask(ctx context.Context, t *Task) error {
	now := time.Now().UTC()
	t.ID = uuid.New()
	t.CreatedAt, t.UpdatedAt = now, now
	if t.Status == "" {
		t.Status = "open"
	}
	if t.Priority == "" {
		t.Priority = "medium"
	}
	const q = `
		INSERT INTO dsb_tasks (id, assignment_id, title, description, category, priority, status, due_date, created_at, updated_at)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
	`
	if _, err := s.pool.Exec(ctx, q,
		t.ID, t.AssignmentID, t.Title, t.Description, t.Category,
		t.Priority, t.Status, t.DueDate, t.CreatedAt, t.UpdatedAt,
	); err != nil {
		return fmt.Errorf("insert task: %w", err)
	}
	return nil
}
// ListTasks returns tasks for an assignment, optionally filtered by status.
// Ordering: priority (urgent > high > medium > low), then earliest due date
// (NULLs last), then newest-created. The result is never nil.
func (s *Store) ListTasks(ctx context.Context, assignmentID uuid.UUID, status string) ([]Task, error) {
	// Build the query once; the status filter only appends a predicate.
	// (Previously the full query, including the ORDER BY, was duplicated.)
	query := `
		SELECT id, assignment_id, title, description, category, priority, status, due_date, completed_at, created_at, updated_at
		FROM dsb_tasks
		WHERE assignment_id = $1`
	args := []interface{}{assignmentID}
	if status != "" {
		query += ` AND status = $2`
		args = append(args, status)
	}
	query += `
		ORDER BY CASE priority
			WHEN 'urgent' THEN 1
			WHEN 'high' THEN 2
			WHEN 'medium' THEN 3
			WHEN 'low' THEN 4
			ELSE 5
		END, due_date ASC NULLS LAST, created_at DESC`
	rows, err := s.pool.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("query tasks: %w", err)
	}
	defer rows.Close()
	var tasks []Task
	for rows.Next() {
		var t Task
		if err := rows.Scan(
			&t.ID, &t.AssignmentID, &t.Title, &t.Description, &t.Category,
			&t.Priority, &t.Status, &t.DueDate, &t.CompletedAt,
			&t.CreatedAt, &t.UpdatedAt,
		); err != nil {
			return nil, fmt.Errorf("scan task: %w", err)
		}
		tasks = append(tasks, t)
	}
	// Fix: check for errors raised during iteration (previously ignored).
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate tasks: %w", err)
	}
	if tasks == nil {
		tasks = []Task{}
	}
	return tasks, nil
}
// UpdateTask updates an existing task.
//
// All mutable columns are overwritten from t and updated_at is bumped
// server-side; the row is addressed by t.ID.
func (s *Store) UpdateTask(ctx context.Context, t *Task) error {
	const updateSQL = `
		UPDATE dsb_tasks
		SET title = $2, description = $3, category = $4, priority = $5, status = $6, due_date = $7, updated_at = NOW()
		WHERE id = $1
	`
	if _, err := s.pool.Exec(ctx, updateSQL,
		t.ID, t.Title, t.Description, t.Category, t.Priority, t.Status, t.DueDate,
	); err != nil {
		return fmt.Errorf("update task: %w", err)
	}
	return nil
}
// CompleteTask marks a task as completed with the current timestamp.
//
// Both completed_at and updated_at are set server-side via NOW().
func (s *Store) CompleteTask(ctx context.Context, taskID uuid.UUID) error {
	const completeSQL = `
		UPDATE dsb_tasks
		SET status = 'completed', completed_at = NOW(), updated_at = NOW()
		WHERE id = $1
	`
	if _, err := s.pool.Exec(ctx, completeSQL, taskID); err != nil {
		return fmt.Errorf("complete task: %w", err)
	}
	return nil
}
// ============================================================================
// Communications
// ============================================================================
// CreateCommunication inserts a new communication log entry.
//
// A fresh UUID and a UTC creation timestamp are written back onto the
// passed-in struct before the insert.
func (s *Store) CreateCommunication(ctx context.Context, c *Communication) error {
	c.ID = uuid.New()
	c.CreatedAt = time.Now().UTC()
	const insertSQL = `
		INSERT INTO dsb_communications (id, assignment_id, direction, channel, subject, content, participants, created_at)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
	`
	if _, err := s.pool.Exec(ctx, insertSQL,
		c.ID, c.AssignmentID, c.Direction, c.Channel,
		c.Subject, c.Content, c.Participants, c.CreatedAt,
	); err != nil {
		return fmt.Errorf("insert communication: %w", err)
	}
	return nil
}
// ListCommunications returns all communication entries for an assignment,
// newest first. Always returns a non-nil slice.
func (s *Store) ListCommunications(ctx context.Context, assignmentID uuid.UUID) ([]Communication, error) {
	rows, err := s.pool.Query(ctx, `
		SELECT id, assignment_id, direction, channel, subject, content, participants, created_at
		FROM dsb_communications
		WHERE assignment_id = $1
		ORDER BY created_at DESC
	`, assignmentID)
	if err != nil {
		return nil, fmt.Errorf("query communications: %w", err)
	}
	defer rows.Close()
	// Start with an empty (non-nil) slice so callers and JSON encoders see
	// [] rather than null when there are no entries.
	entries := make([]Communication, 0)
	for rows.Next() {
		var entry Communication
		scanErr := rows.Scan(
			&entry.ID, &entry.AssignmentID, &entry.Direction, &entry.Channel,
			&entry.Subject, &entry.Content, &entry.Participants, &entry.CreatedAt,
		)
		if scanErr != nil {
			return nil, fmt.Errorf("scan communication: %w", scanErr)
		}
		entries = append(entries, entry)
	}
	return entries, nil
}

View File

@@ -1,395 +0,0 @@
package funding
import (
"archive/zip"
"bytes"
"fmt"
"io"
"time"
"github.com/jung-kurt/gofpdf"
"github.com/xuri/excelize/v2"
)
// ExportService handles document generation for funding applications:
// PDFs via gofpdf and XLSX budget plans via excelize. The struct is
// stateless (no fields); all state lives in the per-call arguments.
type ExportService struct{}
// NewExportService creates a new export service
func NewExportService() *ExportService {
	return &ExportService{}
}
// GenerateApplicationLetter generates the main application letter as PDF.
//
// Layout: A4 portrait, 25mm margins, six numbered sections (Einleitung,
// Projektziel, Massnahme, Datenschutz, Kosten, Laufzeit) followed by a
// footer note. Optional sub-documents (SchoolProfile, ProjectPlan, Budget,
// Timeline) are rendered only when non-nil, so a partially filled
// application still produces a valid PDF with empty sections.
// NOTE(review): strings use ASCII transliterations ("Foerderung", "ue") —
// presumably to stay within gofpdf's default cp1252 font encoding; confirm
// before switching to proper umlauts.
func (s *ExportService) GenerateApplicationLetter(app *FundingApplication) ([]byte, error) {
	pdf := gofpdf.New("P", "mm", "A4", "")
	pdf.SetMargins(25, 25, 25)
	pdf.AddPage()
	// Header
	pdf.SetFont("Helvetica", "B", 14)
	pdf.Cell(0, 10, "Antrag auf Foerderung im Rahmen der digitalen Bildungsinfrastruktur")
	pdf.Ln(15)
	// Application number
	pdf.SetFont("Helvetica", "", 10)
	pdf.Cell(0, 6, fmt.Sprintf("Antragsnummer: %s", app.ApplicationNumber))
	pdf.Ln(6)
	pdf.Cell(0, 6, fmt.Sprintf("Datum: %s", time.Now().Format("02.01.2006")))
	pdf.Ln(15)
	// Section 1: Einleitung
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "1. Einleitung")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.SchoolProfile != nil {
		pdf.MultiCell(0, 6, fmt.Sprintf(
			"Die %s (Schulnummer: %s) beantragt hiermit Foerdermittel aus dem Programm %s.\n\n"+
				"Schultraeger: %s\n"+
				"Schulform: %s\n"+
				"Schueleranzahl: %d\n"+
				"Lehrkraefte: %d",
			app.SchoolProfile.Name,
			app.SchoolProfile.SchoolNumber,
			app.FundingProgram,
			app.SchoolProfile.CarrierName,
			app.SchoolProfile.Type,
			app.SchoolProfile.StudentCount,
			app.SchoolProfile.TeacherCount,
		), "", "", false)
	}
	pdf.Ln(10)
	// Section 2: Projektziel
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "2. Projektziel")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.ProjectPlan != nil {
		pdf.MultiCell(0, 6, app.ProjectPlan.Summary, "", "", false)
		pdf.Ln(5)
		pdf.MultiCell(0, 6, app.ProjectPlan.Goals, "", "", false)
	}
	pdf.Ln(10)
	// Section 3: Beschreibung der Massnahme
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "3. Beschreibung der Massnahme")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.ProjectPlan != nil {
		pdf.MultiCell(0, 6, app.ProjectPlan.DidacticConcept, "", "", false)
	}
	pdf.Ln(10)
	// Section 4: Datenschutz & IT-Betrieb
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "4. Datenschutz & IT-Betrieb")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.ProjectPlan != nil && app.ProjectPlan.DataProtection != "" {
		pdf.MultiCell(0, 6, app.ProjectPlan.DataProtection, "", "", false)
	}
	pdf.Ln(10)
	// Section 5: Kosten & Finanzierung
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "5. Kosten & Finanzierung")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.Budget != nil {
		pdf.Cell(0, 6, fmt.Sprintf("Gesamtkosten: %.2f EUR", app.Budget.TotalCost))
		pdf.Ln(6)
		// FundingRate is stored as a fraction (0.90), rendered as a percentage.
		pdf.Cell(0, 6, fmt.Sprintf("Beantragter Foerderbetrag: %.2f EUR (%.0f%%)", app.Budget.RequestedFunding, app.Budget.FundingRate*100))
		pdf.Ln(6)
		pdf.Cell(0, 6, fmt.Sprintf("Eigenanteil: %.2f EUR", app.Budget.OwnContribution))
	}
	pdf.Ln(10)
	// Section 6: Laufzeit
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "6. Laufzeit")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.Timeline != nil {
		pdf.Cell(0, 6, fmt.Sprintf("Projektbeginn: %s", app.Timeline.PlannedStart.Format("02.01.2006")))
		pdf.Ln(6)
		pdf.Cell(0, 6, fmt.Sprintf("Projektende: %s", app.Timeline.PlannedEnd.Format("02.01.2006")))
	}
	pdf.Ln(15)
	// Footer note
	pdf.SetFont("Helvetica", "I", 9)
	pdf.MultiCell(0, 5, "Hinweis: Dieser Antrag wurde mit dem Foerderantrag-Wizard von BreakPilot erstellt. "+
		"Die finale Pruefung und Einreichung erfolgt durch den Schultraeger.", "", "", false)
	var buf bytes.Buffer
	if err := pdf.Output(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GenerateBudgetPlan generates the budget plan as XLSX.
//
// Produces a single "Kostenplan" sheet: a styled header row, one row per
// budget item, then (two rows below the items) a summary block with total
// cost, requested funding, and own contribution. A nil Budget yields a
// sheet with only the header row.
// NOTE(review): excelize call errors (SetCellValue, NewStyle, SetRowStyle)
// are deliberately ignored here — presumably best-effort formatting; any
// fatal problem surfaces in the final f.Write.
func (s *ExportService) GenerateBudgetPlan(app *FundingApplication) ([]byte, error) {
	f := excelize.NewFile()
	sheetName := "Kostenplan"
	f.SetSheetName("Sheet1", sheetName)
	// Header row
	headers := []string{
		"Pos.", "Kategorie", "Beschreibung", "Hersteller",
		"Anzahl", "Einzelpreis", "Gesamt", "Foerderfahig", "Finanzierung",
	}
	for i, h := range headers {
		// Columns are 1-based: i+1 maps headers[0] to cell A1.
		cell, _ := excelize.CoordinatesToCellName(i+1, 1)
		f.SetCellValue(sheetName, cell, h)
	}
	// Style header
	headerStyle, _ := f.NewStyle(&excelize.Style{
		Font: &excelize.Font{Bold: true},
		Fill: excelize.Fill{Type: "pattern", Color: []string{"#E0E0E0"}, Pattern: 1},
	})
	f.SetRowStyle(sheetName, 1, 1, headerStyle)
	// Data rows
	row := 2
	if app.Budget != nil {
		for i, item := range app.Budget.BudgetItems {
			f.SetCellValue(sheetName, fmt.Sprintf("A%d", row), i+1)
			f.SetCellValue(sheetName, fmt.Sprintf("B%d", row), string(item.Category))
			f.SetCellValue(sheetName, fmt.Sprintf("C%d", row), item.Description)
			f.SetCellValue(sheetName, fmt.Sprintf("D%d", row), item.Manufacturer)
			f.SetCellValue(sheetName, fmt.Sprintf("E%d", row), item.Quantity)
			f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), item.UnitPrice)
			f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), item.TotalPrice)
			fundable := "Nein"
			if item.IsFundable {
				fundable = "Ja"
			}
			f.SetCellValue(sheetName, fmt.Sprintf("H%d", row), fundable)
			f.SetCellValue(sheetName, fmt.Sprintf("I%d", row), item.FundingSource)
			row++
		}
		// Summary rows (one blank row between the items and the totals)
		row += 2
		f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), "Gesamtkosten:")
		f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), app.Budget.TotalCost)
		row++
		f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), "Foerderbetrag:")
		f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), app.Budget.RequestedFunding)
		row++
		f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), "Eigenanteil:")
		f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), app.Budget.OwnContribution)
	}
	// Set column widths
	f.SetColWidth(sheetName, "A", "A", 6)
	f.SetColWidth(sheetName, "B", "B", 15)
	f.SetColWidth(sheetName, "C", "C", 35)
	f.SetColWidth(sheetName, "D", "D", 15)
	f.SetColWidth(sheetName, "E", "E", 8)
	f.SetColWidth(sheetName, "F", "F", 12)
	f.SetColWidth(sheetName, "G", "G", 12)
	f.SetColWidth(sheetName, "H", "H", 12)
	f.SetColWidth(sheetName, "I", "I", 15)
	// Add currency format
	currencyStyle, _ := f.NewStyle(&excelize.Style{
		NumFmt: 44, // Currency format (built-in accounting number format ID)
	})
	f.SetColStyle(sheetName, "F", currencyStyle)
	f.SetColStyle(sheetName, "G", currencyStyle)
	var buf bytes.Buffer
	if err := f.Write(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GenerateDataProtectionConcept generates the data protection concept as PDF.
//
// Layout: A4 portrait, 25mm margins, three sections (Grundsaetze, TOMs,
// Betriebskonzept). Sections 1 and 3 prefer the application's own texts
// (ProjectPlan.DataProtection / ProjectPlan.MaintenancePlan) and fall back
// to generic boilerplate when those fields are empty; section 2 is always
// boilerplate.
func (s *ExportService) GenerateDataProtectionConcept(app *FundingApplication) ([]byte, error) {
	pdf := gofpdf.New("P", "mm", "A4", "")
	pdf.SetMargins(25, 25, 25)
	pdf.AddPage()
	// Header
	pdf.SetFont("Helvetica", "B", 14)
	pdf.Cell(0, 10, "Datenschutz- und Betriebskonzept")
	pdf.Ln(15)
	pdf.SetFont("Helvetica", "", 10)
	pdf.Cell(0, 6, fmt.Sprintf("Antragsnummer: %s", app.ApplicationNumber))
	pdf.Ln(6)
	if app.SchoolProfile != nil {
		pdf.Cell(0, 6, fmt.Sprintf("Schule: %s", app.SchoolProfile.Name))
	}
	pdf.Ln(15)
	// Section: Lokale Verarbeitung
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "1. Grundsaetze der Datenverarbeitung")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.ProjectPlan != nil && app.ProjectPlan.DataProtection != "" {
		pdf.MultiCell(0, 6, app.ProjectPlan.DataProtection, "", "", false)
	} else {
		// Fallback boilerplate when the applicant supplied no concept text.
		pdf.MultiCell(0, 6, "Das Projekt setzt auf eine vollstaendig lokale Datenverarbeitung:\n\n"+
			"- Alle Daten werden ausschliesslich auf den schuleigenen Systemen verarbeitet\n"+
			"- Keine Uebermittlung personenbezogener Daten an externe Dienste\n"+
			"- Keine Cloud-Speicherung sensibler Daten\n"+
			"- Betrieb im Verantwortungsbereich der Schule", "", "", false)
	}
	pdf.Ln(10)
	// Section: Technische Massnahmen
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "2. Technische und organisatorische Massnahmen")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	pdf.MultiCell(0, 6, "Folgende TOMs werden umgesetzt:\n\n"+
		"- Zugriffskontrolle ueber schuleigene Benutzerverwaltung\n"+
		"- Verschluesselte Datenspeicherung\n"+
		"- Regelmaessige Sicherheitsupdates\n"+
		"- Protokollierung von Zugriffen\n"+
		"- Automatische Loeschung nach definierten Fristen", "", "", false)
	pdf.Ln(10)
	// Section: Betriebskonzept
	pdf.SetFont("Helvetica", "B", 12)
	pdf.Cell(0, 8, "3. Betriebskonzept")
	pdf.Ln(10)
	pdf.SetFont("Helvetica", "", 10)
	if app.ProjectPlan != nil && app.ProjectPlan.MaintenancePlan != "" {
		pdf.MultiCell(0, 6, app.ProjectPlan.MaintenancePlan, "", "", false)
	} else {
		// Fallback boilerplate when no maintenance plan is provided.
		pdf.MultiCell(0, 6, "Der laufende Betrieb wird wie folgt sichergestellt:\n\n"+
			"- Schulung des technischen Personals\n"+
			"- Dokumentierte Betriebsverfahren\n"+
			"- Regelmaessige Wartung und Updates\n"+
			"- Definierte Ansprechpartner", "", "", false)
	}
	var buf bytes.Buffer
	if err := pdf.Output(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GenerateExportBundle generates a ZIP file with all documents.
//
// Document generation remains best-effort (a generator that fails is
// skipped, as before), but ZIP-level failures are now propagated: the
// previous code discarded the error from zipWriter.Create (`w, _ :=`) and
// ignored the return of w.Write, so a failed entry produced a silently
// incomplete archive reported as success.
func (s *ExportService) GenerateExportBundle(app *FundingApplication) ([]byte, error) {
	var buf bytes.Buffer
	zipWriter := zip.NewWriter(&buf)
	// addEntry writes one named file into the archive, surfacing errors.
	addEntry := func(name string, content []byte) error {
		w, err := zipWriter.Create(name)
		if err != nil {
			return fmt.Errorf("create zip entry %s: %w", name, err)
		}
		if _, err := w.Write(content); err != nil {
			return fmt.Errorf("write zip entry %s: %w", name, err)
		}
		return nil
	}
	// Generate and add application letter
	if letter, err := s.GenerateApplicationLetter(app); err == nil {
		if err := addEntry(fmt.Sprintf("%s_Antragsschreiben.pdf", app.ApplicationNumber), letter); err != nil {
			return nil, err
		}
	}
	// Generate and add budget plan
	if budget, err := s.GenerateBudgetPlan(app); err == nil {
		if err := addEntry(fmt.Sprintf("%s_Kostenplan.xlsx", app.ApplicationNumber), budget); err != nil {
			return nil, err
		}
	}
	// Generate and add data protection concept
	if dp, err := s.GenerateDataProtectionConcept(app); err == nil {
		if err := addEntry(fmt.Sprintf("%s_Datenschutzkonzept.pdf", app.ApplicationNumber), dp); err != nil {
			return nil, err
		}
	}
	// Add attachments
	for _, attachment := range app.Attachments {
		// Read attachment from storage and add to ZIP
		// This would need actual file system access
		_ = attachment
	}
	// Close flushes the ZIP central directory; without it the archive is invalid.
	if err := zipWriter.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GeneratedDocument represents a generated document held in memory,
// ready to be written to a ZIP archive or returned over HTTP.
// (The old comment named the wrong type, "ExportDocument".)
type GeneratedDocument struct {
	Name     string // file name including application number and extension
	Type     string // pdf, xlsx, docx
	Content  []byte // raw document bytes
	MimeType string // HTTP Content-Type for downloads
}
// GenerateAllDocuments generates all documents for an application.
//
// Each generator runs best-effort: a document whose generation fails is
// simply omitted from the result, and the error return is always nil.
func (s *ExportService) GenerateAllDocuments(app *FundingApplication) ([]GeneratedDocument, error) {
	type docSpec struct {
		build    func(*FundingApplication) ([]byte, error)
		nameFmt  string
		docType  string
		mimeType string
	}
	specs := []docSpec{
		{s.GenerateApplicationLetter, "%s_Antragsschreiben.pdf", "pdf", "application/pdf"},
		{s.GenerateBudgetPlan, "%s_Kostenplan.xlsx", "xlsx", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"},
		{s.GenerateDataProtectionConcept, "%s_Datenschutzkonzept.pdf", "pdf", "application/pdf"},
	}
	var docs []GeneratedDocument
	for _, spec := range specs {
		content, err := spec.build(app)
		if err != nil {
			continue // best-effort: skip documents that fail to render
		}
		docs = append(docs, GeneratedDocument{
			Name:     fmt.Sprintf(spec.nameFmt, app.ApplicationNumber),
			Type:     spec.docType,
			Content:  content,
			MimeType: spec.mimeType,
		})
	}
	return docs, nil
}
// WriteZipToWriter writes all documents to w as a ZIP archive.
//
// Fixes over the previous version: the error from zipWriter.Close was
// discarded by the deferred call, so a truncated archive (Close is what
// flushes the central directory) was reported as success; entry-creation
// errors were swallowed with `continue`; and f.Write's return was ignored.
// All three are now propagated to the caller.
func (s *ExportService) WriteZipToWriter(app *FundingApplication, w io.Writer) error {
	zipWriter := zip.NewWriter(w)
	docs, err := s.GenerateAllDocuments(app)
	if err != nil {
		zipWriter.Close()
		return err
	}
	for _, doc := range docs {
		f, err := zipWriter.Create(doc.Name)
		if err != nil {
			zipWriter.Close()
			return fmt.Errorf("create zip entry %s: %w", doc.Name, err)
		}
		if _, err := f.Write(doc.Content); err != nil {
			zipWriter.Close()
			return fmt.Errorf("write zip entry %s: %w", doc.Name, err)
		}
	}
	// Close finalizes the archive; its error must reach the caller.
	return zipWriter.Close()
}

View File

@@ -1,394 +0,0 @@
package funding
import (
"time"
"github.com/google/uuid"
)
// ============================================================================
// Constants / Enums
// ============================================================================
// NOTE(review): these string values appear in JSON payloads and (per the
// postgres store) in persisted rows — treat them as a wire format and do
// not rename existing values; confirm against stored data before changing.
// FundingProgram represents the type of funding program
type FundingProgram string
const (
	FundingProgramDigitalPakt1     FundingProgram = "DIGITALPAKT_1"
	FundingProgramDigitalPakt2     FundingProgram = "DIGITALPAKT_2"
	FundingProgramLandesfoerderung FundingProgram = "LANDESFOERDERUNG"
	FundingProgramSchultraeger     FundingProgram = "SCHULTRAEGER"
	FundingProgramSonstige         FundingProgram = "SONSTIGE"
)
// ApplicationStatus represents the workflow status
type ApplicationStatus string
const (
	ApplicationStatusDraft      ApplicationStatus = "DRAFT"
	ApplicationStatusInProgress ApplicationStatus = "IN_PROGRESS"
	ApplicationStatusReview     ApplicationStatus = "REVIEW"
	ApplicationStatusSubmitted  ApplicationStatus = "SUBMITTED"
	ApplicationStatusApproved   ApplicationStatus = "APPROVED"
	ApplicationStatusRejected   ApplicationStatus = "REJECTED"
	ApplicationStatusArchived   ApplicationStatus = "ARCHIVED"
)
// FederalState represents German federal states
type FederalState string
const (
	FederalStateNI  FederalState = "NI"  // Niedersachsen
	FederalStateNRW FederalState = "NRW" // Nordrhein-Westfalen
	FederalStateBAY FederalState = "BAY" // Bayern
	FederalStateBW  FederalState = "BW"  // Baden-Wuerttemberg
	FederalStateHE  FederalState = "HE"  // Hessen
	FederalStateSN  FederalState = "SN"  // Sachsen
	FederalStateTH  FederalState = "TH"  // Thueringen
	FederalStateSA  FederalState = "SA"  // Sachsen-Anhalt
	FederalStateBB  FederalState = "BB"  // Brandenburg
	FederalStateMV  FederalState = "MV"  // Mecklenburg-Vorpommern
	FederalStateSH  FederalState = "SH"  // Schleswig-Holstein
	FederalStateHH  FederalState = "HH"  // Hamburg
	FederalStateHB  FederalState = "HB"  // Bremen
	FederalStateBE  FederalState = "BE"  // Berlin
	FederalStateSL  FederalState = "SL"  // Saarland
	FederalStateRP  FederalState = "RP"  // Rheinland-Pfalz
)
// SchoolType represents different school types
type SchoolType string
const (
	SchoolTypeGrundschule    SchoolType = "GRUNDSCHULE"
	SchoolTypeHauptschule    SchoolType = "HAUPTSCHULE"
	SchoolTypeRealschule     SchoolType = "REALSCHULE"
	SchoolTypeGymnasium      SchoolType = "GYMNASIUM"
	SchoolTypeGesamtschule   SchoolType = "GESAMTSCHULE"
	SchoolTypeOberschule     SchoolType = "OBERSCHULE"
	SchoolTypeFoerderschule  SchoolType = "FOERDERSCHULE"
	SchoolTypeBerufsschule   SchoolType = "BERUFSSCHULE"
	SchoolTypeBerufskolleg   SchoolType = "BERUFSKOLLEG"
	SchoolTypeFachoberschule SchoolType = "FACHOBERSCHULE"
	SchoolTypeBerufliches    SchoolType = "BERUFLICHES_GYMNASIUM"
	SchoolTypeSonstige       SchoolType = "SONSTIGE"
)
// CarrierType represents the school carrier type
type CarrierType string
const (
	CarrierTypePublic    CarrierType = "PUBLIC"     // Oeffentlich
	CarrierTypePrivate   CarrierType = "PRIVATE"    // Privat
	CarrierTypeChurch    CarrierType = "CHURCH"     // Kirchlich
	CarrierTypeNonProfit CarrierType = "NON_PROFIT" // Gemeinnuetzig
)
// BudgetCategory represents categories for budget items
type BudgetCategory string
const (
	BudgetCategoryNetwork      BudgetCategory = "NETWORK"      // Netzwerk/Verkabelung
	BudgetCategoryWLAN         BudgetCategory = "WLAN"         // WLAN-Infrastruktur
	BudgetCategoryDevices      BudgetCategory = "DEVICES"      // Endgeraete
	BudgetCategoryPresentation BudgetCategory = "PRESENTATION" // Praesentationstechnik
	BudgetCategorySoftware     BudgetCategory = "SOFTWARE"     // Software-Lizenzen
	BudgetCategoryServer       BudgetCategory = "SERVER"       // Server/Rechenzentrum
	BudgetCategoryServices     BudgetCategory = "SERVICES"     // Dienstleistungen
	BudgetCategoryTraining     BudgetCategory = "TRAINING"     // Schulungen
	BudgetCategorySonstige     BudgetCategory = "SONSTIGE"     // Sonstige
)
// ============================================================================
// Main Entities
// ============================================================================
// FundingApplication represents a funding application
type FundingApplication struct {
	ID                uuid.UUID         `json:"id"`
	TenantID          uuid.UUID         `json:"tenant_id"`
	ApplicationNumber string            `json:"application_number"` // e.g., DP2-NI-2026-00123
	Title             string            `json:"title"`
	FundingProgram    FundingProgram    `json:"funding_program"`
	Status            ApplicationStatus `json:"status"`
	// Wizard State
	CurrentStep int                    `json:"current_step"`
	TotalSteps  int                    `json:"total_steps"`
	WizardData  map[string]interface{} `json:"wizard_data,omitempty"` // free-form per-step form data
	// School Information
	SchoolProfile *SchoolProfile `json:"school_profile,omitempty"`
	// Project Information
	ProjectPlan *ProjectPlan     `json:"project_plan,omitempty"`
	Budget      *Budget          `json:"budget,omitempty"`
	Timeline    *ProjectTimeline `json:"timeline,omitempty"`
	// Financial Summary
	RequestedAmount float64  `json:"requested_amount"`
	OwnContribution float64  `json:"own_contribution"`
	ApprovedAmount  *float64 `json:"approved_amount,omitempty"` // nil until a decision is recorded
	// Attachments
	Attachments []Attachment `json:"attachments,omitempty"`
	// Audit Trail
	CreatedAt   time.Time  `json:"created_at"`
	UpdatedAt   time.Time  `json:"updated_at"`
	SubmittedAt *time.Time `json:"submitted_at,omitempty"`
	CreatedBy   uuid.UUID  `json:"created_by"`
	UpdatedBy   uuid.UUID  `json:"updated_by"`
}
// SchoolProfile contains school information
type SchoolProfile struct {
	Name           string                `json:"name"`
	SchoolNumber   string                `json:"school_number"` // Official school number
	Type           SchoolType            `json:"type"`
	FederalState   FederalState          `json:"federal_state"`
	Address        Address               `json:"address"`
	ContactPerson  ContactPerson         `json:"contact_person"`
	StudentCount   int                   `json:"student_count"`
	TeacherCount   int                   `json:"teacher_count"`
	ClassCount     int                   `json:"class_count"`
	CarrierType    CarrierType           `json:"carrier_type"`
	CarrierName    string                `json:"carrier_name"`
	CarrierAddress *Address              `json:"carrier_address,omitempty"`
	Infrastructure *InfrastructureStatus `json:"infrastructure,omitempty"`
}
// Address represents a postal address
type Address struct {
	Street     string `json:"street"`
	HouseNo    string `json:"house_no"`
	PostalCode string `json:"postal_code"`
	City       string `json:"city"`
	Country    string `json:"country,omitempty"`
}
// ContactPerson represents a contact person
type ContactPerson struct {
	Salutation string `json:"salutation,omitempty"` // Herr/Frau
	Title      string `json:"title,omitempty"`      // Dr., Prof.
	FirstName  string `json:"first_name"`
	LastName   string `json:"last_name"`
	Position   string `json:"position,omitempty"` // Schulleitung, IT-Beauftragter
	Email      string `json:"email"`
	Phone      string `json:"phone,omitempty"`
}
// InfrastructureStatus describes current IT infrastructure
type InfrastructureStatus struct {
	HasWLAN              bool   `json:"has_wlan"`
	WLANCoverage         int    `json:"wlan_coverage"` // Percentage 0-100
	HasStructuredCabling bool   `json:"has_structured_cabling"`
	InternetBandwidth    string `json:"internet_bandwidth"` // e.g., "100 Mbit/s"
	DeviceCount          int    `json:"device_count"`       // Current devices
	HasServerRoom        bool   `json:"has_server_room"`
	Notes                string `json:"notes,omitempty"`
}
// ProjectPlan describes the project
type ProjectPlan struct {
	ProjectName      string   `json:"project_name"`
	Summary          string   `json:"summary"`          // Kurzbeschreibung
	Goals            string   `json:"goals"`            // Projektziele
	DidacticConcept  string   `json:"didactic_concept"` // Paedagogisches Konzept
	MEPReference     string   `json:"mep_reference,omitempty"` // Medienentwicklungsplan Bezug
	DataProtection   string   `json:"data_protection"`  // Datenschutzkonzept
	MaintenancePlan  string   `json:"maintenance_plan"` // Wartungs-/Betriebskonzept
	TargetGroups     []string `json:"target_groups"`    // e.g., ["Schueler", "Lehrer"]
	SubjectsAffected []string `json:"subjects_affected,omitempty"` // Betroffene Faecher
}
// Budget represents the financial plan
type Budget struct {
	TotalCost        float64      `json:"total_cost"`
	RequestedFunding float64      `json:"requested_funding"`
	OwnContribution  float64      `json:"own_contribution"`
	OtherFunding     float64      `json:"other_funding"`
	FundingRate      float64      `json:"funding_rate"` // 0.90 = 90%
	BudgetItems      []BudgetItem `json:"budget_items"`
	IsWithinLimits   bool         `json:"is_within_limits"`
	Justification    string       `json:"justification,omitempty"` // Begruendung
}
// BudgetItem represents a single budget line item
type BudgetItem struct {
	ID            uuid.UUID      `json:"id"`
	Position      int            `json:"position"` // Order number
	Category      BudgetCategory `json:"category"`
	Description   string         `json:"description"`
	Manufacturer  string         `json:"manufacturer,omitempty"`
	ProductName   string         `json:"product_name,omitempty"`
	Quantity      int            `json:"quantity"`
	UnitPrice     float64        `json:"unit_price"`
	TotalPrice    float64        `json:"total_price"`
	IsFundable    bool           `json:"is_fundable"`    // Foerderfahig Ja/Nein
	FundingSource string         `json:"funding_source"` // digitalpakt, eigenanteil, sonstige
	Notes         string         `json:"notes,omitempty"`
}
// ProjectTimeline represents project schedule
type ProjectTimeline struct {
	PlannedStart time.Time   `json:"planned_start"`
	PlannedEnd   time.Time   `json:"planned_end"`
	Milestones   []Milestone `json:"milestones,omitempty"`
	ProjectPhase string      `json:"project_phase,omitempty"` // Current phase
}
// Milestone represents a project milestone
type Milestone struct {
	ID          uuid.UUID  `json:"id"`
	Title       string     `json:"title"`
	Description string     `json:"description,omitempty"`
	DueDate     time.Time  `json:"due_date"`
	CompletedAt *time.Time `json:"completed_at,omitempty"`
	Status      string     `json:"status"` // planned, in_progress, completed
}
// Attachment represents an uploaded file
type Attachment struct {
	ID          uuid.UUID `json:"id"`
	FileName    string    `json:"file_name"`
	FileType    string    `json:"file_type"` // pdf, docx, xlsx, jpg, png
	FileSize    int64     `json:"file_size"` // bytes
	Category    string    `json:"category"`  // angebot, mep, nachweis, sonstiges
	Description string    `json:"description,omitempty"`
	StoragePath string    `json:"-"` // Internal path, not exposed
	UploadedAt  time.Time `json:"uploaded_at"`
	UploadedBy  uuid.UUID `json:"uploaded_by"`
}
// ============================================================================
// Wizard Step Data
// ============================================================================
// WizardStep represents a single wizard step
type WizardStep struct {
	Number      int      `json:"number"`
	Title       string   `json:"title"`
	Description string   `json:"description"`
	Fields      []string `json:"fields"` // Field IDs for this step
	IsCompleted bool     `json:"is_completed"`
	IsRequired  bool     `json:"is_required"`
	HelpContext string   `json:"help_context"` // Context for LLM assistant
}
// WizardProgress tracks wizard completion
type WizardProgress struct {
	CurrentStep    int                    `json:"current_step"`
	TotalSteps     int                    `json:"total_steps"`
	CompletedSteps []int                  `json:"completed_steps"`
	StepValidation map[int][]string       `json:"step_validation,omitempty"` // Errors per step
	FormData       map[string]interface{} `json:"form_data"`
	LastSavedAt    time.Time              `json:"last_saved_at"`
}
// ============================================================================
// BreakPilot Presets
// ============================================================================
// ProductPreset represents a BreakPilot product preset
// (pre-filled budget items and form values for a known product bundle).
type ProductPreset struct {
	ID             string                 `json:"id"`
	Name           string                 `json:"name"`
	Description    string                 `json:"description"`
	BudgetItems    []BudgetItem           `json:"budget_items"`
	AutoFill       map[string]interface{} `json:"auto_fill"`
	DataProtection string                 `json:"data_protection"`
}
// ============================================================================
// Export Structures
// ============================================================================
// ExportDocument represents a generated document
type ExportDocument struct {
	Type        string    `json:"type"`   // antragsschreiben, kostenplan, datenschutz
	Format      string    `json:"format"` // pdf, docx, xlsx
	FileName    string    `json:"file_name"`
	GeneratedAt time.Time `json:"generated_at"`
	ContentHash string    `json:"content_hash"`
	StoragePath string    `json:"-"` // internal only, never serialized
}
// ExportBundle represents a ZIP bundle of all documents
type ExportBundle struct {
	ID            uuid.UUID        `json:"id"`
	ApplicationID uuid.UUID        `json:"application_id"`
	Documents     []ExportDocument `json:"documents"`
	GeneratedAt   time.Time        `json:"generated_at"`
	DownloadURL   string           `json:"download_url"`
	ExpiresAt     time.Time        `json:"expires_at"` // after this the download URL is no longer valid
}
// ============================================================================
// LLM Assistant
// ============================================================================
// AssistantMessage represents a chat message with the assistant
type AssistantMessage struct {
	Role    string `json:"role"` // user, assistant, system
	Content string `json:"content"`
	Step    int    `json:"step,omitempty"` // Current wizard step
}
// AssistantRequest for asking questions
type AssistantRequest struct {
	ApplicationID uuid.UUID              `json:"application_id"`
	Question      string                 `json:"question"`
	CurrentStep   int                    `json:"current_step"`
	Context       map[string]interface{} `json:"context,omitempty"`
	History       []AssistantMessage     `json:"history,omitempty"` // prior turns for conversational context
}
// AssistantResponse from the assistant
type AssistantResponse struct {
	Answer      string                 `json:"answer"`
	Suggestions []string               `json:"suggestions,omitempty"`
	References  []string               `json:"references,omitempty"` // Links to help resources
	FormFills   map[string]interface{} `json:"form_fills,omitempty"` // Suggested form values
}
// ============================================================================
// API Request/Response Types
// ============================================================================
// CreateApplicationRequest for creating a new application
type CreateApplicationRequest struct {
	Title          string         `json:"title"`
	FundingProgram FundingProgram `json:"funding_program"`
	FederalState   FederalState   `json:"federal_state"`
	PresetID       string         `json:"preset_id,omitempty"` // Optional BreakPilot preset
}
// UpdateApplicationRequest for updating an application
// (pointer fields distinguish "not provided" from zero values).
type UpdateApplicationRequest struct {
	Title       *string                `json:"title,omitempty"`
	WizardData  map[string]interface{} `json:"wizard_data,omitempty"`
	CurrentStep *int                   `json:"current_step,omitempty"`
}
// SaveWizardStepRequest for saving a wizard step
type SaveWizardStepRequest struct {
	Step     int                    `json:"step"`
	Data     map[string]interface{} `json:"data"`
	Complete bool                   `json:"complete"` // Mark step as complete
}
// ApplicationListResponse for list endpoints
type ApplicationListResponse struct {
	Applications []FundingApplication `json:"applications"`
	Total        int                  `json:"total"`
	Page         int                  `json:"page"`
	PageSize     int                  `json:"page_size"`
}
// ExportRequest for export endpoints
type ExportRequest struct {
	Format    string   `json:"format"`    // zip, pdf, docx
	Documents []string `json:"documents"` // Which documents to include
	Language  string   `json:"language"`  // de, en
}

View File

@@ -1,652 +0,0 @@
package funding
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
)
// PostgresStore implements Store using PostgreSQL
type PostgresStore struct {
	pool *pgxpool.Pool // shared connection pool; lifecycle owned by the caller
}
// NewPostgresStore creates a new PostgreSQL store
func NewPostgresStore(pool *pgxpool.Pool) *PostgresStore {
	return &PostgresStore{pool: pool}
}
// CreateApplication creates a new funding application.
//
// Side effects on app: a fresh ID, identical CreatedAt/UpdatedAt stamps,
// TotalSteps forced to the default 8-step wizard, and a generated
// ApplicationNumber. The structured sub-documents (wizard data, school
// profile, project plan, budget, timeline) are serialized to JSON columns.
func (s *PostgresStore) CreateApplication(ctx context.Context, app *FundingApplication) error {
	app.ID = uuid.New()
	// Single clock read: the previous code called time.Now() twice, so a
	// freshly inserted row had CreatedAt != UpdatedAt by some microseconds,
	// defeating "unmodified since creation" comparisons. Sibling stores
	// (e.g. CreateTask) already use one shared timestamp.
	now := time.Now()
	app.CreatedAt = now
	app.UpdatedAt = now
	app.TotalSteps = 8 // Default 8-step wizard
	// Generate application number
	app.ApplicationNumber = s.generateApplicationNumber(app.FundingProgram, app.SchoolProfile)
	// Marshal JSON fields
	wizardDataJSON, err := json.Marshal(app.WizardData)
	if err != nil {
		return fmt.Errorf("failed to marshal wizard data: %w", err)
	}
	schoolProfileJSON, err := json.Marshal(app.SchoolProfile)
	if err != nil {
		return fmt.Errorf("failed to marshal school profile: %w", err)
	}
	projectPlanJSON, err := json.Marshal(app.ProjectPlan)
	if err != nil {
		return fmt.Errorf("failed to marshal project plan: %w", err)
	}
	budgetJSON, err := json.Marshal(app.Budget)
	if err != nil {
		return fmt.Errorf("failed to marshal budget: %w", err)
	}
	timelineJSON, err := json.Marshal(app.Timeline)
	if err != nil {
		return fmt.Errorf("failed to marshal timeline: %w", err)
	}
	query := `
		INSERT INTO funding_applications (
			id, tenant_id, application_number, title, funding_program, status,
			current_step, total_steps, wizard_data,
			school_profile, project_plan, budget, timeline,
			requested_amount, own_contribution,
			created_at, updated_at, created_by, updated_by
		) VALUES (
			$1, $2, $3, $4, $5, $6,
			$7, $8, $9,
			$10, $11, $12, $13,
			$14, $15,
			$16, $17, $18, $19
		)
	`
	_, err = s.pool.Exec(ctx, query,
		app.ID, app.TenantID, app.ApplicationNumber, app.Title, app.FundingProgram, app.Status,
		app.CurrentStep, app.TotalSteps, wizardDataJSON,
		schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON,
		app.RequestedAmount, app.OwnContribution,
		app.CreatedAt, app.UpdatedAt, app.CreatedBy, app.UpdatedBy,
	)
	if err != nil {
		return fmt.Errorf("failed to create application: %w", err)
	}
	return nil
}
// GetApplication retrieves an application by ID.
//
// The flat columns are scanned directly; the five JSON columns are scanned
// as raw bytes and unmarshaled only when non-empty, so NULL sub-documents
// stay nil on the returned struct. Returns a "not found" error (wrapping
// nothing) when no row matches.
func (s *PostgresStore) GetApplication(ctx context.Context, id uuid.UUID) (*FundingApplication, error) {
	query := `
		SELECT
			id, tenant_id, application_number, title, funding_program, status,
			current_step, total_steps, wizard_data,
			school_profile, project_plan, budget, timeline,
			requested_amount, own_contribution, approved_amount,
			created_at, updated_at, submitted_at, created_by, updated_by
		FROM funding_applications
		WHERE id = $1
	`
	var app FundingApplication
	var wizardDataJSON, schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON []byte
	err := s.pool.QueryRow(ctx, query, id).Scan(
		&app.ID, &app.TenantID, &app.ApplicationNumber, &app.Title, &app.FundingProgram, &app.Status,
		&app.CurrentStep, &app.TotalSteps, &wizardDataJSON,
		&schoolProfileJSON, &projectPlanJSON, &budgetJSON, &timelineJSON,
		&app.RequestedAmount, &app.OwnContribution, &app.ApprovedAmount,
		&app.CreatedAt, &app.UpdatedAt, &app.SubmittedAt, &app.CreatedBy, &app.UpdatedBy,
	)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, fmt.Errorf("application not found: %s", id)
		}
		return nil, fmt.Errorf("failed to get application: %w", err)
	}
	// Unmarshal JSON fields (each only when the column held data)
	if len(wizardDataJSON) > 0 {
		if err := json.Unmarshal(wizardDataJSON, &app.WizardData); err != nil {
			return nil, fmt.Errorf("failed to unmarshal wizard data: %w", err)
		}
	}
	if len(schoolProfileJSON) > 0 {
		app.SchoolProfile = &SchoolProfile{}
		if err := json.Unmarshal(schoolProfileJSON, app.SchoolProfile); err != nil {
			return nil, fmt.Errorf("failed to unmarshal school profile: %w", err)
		}
	}
	if len(projectPlanJSON) > 0 {
		app.ProjectPlan = &ProjectPlan{}
		if err := json.Unmarshal(projectPlanJSON, app.ProjectPlan); err != nil {
			return nil, fmt.Errorf("failed to unmarshal project plan: %w", err)
		}
	}
	if len(budgetJSON) > 0 {
		app.Budget = &Budget{}
		if err := json.Unmarshal(budgetJSON, app.Budget); err != nil {
			return nil, fmt.Errorf("failed to unmarshal budget: %w", err)
		}
	}
	if len(timelineJSON) > 0 {
		app.Timeline = &ProjectTimeline{}
		if err := json.Unmarshal(timelineJSON, app.Timeline); err != nil {
			return nil, fmt.Errorf("failed to unmarshal timeline: %w", err)
		}
	}
	// Load attachments
	// NOTE(review): the attachments load is best-effort — a failure leaves
	// Attachments nil with no error to the caller. Presumably intentional;
	// confirm that callers tolerate silently missing attachments.
	attachments, err := s.GetAttachments(ctx, id)
	if err == nil {
		app.Attachments = attachments
	}
	return &app, nil
}
// GetApplicationByNumber retrieves an application by number
//
// Resolves the human-readable number to an ID first, then delegates to
// GetApplication so all JSON fields and attachments are loaded through a
// single code path.
func (s *PostgresStore) GetApplicationByNumber(ctx context.Context, number string) (*FundingApplication, error) {
	const lookup = `SELECT id FROM funding_applications WHERE application_number = $1`
	var appID uuid.UUID
	if err := s.pool.QueryRow(ctx, lookup, number).Scan(&appID); err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, fmt.Errorf("application not found: %s", number)
		}
		return nil, fmt.Errorf("failed to find application by number: %w", err)
	}
	return s.GetApplication(ctx, appID)
}
// UpdateApplication updates an existing application
//
// Refreshes UpdatedAt, re-serializes all JSON documents and writes the row.
// Returns a "not found" error when no row matched the ID.
//
// Fix: json.Marshal errors were previously discarded with `_`, which could
// silently persist empty/invalid JSON; serialization failures are now
// propagated before any write happens.
func (s *PostgresStore) UpdateApplication(ctx context.Context, app *FundingApplication) error {
	app.UpdatedAt = time.Now()
	// Marshal JSON fields, failing fast on the first serialization error.
	wizardDataJSON, err := json.Marshal(app.WizardData)
	if err != nil {
		return fmt.Errorf("failed to marshal wizard data: %w", err)
	}
	schoolProfileJSON, err := json.Marshal(app.SchoolProfile)
	if err != nil {
		return fmt.Errorf("failed to marshal school profile: %w", err)
	}
	projectPlanJSON, err := json.Marshal(app.ProjectPlan)
	if err != nil {
		return fmt.Errorf("failed to marshal project plan: %w", err)
	}
	budgetJSON, err := json.Marshal(app.Budget)
	if err != nil {
		return fmt.Errorf("failed to marshal budget: %w", err)
	}
	timelineJSON, err := json.Marshal(app.Timeline)
	if err != nil {
		return fmt.Errorf("failed to marshal timeline: %w", err)
	}
	query := `
UPDATE funding_applications SET
title = $2, funding_program = $3, status = $4,
current_step = $5, wizard_data = $6,
school_profile = $7, project_plan = $8, budget = $9, timeline = $10,
requested_amount = $11, own_contribution = $12, approved_amount = $13,
updated_at = $14, submitted_at = $15, updated_by = $16
WHERE id = $1
`
	result, err := s.pool.Exec(ctx, query,
		app.ID, app.Title, app.FundingProgram, app.Status,
		app.CurrentStep, wizardDataJSON,
		schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON,
		app.RequestedAmount, app.OwnContribution, app.ApprovedAmount,
		app.UpdatedAt, app.SubmittedAt, app.UpdatedBy,
	)
	if err != nil {
		return fmt.Errorf("failed to update application: %w", err)
	}
	if result.RowsAffected() == 0 {
		return fmt.Errorf("application not found: %s", app.ID)
	}
	return nil
}
// DeleteApplication soft-deletes an application
//
// Soft delete: the row is kept and its status flipped to ARCHIVED so the
// application disappears from lists and search without losing history.
func (s *PostgresStore) DeleteApplication(ctx context.Context, id uuid.UUID) error {
	const query = `UPDATE funding_applications SET status = 'ARCHIVED', updated_at = $2 WHERE id = $1`
	tag, err := s.pool.Exec(ctx, query, id, time.Now())
	if err != nil {
		return fmt.Errorf("failed to delete application: %w", err)
	}
	if tag.RowsAffected() == 0 {
		return fmt.Errorf("application not found: %s", id)
	}
	return nil
}
// ListApplications returns a paginated list of applications
//
// Applies optional status/program filters, sorts by a whitelisted column and
// returns one page plus the total matching count.
//
// Fixes over the previous version:
//   - filter.SortBy was interpolated into the ORDER BY clause unvalidated,
//     which is a SQL-injection vector; sort columns are now whitelisted.
//   - the COUNT query ignored the status/program filters, so Total could
//     disagree with the filtered rows; it now shares the same WHERE clause.
//   - errors from the count query, row Scan and row iteration were ignored;
//     they are now propagated.
func (s *PostgresStore) ListApplications(ctx context.Context, tenantID uuid.UUID, filter ApplicationFilter) (*ApplicationListResponse, error) {
	// Shared WHERE clause for both the page query and the count query.
	where := ` WHERE tenant_id = $1 AND status != 'ARCHIVED'`
	args := []interface{}{tenantID}
	argIndex := 2
	if filter.Status != nil {
		where += fmt.Sprintf(" AND status = $%d", argIndex)
		args = append(args, *filter.Status)
		argIndex++
	}
	if filter.FundingProgram != nil {
		where += fmt.Sprintf(" AND funding_program = $%d", argIndex)
		args = append(args, *filter.FundingProgram)
		argIndex++
	}
	// Count total with the same filters applied.
	var total int
	countQuery := `SELECT COUNT(*) FROM funding_applications` + where
	if err := s.pool.QueryRow(ctx, countQuery, args...).Scan(&total); err != nil {
		return nil, fmt.Errorf("failed to count applications: %w", err)
	}
	// Whitelist the sort column/order: these are interpolated into the SQL
	// text and must never come directly from user input.
	allowedSort := map[string]bool{
		"created_at": true, "updated_at": true, "submitted_at": true,
		"title": true, "application_number": true, "status": true,
		"requested_amount": true,
	}
	sortBy := "created_at"
	if allowedSort[filter.SortBy] {
		sortBy = filter.SortBy
	}
	sortOrder := "DESC"
	if filter.SortOrder == "asc" {
		sortOrder = "ASC"
	}
	// Defensive pagination defaults.
	if filter.PageSize <= 0 {
		filter.PageSize = 20
	}
	if filter.Page <= 0 {
		filter.Page = 1
	}
	offset := (filter.Page - 1) * filter.PageSize
	query := `
SELECT
id, tenant_id, application_number, title, funding_program, status,
current_step, total_steps, wizard_data,
school_profile, project_plan, budget, timeline,
requested_amount, own_contribution, approved_amount,
created_at, updated_at, submitted_at, created_by, updated_by
FROM funding_applications` + where +
		fmt.Sprintf(" ORDER BY %s %s LIMIT %d OFFSET %d", sortBy, sortOrder, filter.PageSize, offset)
	rows, err := s.pool.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to list applications: %w", err)
	}
	defer rows.Close()
	var apps []FundingApplication
	for rows.Next() {
		var app FundingApplication
		var wizardDataJSON, schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON []byte
		if err := rows.Scan(
			&app.ID, &app.TenantID, &app.ApplicationNumber, &app.Title, &app.FundingProgram, &app.Status,
			&app.CurrentStep, &app.TotalSteps, &wizardDataJSON,
			&schoolProfileJSON, &projectPlanJSON, &budgetJSON, &timelineJSON,
			&app.RequestedAmount, &app.OwnContribution, &app.ApprovedAmount,
			&app.CreatedAt, &app.UpdatedAt, &app.SubmittedAt, &app.CreatedBy, &app.UpdatedBy,
		); err != nil {
			return nil, fmt.Errorf("failed to scan application: %w", err)
		}
		// Only the school profile is decoded for list rendering; the other
		// JSON documents stay unparsed to keep the list query cheap.
		if len(schoolProfileJSON) > 0 {
			app.SchoolProfile = &SchoolProfile{}
			if err := json.Unmarshal(schoolProfileJSON, app.SchoolProfile); err != nil {
				return nil, fmt.Errorf("failed to unmarshal school profile: %w", err)
			}
		}
		apps = append(apps, app)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate applications: %w", err)
	}
	return &ApplicationListResponse{
		Applications: apps,
		Total:        total,
		Page:         filter.Page,
		PageSize:     filter.PageSize,
	}, nil
}
// SearchApplications searches applications by text
//
// Case-insensitive substring match (ILIKE) against the title, the
// application number and the textified school_profile JSONB, capped at the
// 50 most recently updated hits.
func (s *PostgresStore) SearchApplications(ctx context.Context, tenantID uuid.UUID, query string) ([]FundingApplication, error) {
	searchQuery := `
SELECT id FROM funding_applications
WHERE tenant_id = $1
AND status != 'ARCHIVED'
AND (
title ILIKE $2
OR application_number ILIKE $2
OR school_profile::text ILIKE $2
)
ORDER BY updated_at DESC
LIMIT 50
`
	rows, err := s.pool.Query(ctx, searchQuery, tenantID, "%"+query+"%")
	if err != nil {
		return nil, fmt.Errorf("failed to search applications: %w", err)
	}
	defer rows.Close()
	var apps []FundingApplication
	for rows.Next() {
		var id uuid.UUID
		if err := rows.Scan(&id); err != nil {
			// Skip unreadable rows rather than failing the whole search.
			continue
		}
		// Full per-hit load (N+1 queries). NOTE(review): acceptable at
		// LIMIT 50, but consider a joined query if search volume grows.
		app, err := s.GetApplication(ctx, id)
		if err == nil {
			apps = append(apps, *app)
		}
	}
	return apps, nil
}
// SaveWizardStep saves data for a wizard step
//
// Loads the current application state so earlier steps are preserved, stores
// the payload under the "step_<n>" key, advances the step cursor and writes
// everything back via UpdateApplication.
func (s *PostgresStore) SaveWizardStep(ctx context.Context, appID uuid.UUID, step int, data map[string]interface{}) error {
	app, err := s.GetApplication(ctx, appID)
	if err != nil {
		return err
	}
	if app.WizardData == nil {
		app.WizardData = map[string]interface{}{}
	}
	app.WizardData[fmt.Sprintf("step_%d", step)] = data
	app.CurrentStep = step
	return s.UpdateApplication(ctx, app)
}
// GetWizardProgress returns the wizard progress
//
// A step counts as completed once its "step_<n>" key exists in the stored
// wizard data; CompletedSteps is always non-nil.
func (s *PostgresStore) GetWizardProgress(ctx context.Context, appID uuid.UUID) (*WizardProgress, error) {
	app, err := s.GetApplication(ctx, appID)
	if err != nil {
		return nil, err
	}
	completed := make([]int, 0, app.TotalSteps)
	for step := 1; step <= app.TotalSteps; step++ {
		if _, done := app.WizardData[fmt.Sprintf("step_%d", step)]; done {
			completed = append(completed, step)
		}
	}
	return &WizardProgress{
		CurrentStep:    app.CurrentStep,
		TotalSteps:     app.TotalSteps,
		CompletedSteps: completed,
		FormData:       app.WizardData,
		LastSavedAt:    app.UpdatedAt,
	}, nil
}
// AddAttachment adds an attachment to an application
//
// Assigns a fresh UUID and upload timestamp before inserting; any caller-set
// ID/UploadedAt values are overwritten.
func (s *PostgresStore) AddAttachment(ctx context.Context, appID uuid.UUID, attachment *Attachment) error {
	attachment.ID = uuid.New()
	attachment.UploadedAt = time.Now()
	query := `
INSERT INTO funding_attachments (
id, application_id, file_name, file_type, file_size,
category, description, storage_path, uploaded_at, uploaded_by
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
`
	_, err := s.pool.Exec(ctx, query,
		attachment.ID, appID, attachment.FileName, attachment.FileType, attachment.FileSize,
		attachment.Category, attachment.Description, attachment.StoragePath,
		attachment.UploadedAt, attachment.UploadedBy,
	)
	return err
}
// GetAttachments returns all attachments for an application
//
// Ordered newest first. Rows that fail to scan are silently skipped —
// NOTE(review): this hides corrupt rows; consider at least logging them.
func (s *PostgresStore) GetAttachments(ctx context.Context, appID uuid.UUID) ([]Attachment, error) {
	query := `
SELECT id, file_name, file_type, file_size, category, description, storage_path, uploaded_at, uploaded_by
FROM funding_attachments
WHERE application_id = $1
ORDER BY uploaded_at DESC
`
	rows, err := s.pool.Query(ctx, query, appID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var attachments []Attachment
	for rows.Next() {
		var a Attachment
		err := rows.Scan(&a.ID, &a.FileName, &a.FileType, &a.FileSize, &a.Category, &a.Description, &a.StoragePath, &a.UploadedAt, &a.UploadedBy)
		if err != nil {
			continue
		}
		attachments = append(attachments, a)
	}
	return attachments, nil
}
// DeleteAttachment deletes an attachment
//
// Hard delete: unlike applications, attachments have no soft-delete
// semantics. Deleting a non-existent ID is not an error.
func (s *PostgresStore) DeleteAttachment(ctx context.Context, attachmentID uuid.UUID) error {
	const query = `DELETE FROM funding_attachments WHERE id = $1`
	_, err := s.pool.Exec(ctx, query, attachmentID)
	return err
}
// AddHistoryEntry adds an audit trail entry
//
// Assigns a fresh ID and an RFC3339 timestamp, serializes the old/new value
// maps and the changed-field list to JSON and inserts the row.
// NOTE(review): json.Marshal errors are ignored here; a failed marshal would
// write empty JSON into the audit trail.
func (s *PostgresStore) AddHistoryEntry(ctx context.Context, entry *ApplicationHistoryEntry) error {
	entry.ID = uuid.New()
	entry.PerformedAt = time.Now().Format(time.RFC3339)
	oldValuesJSON, _ := json.Marshal(entry.OldValues)
	newValuesJSON, _ := json.Marshal(entry.NewValues)
	changedFieldsJSON, _ := json.Marshal(entry.ChangedFields)
	query := `
INSERT INTO funding_application_history (
id, application_id, action, changed_fields, old_values, new_values,
performed_by, performed_at, notes
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
`
	_, err := s.pool.Exec(ctx, query,
		entry.ID, entry.ApplicationID, entry.Action, changedFieldsJSON, oldValuesJSON, newValuesJSON,
		entry.PerformedBy, entry.PerformedAt, entry.Notes,
	)
	return err
}
// GetHistory returns the audit trail for an application
//
// Ordered newest first. Rows that fail to scan are skipped, and per-field
// JSON unmarshal errors are ignored — the entry is still returned with the
// affected field left zero-valued.
func (s *PostgresStore) GetHistory(ctx context.Context, appID uuid.UUID) ([]ApplicationHistoryEntry, error) {
	query := `
SELECT id, application_id, action, changed_fields, old_values, new_values, performed_by, performed_at, notes
FROM funding_application_history
WHERE application_id = $1
ORDER BY performed_at DESC
`
	rows, err := s.pool.Query(ctx, query, appID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var history []ApplicationHistoryEntry
	for rows.Next() {
		var entry ApplicationHistoryEntry
		var changedFieldsJSON, oldValuesJSON, newValuesJSON []byte
		err := rows.Scan(
			&entry.ID, &entry.ApplicationID, &entry.Action, &changedFieldsJSON, &oldValuesJSON, &newValuesJSON,
			&entry.PerformedBy, &entry.PerformedAt, &entry.Notes,
		)
		if err != nil {
			continue
		}
		// Best-effort JSON decode; errors intentionally ignored.
		json.Unmarshal(changedFieldsJSON, &entry.ChangedFields)
		json.Unmarshal(oldValuesJSON, &entry.OldValues)
		json.Unmarshal(newValuesJSON, &entry.NewValues)
		history = append(history, entry)
	}
	return history, nil
}
// GetStatistics returns funding statistics
//
// Aggregates counts by status plus requested/approved totals, followed by a
// per-program breakdown. ByState is initialized but not yet populated here.
//
// Fix: the per-program query's error was previously discarded
// (`rows, _ := s.pool.Query(...)`); on failure rows was nil and rows.Next()
// would panic. The error is now checked, rows are closed via defer, and
// scan/iteration errors are propagated.
func (s *PostgresStore) GetStatistics(ctx context.Context, tenantID uuid.UUID) (*FundingStatistics, error) {
	stats := &FundingStatistics{
		ByProgram: make(map[FundingProgram]int),
		ByState:   make(map[FederalState]int),
	}
	// Total and by status
	query := `
SELECT
COUNT(*) as total,
COUNT(*) FILTER (WHERE status = 'DRAFT') as draft,
COUNT(*) FILTER (WHERE status = 'SUBMITTED') as submitted,
COUNT(*) FILTER (WHERE status = 'APPROVED') as approved,
COUNT(*) FILTER (WHERE status = 'REJECTED') as rejected,
COALESCE(SUM(requested_amount), 0) as total_requested,
COALESCE(SUM(COALESCE(approved_amount, 0)), 0) as total_approved
FROM funding_applications
WHERE tenant_id = $1 AND status != 'ARCHIVED'
`
	err := s.pool.QueryRow(ctx, query, tenantID).Scan(
		&stats.TotalApplications, &stats.DraftCount, &stats.SubmittedCount,
		&stats.ApprovedCount, &stats.RejectedCount,
		&stats.TotalRequested, &stats.TotalApproved,
	)
	if err != nil {
		return nil, err
	}
	// By program
	programQuery := `
SELECT funding_program, COUNT(*)
FROM funding_applications
WHERE tenant_id = $1 AND status != 'ARCHIVED'
GROUP BY funding_program
`
	rows, err := s.pool.Query(ctx, programQuery, tenantID)
	if err != nil {
		return nil, fmt.Errorf("failed to query program statistics: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var program FundingProgram
		var count int
		if err := rows.Scan(&program, &count); err != nil {
			return nil, fmt.Errorf("failed to scan program statistics: %w", err)
		}
		stats.ByProgram[program] = count
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to read program statistics: %w", err)
	}
	return stats, nil
}
// SaveExportBundle saves an export bundle record
//
// Assigns a fresh ID, stamps generation time and a fixed 24-hour expiry,
// then inserts the row.
// NOTE(review): the documents marshal error is ignored.
func (s *PostgresStore) SaveExportBundle(ctx context.Context, bundle *ExportBundle) error {
	bundle.ID = uuid.New()
	bundle.GeneratedAt = time.Now()
	bundle.ExpiresAt = time.Now().Add(24 * time.Hour) // 24h expiry
	documentsJSON, _ := json.Marshal(bundle.Documents)
	query := `
INSERT INTO funding_export_bundles (
id, application_id, documents, generated_at, download_url, expires_at
) VALUES ($1, $2, $3, $4, $5, $6)
`
	_, err := s.pool.Exec(ctx, query,
		bundle.ID, bundle.ApplicationID, documentsJSON,
		bundle.GeneratedAt, bundle.DownloadURL, bundle.ExpiresAt,
	)
	return err
}
// GetExportBundle retrieves an export bundle
//
// Expired bundles are filtered out in SQL (expires_at > NOW()), so a stale
// ID behaves the same as a missing one.
// NOTE(review): the documents unmarshal error is ignored; a corrupt column
// yields a bundle with an empty Documents field.
func (s *PostgresStore) GetExportBundle(ctx context.Context, bundleID uuid.UUID) (*ExportBundle, error) {
	query := `
SELECT id, application_id, documents, generated_at, download_url, expires_at
FROM funding_export_bundles
WHERE id = $1 AND expires_at > NOW()
`
	var bundle ExportBundle
	var documentsJSON []byte
	err := s.pool.QueryRow(ctx, query, bundleID).Scan(
		&bundle.ID, &bundle.ApplicationID, &documentsJSON,
		&bundle.GeneratedAt, &bundle.DownloadURL, &bundle.ExpiresAt,
	)
	if err != nil {
		return nil, err
	}
	json.Unmarshal(documentsJSON, &bundle.Documents)
	return &bundle, nil
}
// generateApplicationNumber creates a unique application number
//
// Format: <prefix>-<state>-<year>-<5-digit sequence>, e.g. "DP2-BY-2026-00042".
// The prefix encodes the funding program; the state falls back to "XX" when
// no school profile is available.
//
// NOTE(review): the MAX(...)+1 lookup is not atomic — two concurrent inserts
// could draw the same sequence number. It also uses context.Background()
// instead of a caller-supplied context, and the QueryRow/Scan error is
// ignored (seq stays 0 on failure). Confirm whether these are acceptable.
func (s *PostgresStore) generateApplicationNumber(program FundingProgram, school *SchoolProfile) string {
	year := time.Now().Year()
	state := "XX"
	if school != nil {
		state = string(school.FederalState)
	}
	prefix := "FA"
	switch program {
	case FundingProgramDigitalPakt1:
		prefix = "DP1"
	case FundingProgramDigitalPakt2:
		prefix = "DP2"
	case FundingProgramLandesfoerderung:
		prefix = "LF"
	}
	// Get sequence number: highest existing trailing 5-digit suffix + 1
	// among numbers sharing the same prefix/state/year.
	var seq int
	s.pool.QueryRow(context.Background(),
		`SELECT COALESCE(MAX(CAST(SUBSTRING(application_number FROM '\d{5}$') AS INTEGER)), 0) + 1
FROM funding_applications WHERE application_number LIKE $1`,
		fmt.Sprintf("%s-%s-%d-%%", prefix, state, year),
	).Scan(&seq)
	return fmt.Sprintf("%s-%s-%d-%05d", prefix, state, year, seq)
}

View File

@@ -1,81 +0,0 @@
package funding
import (
"context"
"github.com/google/uuid"
)
// Store defines the interface for funding application persistence
//
// Implemented by PostgresStore; grouped by concern so alternative backends
// (or mocks in tests) can satisfy it piecewise.
type Store interface {
	// Application CRUD
	CreateApplication(ctx context.Context, app *FundingApplication) error
	GetApplication(ctx context.Context, id uuid.UUID) (*FundingApplication, error)
	GetApplicationByNumber(ctx context.Context, number string) (*FundingApplication, error)
	UpdateApplication(ctx context.Context, app *FundingApplication) error
	// DeleteApplication is a soft delete in the Postgres implementation
	// (status -> ARCHIVED).
	DeleteApplication(ctx context.Context, id uuid.UUID) error
	// List & Search
	ListApplications(ctx context.Context, tenantID uuid.UUID, filter ApplicationFilter) (*ApplicationListResponse, error)
	SearchApplications(ctx context.Context, tenantID uuid.UUID, query string) ([]FundingApplication, error)
	// Wizard Data
	SaveWizardStep(ctx context.Context, appID uuid.UUID, step int, data map[string]interface{}) error
	GetWizardProgress(ctx context.Context, appID uuid.UUID) (*WizardProgress, error)
	// Attachments
	AddAttachment(ctx context.Context, appID uuid.UUID, attachment *Attachment) error
	GetAttachments(ctx context.Context, appID uuid.UUID) ([]Attachment, error)
	DeleteAttachment(ctx context.Context, attachmentID uuid.UUID) error
	// Application History (Audit Trail)
	AddHistoryEntry(ctx context.Context, entry *ApplicationHistoryEntry) error
	GetHistory(ctx context.Context, appID uuid.UUID) ([]ApplicationHistoryEntry, error)
	// Statistics
	GetStatistics(ctx context.Context, tenantID uuid.UUID) (*FundingStatistics, error)
	// Export Tracking
	SaveExportBundle(ctx context.Context, bundle *ExportBundle) error
	GetExportBundle(ctx context.Context, bundleID uuid.UUID) (*ExportBundle, error)
}
// ApplicationFilter for filtering list queries
//
// Page/PageSize default to 1/20 in the store when unset.
// NOTE(review): FederalState, CreatedAfter and CreatedBefore are declared
// but not yet applied by PostgresStore.ListApplications; SortBy/SortOrder
// are interpolated into SQL by the store and must not carry untrusted input.
type ApplicationFilter struct {
	Status         *ApplicationStatus `json:"status,omitempty"`
	FundingProgram *FundingProgram    `json:"funding_program,omitempty"`
	FederalState   *FederalState      `json:"federal_state,omitempty"`
	CreatedAfter   *string            `json:"created_after,omitempty"`
	CreatedBefore  *string            `json:"created_before,omitempty"`
	Page           int                `json:"page"`
	PageSize       int                `json:"page_size"`
	SortBy         string             `json:"sort_by,omitempty"`
	SortOrder      string             `json:"sort_order,omitempty"` // asc, desc
}

// ApplicationHistoryEntry for audit trail
//
// ID and PerformedAt are assigned by the store on insert.
type ApplicationHistoryEntry struct {
	ID            uuid.UUID              `json:"id"`
	ApplicationID uuid.UUID              `json:"application_id"`
	Action        string                 `json:"action"` // created, updated, submitted, approved, etc.
	ChangedFields []string               `json:"changed_fields,omitempty"`
	OldValues     map[string]interface{} `json:"old_values,omitempty"`
	NewValues     map[string]interface{} `json:"new_values,omitempty"`
	PerformedBy   uuid.UUID              `json:"performed_by"`
	PerformedAt   string                 `json:"performed_at"` // RFC3339, set by the store
	Notes         string                 `json:"notes,omitempty"`
}

// FundingStatistics for dashboard
type FundingStatistics struct {
	TotalApplications  int                    `json:"total_applications"`
	DraftCount         int                    `json:"draft_count"`
	SubmittedCount     int                    `json:"submitted_count"`
	ApprovedCount      int                    `json:"approved_count"`
	RejectedCount      int                    `json:"rejected_count"`
	TotalRequested     float64                `json:"total_requested"`
	TotalApproved      float64                `json:"total_approved"`
	AverageProcessDays float64                `json:"average_process_days"`
	ByProgram          map[FundingProgram]int `json:"by_program"`
	// ByState is initialized but not yet populated by PostgresStore.GetStatistics.
	ByState map[FederalState]int `json:"by_state"`
}

View File

@@ -1,371 +0,0 @@
package gci
import (
"fmt"
"math"
"time"
)
// Engine calculates the GCI score. It is stateless; all inputs are fetched
// per call.
type Engine struct{}

// NewEngine creates a new GCI calculation engine.
func NewEngine() *Engine {
	return new(Engine)
}
// Calculate computes the full GCI result for a tenant
//
// Pipeline (each relevant stage also appends an audit-trail entry):
//  1. load module + certificate data (mock backends for now)
//  2. level 1: per-module completion score, decayed by certificate validity
//  3. level 2: risk-weighted aggregation per regulation area
//  4. level 3: profile-weighted area scores
//  5. weighted raw GCI across areas
//  6. criticality multiplier (critical modules with low completion)
//  7. incident adjustment (open/critical incidents)
//  8. final score clamped to 0..100, one decimal
//  9. maturity level mapping
func (e *Engine) Calculate(tenantID string, profileID string) *GCIResult {
	now := time.Now()
	profile := GetProfile(profileID)
	auditTrail := []AuditEntry{}
	// Step 1: Get module data (mock for now)
	modules := MockModuleData(tenantID)
	certDates := MockCertificateData()
	// Step 2: Calculate Level 1 - Module Scores with validity
	for i := range modules {
		m := &modules[i]
		if m.Assigned > 0 {
			m.RawScore = float64(m.Completed) / float64(m.Assigned) * 100.0
		}
		// Apply validity factor
		if validUntil, ok := certDates[m.ModuleID]; ok {
			m.ValidityFactor = CalculateValidityFactor(validUntil, now)
		} else {
			m.ValidityFactor = 1.0 // No certificate tracking = assume valid
		}
		m.FinalScore = m.RawScore * m.ValidityFactor
		// Only decayed modules are worth an audit entry.
		if m.ValidityFactor < 1.0 {
			auditTrail = append(auditTrail, AuditEntry{
				Timestamp:   now,
				Factor:      "validity_decay",
				Description: fmt.Sprintf("Modul '%s': Gueltigkeitsfaktor %.2f (Zertifikat laeuft ab/abgelaufen)", m.ModuleName, m.ValidityFactor),
				Value:       m.ValidityFactor,
				Impact:      "negative",
			})
		}
	}
	// Step 3: Calculate Level 2 - Risk-Weighted Scores per area.
	// Modules with a category outside these four are silently dropped.
	areaModules := map[string][]ModuleScore{
		"dsgvo":    {},
		"nis2":     {},
		"iso27001": {},
		"ai_act":   {},
	}
	for _, m := range modules {
		if _, ok := areaModules[m.Category]; ok {
			areaModules[m.Category] = append(areaModules[m.Category], m)
		}
	}
	level2Areas := []RiskWeightedScore{}
	areaNames := map[string]string{
		"dsgvo":    "DSGVO",
		"nis2":     "NIS2",
		"iso27001": "ISO 27001",
		"ai_act":   "EU AI Act",
	}
	for areaID, mods := range areaModules {
		rws := RiskWeightedScore{
			AreaID:   areaID,
			AreaName: areaNames[areaID],
			Modules:  mods,
		}
		// Weighted mean of module final scores, weighted by risk.
		for _, m := range mods {
			rws.WeightedSum += m.FinalScore * m.RiskWeight
			rws.TotalWeight += m.RiskWeight
		}
		if rws.TotalWeight > 0 {
			rws.AreaScore = rws.WeightedSum / rws.TotalWeight
		}
		level2Areas = append(level2Areas, rws)
	}
	// Step 4: Calculate Level 3 - Regulation Area Scores (profile weights)
	areaScores := []RegulationAreaScore{}
	for _, rws := range level2Areas {
		weight := profile.Weights[rws.AreaID]
		// A module counts as completed only when everyone assigned finished.
		completedCount := 0
		for _, m := range rws.Modules {
			if m.Completed >= m.Assigned && m.Assigned > 0 {
				completedCount++
			}
		}
		ras := RegulationAreaScore{
			RegulationID:   rws.AreaID,
			RegulationName: rws.AreaName,
			Score:          math.Round(rws.AreaScore*100) / 100,
			Weight:         weight,
			WeightedScore:  rws.AreaScore * weight,
			ModuleCount:    len(rws.Modules),
			CompletedCount: completedCount,
		}
		areaScores = append(areaScores, ras)
		auditTrail = append(auditTrail, AuditEntry{
			Timestamp:   now,
			Factor:      "area_score",
			Description: fmt.Sprintf("Bereich '%s': Score %.1f, Gewicht %.0f%%", rws.AreaName, rws.AreaScore, weight*100),
			Value:       rws.AreaScore,
			Impact:      "neutral",
		})
	}
	// Step 5: Calculate raw GCI (weight-normalized sum of area scores)
	rawGCI := 0.0
	totalWeight := 0.0
	for _, ras := range areaScores {
		rawGCI += ras.WeightedScore
		totalWeight += ras.Weight
	}
	if totalWeight > 0 {
		rawGCI = rawGCI / totalWeight
	}
	// Step 6: Apply Criticality Multiplier (<= 1.0)
	criticalityMult := calculateCriticalityMultiplier(modules)
	auditTrail = append(auditTrail, AuditEntry{
		Timestamp:   now,
		Factor:      "criticality_multiplier",
		Description: fmt.Sprintf("Kritikalitaetsmultiplikator: %.3f", criticalityMult),
		Value:       criticalityMult,
		Impact: func() string {
			if criticalityMult < 1.0 {
				return "negative"
			}
			return "neutral"
		}(),
	})
	// Step 7: Apply Incident Adjustment (<= 1.0, floored at 0.8)
	openInc, critInc := MockIncidentData()
	incidentAdj := calculateIncidentAdjustment(openInc, critInc)
	auditTrail = append(auditTrail, AuditEntry{
		Timestamp:   now,
		Factor:      "incident_adjustment",
		Description: fmt.Sprintf("Vorfallsanpassung: %.3f (%d offen, %d kritisch)", incidentAdj, openInc, critInc),
		Value:       incidentAdj,
		Impact:      "negative",
	})
	// Step 8: Final GCI — clamp to [0, 100] and round to one decimal.
	finalGCI := rawGCI * criticalityMult * incidentAdj
	finalGCI = math.Max(0, math.Min(100, math.Round(finalGCI*10)/10))
	// Step 9: Determine Maturity Level
	maturity := determineMaturityLevel(finalGCI)
	auditTrail = append(auditTrail, AuditEntry{
		Timestamp:   now,
		Factor:      "final_gci",
		Description: fmt.Sprintf("GCI-Endergebnis: %.1f → Reifegrad: %s", finalGCI, MaturityLabels[maturity]),
		Value:       finalGCI,
		Impact:      "neutral",
	})
	return &GCIResult{
		TenantID:        tenantID,
		GCIScore:        finalGCI,
		MaturityLevel:   maturity,
		MaturityLabel:   MaturityLabels[maturity],
		CalculatedAt:    now,
		Profile:         profileID,
		AreaScores:      areaScores,
		CriticalityMult: criticalityMult,
		IncidentAdj:     incidentAdj,
		AuditTrail:      auditTrail,
	}
}
// CalculateBreakdown returns the full 4-level breakdown
//
// Runs Calculate for the headline result, then recomputes the level-1 module
// scores and level-2 area aggregates so they can be returned in detail.
// NOTE(review): this duplicates Calculate's level-1/2 logic (minus the
// category whitelist) — the two must be kept in sync.
func (e *Engine) CalculateBreakdown(tenantID string, profileID string) *GCIBreakdown {
	result := e.Calculate(tenantID, profileID)
	modules := MockModuleData(tenantID)
	certDates := MockCertificateData()
	now := time.Now()
	// Recalculate module scores for the breakdown
	for i := range modules {
		m := &modules[i]
		if m.Assigned > 0 {
			m.RawScore = float64(m.Completed) / float64(m.Assigned) * 100.0
		}
		if validUntil, ok := certDates[m.ModuleID]; ok {
			m.ValidityFactor = CalculateValidityFactor(validUntil, now)
		} else {
			m.ValidityFactor = 1.0
		}
		m.FinalScore = m.RawScore * m.ValidityFactor
	}
	// Build Level 2 areas (here every category gets a bucket, unlike
	// Calculate which restricts to the four known areas).
	areaModules := map[string][]ModuleScore{}
	for _, m := range modules {
		areaModules[m.Category] = append(areaModules[m.Category], m)
	}
	areaNames := map[string]string{"dsgvo": "DSGVO", "nis2": "NIS2", "iso27001": "ISO 27001", "ai_act": "EU AI Act"}
	level2 := []RiskWeightedScore{}
	for areaID, mods := range areaModules {
		rws := RiskWeightedScore{AreaID: areaID, AreaName: areaNames[areaID], Modules: mods}
		for _, m := range mods {
			rws.WeightedSum += m.FinalScore * m.RiskWeight
			rws.TotalWeight += m.RiskWeight
		}
		if rws.TotalWeight > 0 {
			rws.AreaScore = rws.WeightedSum / rws.TotalWeight
		}
		level2 = append(level2, rws)
	}
	return &GCIBreakdown{
		GCIResult:     *result,
		Level1Modules: modules,
		Level2Areas:   level2,
	}
}
// GetHistory returns historical GCI snapshots
//
// Returns the mock history with one freshly calculated snapshot (using the
// "default" profile) appended as the latest data point.
func (e *Engine) GetHistory(tenantID string) []GCISnapshot {
	// Add current score to history
	result := e.Calculate(tenantID, "default")
	history := MockGCIHistory(tenantID)
	current := GCISnapshot{
		TenantID:      tenantID,
		Score:         result.GCIScore,
		MaturityLevel: result.MaturityLevel,
		AreaScores:    make(map[string]float64),
		CalculatedAt:  result.CalculatedAt,
	}
	for _, as := range result.AreaScores {
		current.AreaScores[as.RegulationID] = as.Score
	}
	history = append(history, current)
	return history
}
// GetMatrix returns the compliance matrix (roles x regulations)
//
// For each predefined role, averages the raw completion (Completed/Assigned,
// no validity decay) of the role's required modules per regulation area, and
// derives an overall average across areas. Required modules missing from the
// tenant's module data are simply skipped.
func (e *Engine) GetMatrix(tenantID string) []ComplianceMatrixEntry {
	modules := MockModuleData(tenantID)
	roles := []struct {
		ID   string
		Name string
	}{
		{"management", "Geschaeftsfuehrung"},
		{"it_security", "IT-Sicherheit / CISO"},
		{"data_protection", "Datenschutz / DSB"},
		{"hr", "Personalwesen"},
		{"general", "Allgemeine Mitarbeiter"},
	}
	// Define which modules are relevant per role
	roleModules := map[string][]string{
		"management":      {"dsgvo-grundlagen", "nis2-management", "ai-governance", "iso-isms"},
		"it_security":     {"nis2-risikomanagement", "nis2-incident-response", "iso-zugangssteuerung", "iso-kryptografie", "ai-hochrisiko"},
		"data_protection": {"dsgvo-grundlagen", "dsgvo-betroffenenrechte", "dsgvo-tom", "dsgvo-dsfa", "dsgvo-auftragsverarbeitung"},
		"hr":              {"dsgvo-grundlagen", "dsgvo-betroffenenrechte", "nis2-management"},
		"general":         {"dsgvo-grundlagen", "nis2-risikomanagement", "ai-risikokategorien", "ai-transparenz"},
	}
	moduleMap := map[string]ModuleScore{}
	for _, m := range modules {
		moduleMap[m.ModuleID] = m
	}
	entries := []ComplianceMatrixEntry{}
	for _, role := range roles {
		entry := ComplianceMatrixEntry{
			Role:        role.ID,
			RoleName:    role.Name,
			Regulations: map[string]float64{},
		}
		// Collect per-regulation score lists for this role's modules.
		regScores := map[string][]float64{}
		requiredModuleIDs := roleModules[role.ID]
		entry.RequiredModules = len(requiredModuleIDs)
		for _, modID := range requiredModuleIDs {
			if m, ok := moduleMap[modID]; ok {
				score := 0.0
				if m.Assigned > 0 {
					score = float64(m.Completed) / float64(m.Assigned) * 100
				}
				regScores[m.Category] = append(regScores[m.Category], score)
				if m.Completed >= m.Assigned && m.Assigned > 0 {
					entry.CompletedModules++
				}
			}
		}
		// Average per regulation, then across regulations for the overall.
		totalScore := 0.0
		count := 0
		for reg, scores := range regScores {
			sum := 0.0
			for _, s := range scores {
				sum += s
			}
			avg := sum / float64(len(scores))
			entry.Regulations[reg] = math.Round(avg*10) / 10
			totalScore += avg
			count++
		}
		if count > 0 {
			entry.OverallScore = math.Round(totalScore/float64(count)*10) / 10
		}
		entries = append(entries, entry)
	}
	return entries
}
// Helper functions
// calculateCriticalityMultiplier penalizes the GCI when high-risk modules
// (risk weight >= 2.5) have low completion (final score < 50). The penalty
// is proportional to the share of low-scoring critical modules, capped at a
// 15% reduction; with no critical modules the multiplier is neutral (1.0).
func calculateCriticalityMultiplier(modules []ModuleScore) float64 {
	criticalTotal, criticalLowScore := 0, 0
	for _, mod := range modules {
		if mod.RiskWeight < 2.5 {
			continue
		}
		criticalTotal++
		if mod.FinalScore < 50 {
			criticalLowScore++
		}
	}
	if criticalTotal == 0 {
		return 1.0
	}
	lowRatio := float64(criticalLowScore) / float64(criticalTotal)
	return 1.0 - (lowRatio * 0.15) // max 15% reduction
}
func calculateIncidentAdjustment(openIncidents, criticalIncidents int) float64 {
adj := 1.0
// Each open incident reduces by 1%
adj -= float64(openIncidents) * 0.01
// Each critical incident reduces by additional 3%
adj -= float64(criticalIncidents) * 0.03
return math.Max(0.8, adj) // minimum 80% (max 20% reduction)
}
// determineMaturityLevel maps a final GCI score onto its maturity band:
// >=90 optimized, >=75 managed, >=60 defined, >=40 reactive, else high risk.
func determineMaturityLevel(score float64) string {
	bands := []struct {
		min   float64
		level string
	}{
		{90, MaturityOptimized},
		{75, MaturityManaged},
		{60, MaturityDefined},
		{40, MaturityReactive},
	}
	for _, band := range bands {
		if score >= band.min {
			return band.level
		}
	}
	return MaturityHighRisk
}

View File

@@ -1,188 +0,0 @@
package gci
import "math"
// ISOGapAnalysis represents the complete ISO 27001 gap analysis
type ISOGapAnalysis struct {
	TenantID       string `json:"tenant_id"`
	TotalControls  int    `json:"total_controls"`
	CoveredFull    int    `json:"covered_full"`
	CoveredPartial int    `json:"covered_partial"`
	NotCovered     int    `json:"not_covered"`
	// CoveragePercent = (full + partial) / total, rounded to one decimal.
	CoveragePercent   float64              `json:"coverage_percent"`
	CategorySummaries []ISOCategorySummary `json:"category_summaries"`
	ControlDetails    []ISOControlDetail   `json:"control_details"`
	Gaps              []ISOGap             `json:"gaps"`
}

// ISOControlDetail shows coverage status for a single control
type ISOControlDetail struct {
	Control       ISOControl `json:"control"`
	CoverageLevel string     `json:"coverage_level"` // full, partial, none
	CoveredBy     []string   `json:"covered_by"`     // module IDs
	Score         float64    `json:"score"`          // 0-100
}

// ISOGap represents an identified gap in ISO coverage
// (one entry per control with no covering module).
type ISOGap struct {
	ControlID      string `json:"control_id"`
	ControlName    string `json:"control_name"`
	Category       string `json:"category"`
	Priority       string `json:"priority"` // high, medium, low
	Recommendation string `json:"recommendation"`
}
// CalculateISOGapAnalysis performs the ISO 27001 gap analysis
//
// Maps each Annex A control to the training modules that cover it, scores
// coverage from module completion (partial coverage discounted to 70%), and
// emits per-category summaries plus a gap list for uncovered controls.
func CalculateISOGapAnalysis(tenantID string) *ISOGapAnalysis {
	modules := MockModuleData(tenantID)
	moduleMap := map[string]ModuleScore{}
	for _, m := range modules {
		moduleMap[m.ModuleID] = m
	}
	// Build reverse mapping: control -> modules covering it
	controlCoverage := map[string][]string{}
	controlCoverageLevel := map[string]string{}
	for _, mapping := range DefaultISOModuleMappings {
		for _, controlID := range mapping.ISOControls {
			controlCoverage[controlID] = append(controlCoverage[controlID], mapping.ModuleID)
			// Use the highest coverage level: "full" always wins, otherwise
			// keep whatever was recorded first.
			existingLevel := controlCoverageLevel[controlID]
			if mapping.CoverageLevel == "full" || existingLevel == "" {
				controlCoverageLevel[controlID] = mapping.CoverageLevel
			}
		}
	}
	// Analyze each control
	details := []ISOControlDetail{}
	gaps := []ISOGap{}
	coveredFull := 0
	coveredPartial := 0
	notCovered := 0
	categoryCounts := map[string]*ISOCategorySummary{
		"A.5": {CategoryID: "A.5", CategoryName: "Organisatorische Massnahmen"},
		"A.6": {CategoryID: "A.6", CategoryName: "Personelle Massnahmen"},
		"A.7": {CategoryID: "A.7", CategoryName: "Physische Massnahmen"},
		"A.8": {CategoryID: "A.8", CategoryName: "Technologische Massnahmen"},
	}
	for _, control := range ISOControls {
		coveredBy := controlCoverage[control.ID]
		level := controlCoverageLevel[control.ID]
		if len(coveredBy) == 0 {
			level = "none"
		}
		// Calculate score based on module completion (mean over covering
		// modules that have at least one assignee).
		score := 0.0
		if len(coveredBy) > 0 {
			scoreSum := 0.0
			count := 0
			for _, modID := range coveredBy {
				if m, ok := moduleMap[modID]; ok && m.Assigned > 0 {
					scoreSum += float64(m.Completed) / float64(m.Assigned) * 100
					count++
				}
			}
			if count > 0 {
				score = scoreSum / float64(count)
			}
			// Adjust for coverage level
			if level == "partial" {
				score *= 0.7 // partial coverage reduces effective score
			}
		}
		detail := ISOControlDetail{
			Control:       control,
			CoverageLevel: level,
			CoveredBy:     coveredBy,
			Score:         math.Round(score*10) / 10,
		}
		details = append(details, detail)
		// Count by category.
		// NOTE(review): the global full/partial/none tallies and the gap
		// list are only updated inside this `cat != nil` branch, so a
		// control with an unknown CategoryID would be silently excluded
		// from the totals. Currently benign (all controls use A.5-A.8)
		// but fragile if new categories are added.
		cat := categoryCounts[control.CategoryID]
		if cat != nil {
			cat.TotalControls++
			switch level {
			case "full":
				coveredFull++
				cat.CoveredFull++
			case "partial":
				coveredPartial++
				cat.CoveredPartial++
			default:
				notCovered++
				cat.NotCovered++
				// Generate gap recommendation
				gap := ISOGap{
					ControlID:      control.ID,
					ControlName:    control.Name,
					Category:       control.Category,
					Priority:       determineGapPriority(control),
					Recommendation: generateGapRecommendation(control),
				}
				gaps = append(gaps, gap)
			}
		}
	}
	totalControls := len(ISOControls)
	coveragePercent := 0.0
	if totalControls > 0 {
		coveragePercent = math.Round(float64(coveredFull+coveredPartial)/float64(totalControls)*100*10) / 10
	}
	// Emit summaries in a stable category order.
	summaries := []ISOCategorySummary{}
	for _, catID := range []string{"A.5", "A.6", "A.7", "A.8"} {
		if cat, ok := categoryCounts[catID]; ok {
			summaries = append(summaries, *cat)
		}
	}
	return &ISOGapAnalysis{
		TenantID:          tenantID,
		TotalControls:     totalControls,
		CoveredFull:       coveredFull,
		CoveredPartial:    coveredPartial,
		NotCovered:        notCovered,
		CoveragePercent:   coveragePercent,
		CategorySummaries: summaries,
		ControlDetails:    details,
		Gaps:              gaps,
	}
}
// determineGapPriority classifies an uncovered control's remediation priority.
// Access, incident-handling and data-protection controls are always "high";
// remaining organizational (A.5) and people (A.6) controls are "medium";
// everything else is "low".
func determineGapPriority(control ISOControl) string {
	// Controls considered critical regardless of their category.
	criticalIDs := []string{
		"A.5.15", "A.5.17", "A.5.24", "A.5.26",
		"A.5.34", "A.8.2", "A.8.5", "A.8.7",
		"A.8.10", "A.8.20",
	}
	for _, id := range criticalIDs {
		if id == control.ID {
			return "high"
		}
	}
	switch control.CategoryID {
	case "A.5", "A.6":
		// Organizational and people controls default to medium priority.
		return "medium"
	default:
		return "low"
	}
}
// generateGapRecommendation builds a German remediation hint for an uncovered
// control, phrased according to the control's category; unknown categories
// fall back to a generic recommendation.
func generateGapRecommendation(control ISOControl) string {
	switch control.Category {
	case "organizational":
		return "Erstellen Sie eine Richtlinie und weisen Sie Verantwortlichkeiten zu fuer: " + control.Name
	case "people":
		return "Implementieren Sie Schulungen und Prozesse fuer: " + control.Name
	case "physical":
		return "Definieren Sie physische Sicherheitsmassnahmen fuer: " + control.Name
	case "technological":
		return "Implementieren Sie technische Kontrollen fuer: " + control.Name
	default:
		return "Massnahmen implementieren fuer: " + control.Name
	}
}

View File

@@ -1,207 +0,0 @@
package gci
// ISOControl represents an ISO 27001:2022 Annex A control.
type ISOControl struct {
	ID          string `json:"id"`          // e.g. "A.5.1"
	Name        string `json:"name"`        // German display name
	Category    string `json:"category"`    // organizational, people, physical, technological
	CategoryID  string `json:"category_id"` // A.5, A.6, A.7, A.8
	Description string `json:"description"` // short German description of the control's intent
}

// ISOModuleMapping maps a course/module to ISO controls.
type ISOModuleMapping struct {
	ModuleID      string   `json:"module_id"`
	ModuleName    string   `json:"module_name"`    // German display name of the module
	ISOControls   []string `json:"iso_controls"`   // control IDs (ISOControl.ID values)
	CoverageLevel string   `json:"coverage_level"` // full, partial, none
}
// ISO 27001:2022 Annex A controls (representative selection).
// Names and descriptions use ASCII transliteration for German umlauts
// (ue/ae/oe/ss) throughout.
var ISOControls = []ISOControl{
	// A.5 Organizational Controls (37 controls, showing key ones)
	{ID: "A.5.1", Name: "Informationssicherheitsrichtlinien", Category: "organizational", CategoryID: "A.5", Description: "Informationssicherheitsleitlinie und themenspezifische Richtlinien"},
	{ID: "A.5.2", Name: "Rollen und Verantwortlichkeiten", Category: "organizational", CategoryID: "A.5", Description: "Definition und Zuweisung von Informationssicherheitsrollen"},
	{ID: "A.5.3", Name: "Aufgabentrennung", Category: "organizational", CategoryID: "A.5", Description: "Trennung von konfligierenden Aufgaben und Verantwortlichkeiten"},
	{ID: "A.5.4", Name: "Managementverantwortung", Category: "organizational", CategoryID: "A.5", Description: "Fuehrungskraefte muessen Sicherheitsrichtlinien einhalten und durchsetzen"},
	{ID: "A.5.5", Name: "Kontakt mit Behoerden", Category: "organizational", CategoryID: "A.5", Description: "Pflege von Kontakten zu relevanten Aufsichtsbehoerden"},
	{ID: "A.5.6", Name: "Kontakt mit Interessengruppen", Category: "organizational", CategoryID: "A.5", Description: "Kontakt zu Fachgruppen und Sicherheitsforen"},
	{ID: "A.5.7", Name: "Bedrohungsintelligenz", Category: "organizational", CategoryID: "A.5", Description: "Sammlung und Analyse von Bedrohungsinformationen"},
	{ID: "A.5.8", Name: "Informationssicherheit im Projektmanagement", Category: "organizational", CategoryID: "A.5", Description: "Integration von Sicherheit in Projektmanagement"},
	{ID: "A.5.9", Name: "Inventar der Informationswerte", Category: "organizational", CategoryID: "A.5", Description: "Inventarisierung und Verwaltung von Informationswerten"},
	// Typo fix: "Zuleassige"/"zuleassige" -> "Zulaessige"/"zulaessige".
	{ID: "A.5.10", Name: "Zulaessige Nutzung", Category: "organizational", CategoryID: "A.5", Description: "Regeln fuer die zulaessige Nutzung von Informationswerten"},
	{ID: "A.5.11", Name: "Rueckgabe von Werten", Category: "organizational", CategoryID: "A.5", Description: "Rueckgabe von Werten bei Beendigung"},
	{ID: "A.5.12", Name: "Klassifizierung von Informationen", Category: "organizational", CategoryID: "A.5", Description: "Klassifizierungsschema fuer Informationen"},
	{ID: "A.5.13", Name: "Kennzeichnung von Informationen", Category: "organizational", CategoryID: "A.5", Description: "Kennzeichnung gemaess Klassifizierung"},
	{ID: "A.5.14", Name: "Informationsuebertragung", Category: "organizational", CategoryID: "A.5", Description: "Regeln fuer sichere Informationsuebertragung"},
	{ID: "A.5.15", Name: "Zugangssteuerung", Category: "organizational", CategoryID: "A.5", Description: "Zugangssteuerungsrichtlinie"},
	{ID: "A.5.16", Name: "Identitaetsmanagement", Category: "organizational", CategoryID: "A.5", Description: "Verwaltung des Lebenszyklus von Identitaeten"},
	{ID: "A.5.17", Name: "Authentifizierungsinformationen", Category: "organizational", CategoryID: "A.5", Description: "Verwaltung von Authentifizierungsinformationen"},
	{ID: "A.5.18", Name: "Zugriffsrechte", Category: "organizational", CategoryID: "A.5", Description: "Vergabe, Pruefung und Entzug von Zugriffsrechten"},
	{ID: "A.5.19", Name: "Informationssicherheit in Lieferantenbeziehungen", Category: "organizational", CategoryID: "A.5", Description: "Sicherheitsanforderungen an Lieferanten"},
	{ID: "A.5.20", Name: "Informationssicherheit in Lieferantenvereinbarungen", Category: "organizational", CategoryID: "A.5", Description: "Sicherheitsklauseln in Vertraegen"},
	{ID: "A.5.21", Name: "IKT-Lieferkette", Category: "organizational", CategoryID: "A.5", Description: "Management der IKT-Lieferkette"},
	{ID: "A.5.22", Name: "Ueberwachung von Lieferantenservices", Category: "organizational", CategoryID: "A.5", Description: "Ueberwachung und Pruefung von Lieferantenservices"},
	{ID: "A.5.23", Name: "Cloud-Sicherheit", Category: "organizational", CategoryID: "A.5", Description: "Informationssicherheit fuer Cloud-Dienste"},
	{ID: "A.5.24", Name: "Vorfallsmanagement - Planung", Category: "organizational", CategoryID: "A.5", Description: "Planung und Vorbereitung des Vorfallsmanagements"},
	{ID: "A.5.25", Name: "Vorfallsbeurteilung", Category: "organizational", CategoryID: "A.5", Description: "Beurteilung und Entscheidung ueber Sicherheitsereignisse"},
	{ID: "A.5.26", Name: "Vorfallsreaktion", Category: "organizational", CategoryID: "A.5", Description: "Reaktion auf Sicherheitsvorfaelle"},
	{ID: "A.5.27", Name: "Aus Vorfaellen lernen", Category: "organizational", CategoryID: "A.5", Description: "Lessons Learned aus Sicherheitsvorfaellen"},
	{ID: "A.5.28", Name: "Beweissicherung", Category: "organizational", CategoryID: "A.5", Description: "Identifikation und Sicherung von Beweisen"},
	{ID: "A.5.29", Name: "Informationssicherheit bei Stoerungen", Category: "organizational", CategoryID: "A.5", Description: "Sicherheit waehrend Stoerungen und Krisen"},
	{ID: "A.5.30", Name: "IKT-Bereitschaft fuer Business Continuity", Category: "organizational", CategoryID: "A.5", Description: "IKT-Bereitschaft zur Unterstuetzung der Geschaeftskontinuitaet"},
	{ID: "A.5.31", Name: "Rechtliche Anforderungen", Category: "organizational", CategoryID: "A.5", Description: "Einhaltung rechtlicher und vertraglicher Anforderungen"},
	{ID: "A.5.32", Name: "Geistige Eigentumsrechte", Category: "organizational", CategoryID: "A.5", Description: "Schutz geistigen Eigentums"},
	{ID: "A.5.33", Name: "Schutz von Aufzeichnungen", Category: "organizational", CategoryID: "A.5", Description: "Schutz von Aufzeichnungen vor Verlust und Manipulation"},
	{ID: "A.5.34", Name: "Datenschutz und PII", Category: "organizational", CategoryID: "A.5", Description: "Datenschutz und Schutz personenbezogener Daten"},
	{ID: "A.5.35", Name: "Unabhaengige Ueberpruefung", Category: "organizational", CategoryID: "A.5", Description: "Unabhaengige Ueberpruefung der Informationssicherheit"},
	{ID: "A.5.36", Name: "Richtlinienkonformitaet", Category: "organizational", CategoryID: "A.5", Description: "Einhaltung von Richtlinien und Standards"},
	{ID: "A.5.37", Name: "Dokumentierte Betriebsverfahren", Category: "organizational", CategoryID: "A.5", Description: "Dokumentation von Betriebsverfahren"},
	// A.6 People Controls (8 controls)
	{ID: "A.6.1", Name: "Ueberpruefen", Category: "people", CategoryID: "A.6", Description: "Hintergrundpruefungen vor der Einstellung"},
	{ID: "A.6.2", Name: "Beschaeftigungsbedingungen", Category: "people", CategoryID: "A.6", Description: "Sicherheitsanforderungen in Arbeitsvertraegen"},
	{ID: "A.6.3", Name: "Sensibilisierung und Schulung", Category: "people", CategoryID: "A.6", Description: "Awareness-Programme und Schulungen"},
	{ID: "A.6.4", Name: "Disziplinarverfahren", Category: "people", CategoryID: "A.6", Description: "Formales Disziplinarverfahren"},
	{ID: "A.6.5", Name: "Verantwortlichkeiten nach Beendigung", Category: "people", CategoryID: "A.6", Description: "Sicherheitspflichten nach Beendigung des Beschaeftigungsverhaeltnisses"},
	{ID: "A.6.6", Name: "Vertraulichkeitsvereinbarungen", Category: "people", CategoryID: "A.6", Description: "Vertraulichkeits- und Geheimhaltungsvereinbarungen"},
	{ID: "A.6.7", Name: "Remote-Arbeit", Category: "people", CategoryID: "A.6", Description: "Sicherheitsmassnahmen fuer Remote-Arbeit"},
	{ID: "A.6.8", Name: "Meldung von Sicherheitsereignissen", Category: "people", CategoryID: "A.6", Description: "Mechanismen zur Meldung von Sicherheitsereignissen"},
	// A.7 Physical Controls (14 controls, showing key ones)
	{ID: "A.7.1", Name: "Physische Sicherheitsperimeter", Category: "physical", CategoryID: "A.7", Description: "Definition physischer Sicherheitszonen"},
	{ID: "A.7.2", Name: "Physischer Zutritt", Category: "physical", CategoryID: "A.7", Description: "Zutrittskontrolle zu Sicherheitszonen"},
	{ID: "A.7.3", Name: "Sicherung von Bueros und Raeumen", Category: "physical", CategoryID: "A.7", Description: "Physische Sicherheit fuer Bueros und Raeume"},
	{ID: "A.7.4", Name: "Physische Sicherheitsueberwachung", Category: "physical", CategoryID: "A.7", Description: "Ueberwachung physischer Sicherheit"},
	{ID: "A.7.5", Name: "Schutz vor Umweltgefahren", Category: "physical", CategoryID: "A.7", Description: "Schutz gegen natuerliche und menschgemachte Gefahren"},
	{ID: "A.7.6", Name: "Arbeit in Sicherheitszonen", Category: "physical", CategoryID: "A.7", Description: "Regeln fuer das Arbeiten in Sicherheitszonen"},
	// Typo fix: "Aufgeraemter" -> "Aufgeraeumter".
	{ID: "A.7.7", Name: "Aufgeraeumter Schreibtisch", Category: "physical", CategoryID: "A.7", Description: "Clean-Desk und Clear-Screen Richtlinie"},
	{ID: "A.7.8", Name: "Geraeteplatzierung", Category: "physical", CategoryID: "A.7", Description: "Platzierung und Schutz von Geraeten"},
	{ID: "A.7.9", Name: "Sicherheit von Geraeten ausserhalb", Category: "physical", CategoryID: "A.7", Description: "Sicherheit von Geraeten ausserhalb der Raeumlichkeiten"},
	{ID: "A.7.10", Name: "Speichermedien", Category: "physical", CategoryID: "A.7", Description: "Verwaltung von Speichermedien"},
	{ID: "A.7.11", Name: "Versorgungseinrichtungen", Category: "physical", CategoryID: "A.7", Description: "Schutz vor Ausfaellen der Versorgungseinrichtungen"},
	{ID: "A.7.12", Name: "Verkabelungssicherheit", Category: "physical", CategoryID: "A.7", Description: "Schutz der Verkabelung"},
	{ID: "A.7.13", Name: "Instandhaltung von Geraeten", Category: "physical", CategoryID: "A.7", Description: "Korrekte Instandhaltung von Geraeten"},
	{ID: "A.7.14", Name: "Sichere Entsorgung", Category: "physical", CategoryID: "A.7", Description: "Sichere Entsorgung oder Wiederverwendung"},
	// A.8 Technological Controls (34 controls, showing key ones)
	{ID: "A.8.1", Name: "Endbenutzergeraete", Category: "technological", CategoryID: "A.8", Description: "Sicherheit von Endbenutzergeraeten"},
	{ID: "A.8.2", Name: "Privilegierte Zugriffsrechte", Category: "technological", CategoryID: "A.8", Description: "Verwaltung privilegierter Zugriffsrechte"},
	{ID: "A.8.3", Name: "Informationszugangsbeschraenkung", Category: "technological", CategoryID: "A.8", Description: "Beschraenkung des Zugangs zu Informationen"},
	{ID: "A.8.4", Name: "Zugang zu Quellcode", Category: "technological", CategoryID: "A.8", Description: "Sicherer Zugang zu Quellcode"},
	{ID: "A.8.5", Name: "Sichere Authentifizierung", Category: "technological", CategoryID: "A.8", Description: "Sichere Authentifizierungstechnologien"},
	{ID: "A.8.6", Name: "Kapazitaetsmanagement", Category: "technological", CategoryID: "A.8", Description: "Ueberwachung und Anpassung der Kapazitaet"},
	{ID: "A.8.7", Name: "Schutz gegen Malware", Category: "technological", CategoryID: "A.8", Description: "Schutz vor Schadprogrammen"},
	{ID: "A.8.8", Name: "Management technischer Schwachstellen", Category: "technological", CategoryID: "A.8", Description: "Identifikation und Behebung von Schwachstellen"},
	{ID: "A.8.9", Name: "Konfigurationsmanagement", Category: "technological", CategoryID: "A.8", Description: "Sichere Konfiguration von Systemen"},
	{ID: "A.8.10", Name: "Datensicherung", Category: "technological", CategoryID: "A.8", Description: "Erstellen und Testen von Datensicherungen"},
	{ID: "A.8.11", Name: "Datenredundanz", Category: "technological", CategoryID: "A.8", Description: "Redundanz von Informationsverarbeitungseinrichtungen"},
	{ID: "A.8.12", Name: "Protokollierung", Category: "technological", CategoryID: "A.8", Description: "Aufzeichnung und Ueberwachung von Aktivitaeten"},
	{ID: "A.8.13", Name: "Ueberwachung von Aktivitaeten", Category: "technological", CategoryID: "A.8", Description: "Ueberwachung von Netzwerken und Systemen"},
	{ID: "A.8.14", Name: "Zeitsynchronisation", Category: "technological", CategoryID: "A.8", Description: "Synchronisation von Uhren"},
	{ID: "A.8.15", Name: "Nutzung privilegierter Hilfsprogramme", Category: "technological", CategoryID: "A.8", Description: "Einschraenkung privilegierter Hilfsprogramme"},
	{ID: "A.8.16", Name: "Softwareinstallation", Category: "technological", CategoryID: "A.8", Description: "Kontrolle der Softwareinstallation"},
	{ID: "A.8.17", Name: "Netzwerksicherheit", Category: "technological", CategoryID: "A.8", Description: "Sicherheit von Netzwerken"},
	{ID: "A.8.18", Name: "Netzwerksegmentierung", Category: "technological", CategoryID: "A.8", Description: "Segmentierung von Netzwerken"},
	{ID: "A.8.19", Name: "Webfilterung", Category: "technological", CategoryID: "A.8", Description: "Filterung des Webzugangs"},
	{ID: "A.8.20", Name: "Kryptografie", Category: "technological", CategoryID: "A.8", Description: "Einsatz kryptografischer Massnahmen"},
	// Grammar fix: "Sichere Entwicklungslebenszyklus" -> "Sicherer Entwicklungslebenszyklus".
	{ID: "A.8.21", Name: "Sichere Entwicklung", Category: "technological", CategoryID: "A.8", Description: "Sicherer Entwicklungslebenszyklus"},
	{ID: "A.8.22", Name: "Sicherheitsanforderungen bei Applikationen", Category: "technological", CategoryID: "A.8", Description: "Sicherheitsanforderungen bei Anwendungen"},
	{ID: "A.8.23", Name: "Sichere Systemarchitektur", Category: "technological", CategoryID: "A.8", Description: "Sicherheitsprinzipien in der Systemarchitektur"},
	{ID: "A.8.24", Name: "Sicheres Programmieren", Category: "technological", CategoryID: "A.8", Description: "Sichere Programmierpraktiken"},
	{ID: "A.8.25", Name: "Sicherheitstests", Category: "technological", CategoryID: "A.8", Description: "Sicherheitstests in der Entwicklung und Abnahme"},
	{ID: "A.8.26", Name: "Auslagerung der Entwicklung", Category: "technological", CategoryID: "A.8", Description: "Ueberwachung ausgelagerter Entwicklung"},
	{ID: "A.8.27", Name: "Trennung von Umgebungen", Category: "technological", CategoryID: "A.8", Description: "Trennung von Entwicklungs-, Test- und Produktionsumgebungen"},
	{ID: "A.8.28", Name: "Aenderungsmanagement", Category: "technological", CategoryID: "A.8", Description: "Formales Aenderungsmanagement"},
	{ID: "A.8.29", Name: "Sicherheitstests in der Abnahme", Category: "technological", CategoryID: "A.8", Description: "Durchfuehrung von Sicherheitstests vor Abnahme"},
	{ID: "A.8.30", Name: "Datenloeschung", Category: "technological", CategoryID: "A.8", Description: "Sichere Datenloeschung"},
	{ID: "A.8.31", Name: "Datenmaskierung", Category: "technological", CategoryID: "A.8", Description: "Techniken zur Datenmaskierung"},
	{ID: "A.8.32", Name: "Verhinderung von Datenverlust", Category: "technological", CategoryID: "A.8", Description: "DLP-Massnahmen"},
	{ID: "A.8.33", Name: "Testinformationen", Category: "technological", CategoryID: "A.8", Description: "Schutz von Testinformationen"},
	{ID: "A.8.34", Name: "Audit-Informationssysteme", Category: "technological", CategoryID: "A.8", Description: "Schutz von Audit-Tools und -systemen"},
}
// Default mappings: which modules cover which ISO controls.
// NOTE(review): CoverageLevel applies to the mapping as a whole, i.e. every
// control listed inherits the module's level — confirm this matches how the
// gap analysis consumes it.
var DefaultISOModuleMappings = []ISOModuleMapping{
	{
		ModuleID: "iso-isms", ModuleName: "ISMS Grundlagen",
		ISOControls:   []string{"A.5.1", "A.5.2", "A.5.3", "A.5.4", "A.5.35", "A.5.36"},
		CoverageLevel: "full",
	},
	{
		ModuleID: "iso-risikobewertung", ModuleName: "Risikobewertung",
		ISOControls:   []string{"A.5.7", "A.5.8", "A.5.9", "A.5.10", "A.5.12", "A.5.13"},
		CoverageLevel: "full",
	},
	{
		ModuleID: "iso-zugangssteuerung", ModuleName: "Zugangssteuerung",
		ISOControls:   []string{"A.5.15", "A.5.16", "A.5.17", "A.5.18", "A.8.2", "A.8.3", "A.8.5"},
		CoverageLevel: "full",
	},
	{
		ModuleID: "iso-kryptografie", ModuleName: "Kryptografie",
		ISOControls:   []string{"A.8.20", "A.8.21", "A.8.24"},
		CoverageLevel: "partial",
	},
	{
		ModuleID: "iso-physisch", ModuleName: "Physische Sicherheit",
		ISOControls:   []string{"A.7.1", "A.7.2", "A.7.3", "A.7.4", "A.7.5", "A.7.7", "A.7.8"},
		CoverageLevel: "full",
	},
	{
		ModuleID: "dsgvo-tom", ModuleName: "Technisch-Organisatorische Massnahmen",
		ISOControls:   []string{"A.5.34", "A.8.10", "A.8.12", "A.8.30", "A.8.31"},
		CoverageLevel: "partial",
	},
	{
		ModuleID: "nis2-incident-response", ModuleName: "NIS2 Incident Response",
		ISOControls:   []string{"A.5.24", "A.5.25", "A.5.26", "A.5.27", "A.5.28", "A.6.8"},
		CoverageLevel: "full",
	},
	{
		ModuleID: "nis2-supply-chain", ModuleName: "NIS2 Lieferkettensicherheit",
		ISOControls:   []string{"A.5.19", "A.5.20", "A.5.21", "A.5.22", "A.5.23"},
		CoverageLevel: "full",
	},
	{
		ModuleID: "nis2-risikomanagement", ModuleName: "NIS2 Risikomanagement",
		ISOControls:   []string{"A.5.29", "A.5.30", "A.8.6", "A.8.7", "A.8.8", "A.8.9"},
		CoverageLevel: "partial",
	},
	{
		ModuleID: "dsgvo-grundlagen", ModuleName: "DSGVO Grundlagen",
		ISOControls:   []string{"A.5.31", "A.5.34", "A.6.2", "A.6.3"},
		CoverageLevel: "partial",
	},
}
// GetISOControlByID looks up a control in ISOControls by its ID.
// The second return value reports whether the ID was found; on a miss the
// zero-valued ISOControl is returned.
func GetISOControlByID(id string) (ISOControl, bool) {
	for i := range ISOControls {
		if ISOControls[i].ID == id {
			return ISOControls[i], true
		}
	}
	return ISOControl{}, false
}
// GetISOControlsByCategory collects every control whose CategoryID matches
// (e.g. "A.5"). Returns nil when no control matches.
func GetISOControlsByCategory(categoryID string) []ISOControl {
	var matches []ISOControl
	for i := range ISOControls {
		if c := ISOControls[i]; c.CategoryID == categoryID {
			matches = append(matches, c)
		}
	}
	return matches
}
// ISOCategorySummary provides a summary per ISO category (A.5 - A.8):
// how many of its controls are fully, partially, or not covered by modules.
type ISOCategorySummary struct {
	CategoryID     string `json:"category_id"`   // A.5, A.6, A.7, A.8
	CategoryName   string `json:"category_name"` // German display name
	TotalControls  int    `json:"total_controls"`
	CoveredFull    int    `json:"covered_full"`
	CoveredPartial int    `json:"covered_partial"`
	NotCovered     int    `json:"not_covered"`
}

View File

@@ -1,74 +0,0 @@
package gci
import "time"
// MockModuleData provides fallback data when academy store is empty.
//
// tenantID is currently ignored — the same fixture is returned for every
// tenant. Only Assigned, Completed, Category and RiskWeight are populated;
// the derived fields (RawScore, ValidityFactor, FinalScore) are left at
// their zero values, presumably filled in later by the scoring pipeline
// — TODO confirm against the caller.
func MockModuleData(tenantID string) []ModuleScore {
	return []ModuleScore{
		// DSGVO modules
		{ModuleID: "dsgvo-grundlagen", ModuleName: "DSGVO Grundlagen", Assigned: 25, Completed: 22, Category: "dsgvo", RiskWeight: 2.0},
		{ModuleID: "dsgvo-betroffenenrechte", ModuleName: "Betroffenenrechte", Assigned: 25, Completed: 18, Category: "dsgvo", RiskWeight: 2.5},
		{ModuleID: "dsgvo-tom", ModuleName: "Technisch-Organisatorische Massnahmen", Assigned: 20, Completed: 17, Category: "dsgvo", RiskWeight: 2.5},
		{ModuleID: "dsgvo-dsfa", ModuleName: "Datenschutz-Folgenabschaetzung", Assigned: 15, Completed: 10, Category: "dsgvo", RiskWeight: 2.0},
		{ModuleID: "dsgvo-auftragsverarbeitung", ModuleName: "Auftragsverarbeitung", Assigned: 20, Completed: 16, Category: "dsgvo", RiskWeight: 2.0},
		// NIS2 modules
		{ModuleID: "nis2-risikomanagement", ModuleName: "NIS2 Risikomanagement", Assigned: 15, Completed: 11, Category: "nis2", RiskWeight: 3.0},
		{ModuleID: "nis2-incident-response", ModuleName: "NIS2 Incident Response", Assigned: 15, Completed: 9, Category: "nis2", RiskWeight: 3.0},
		{ModuleID: "nis2-supply-chain", ModuleName: "NIS2 Lieferkettensicherheit", Assigned: 10, Completed: 6, Category: "nis2", RiskWeight: 2.0},
		{ModuleID: "nis2-management", ModuleName: "NIS2 Geschaeftsleitungspflicht", Assigned: 10, Completed: 8, Category: "nis2", RiskWeight: 3.0},
		// ISO 27001 modules
		{ModuleID: "iso-isms", ModuleName: "ISMS Grundlagen", Assigned: 20, Completed: 16, Category: "iso27001", RiskWeight: 2.0},
		{ModuleID: "iso-risikobewertung", ModuleName: "Risikobewertung", Assigned: 15, Completed: 12, Category: "iso27001", RiskWeight: 2.0},
		{ModuleID: "iso-zugangssteuerung", ModuleName: "Zugangssteuerung", Assigned: 20, Completed: 18, Category: "iso27001", RiskWeight: 2.0},
		{ModuleID: "iso-kryptografie", ModuleName: "Kryptografie", Assigned: 10, Completed: 7, Category: "iso27001", RiskWeight: 1.5},
		{ModuleID: "iso-physisch", ModuleName: "Physische Sicherheit", Assigned: 10, Completed: 9, Category: "iso27001", RiskWeight: 1.0},
		// AI Act modules
		{ModuleID: "ai-risikokategorien", ModuleName: "KI-Risikokategorien", Assigned: 15, Completed: 12, Category: "ai_act", RiskWeight: 2.5},
		{ModuleID: "ai-transparenz", ModuleName: "KI-Transparenzpflichten", Assigned: 15, Completed: 10, Category: "ai_act", RiskWeight: 2.0},
		{ModuleID: "ai-hochrisiko", ModuleName: "Hochrisiko-KI-Systeme", Assigned: 10, Completed: 6, Category: "ai_act", RiskWeight: 2.5},
		{ModuleID: "ai-governance", ModuleName: "KI-Governance", Assigned: 10, Completed: 7, Category: "ai_act", RiskWeight: 2.0},
	}
}
// MockCertificateData provides mock certificate validity dates.
//
// Keys are module IDs (matching MockModuleData); values are expiry
// timestamps relative to time.Now(). Some entries lie deliberately in the
// past or near future to exercise "expired" and "expiring soon" handling.
// Note: values are non-deterministic across calls because they derive from
// the current clock.
func MockCertificateData() map[string]time.Time {
	now := time.Now()
	return map[string]time.Time{
		"dsgvo-grundlagen":           now.AddDate(0, 8, 0),  // valid 8 months
		"dsgvo-betroffenenrechte":    now.AddDate(0, 3, 0),  // expiring in 3 months
		"dsgvo-tom":                  now.AddDate(0, 10, 0), // valid
		"dsgvo-dsfa":                 now.AddDate(0, -1, 0), // expired 1 month ago
		"dsgvo-auftragsverarbeitung": now.AddDate(0, 6, 0),
		"nis2-risikomanagement":      now.AddDate(0, 5, 0),
		"nis2-incident-response":     now.AddDate(0, 2, 0),  // expiring soon
		"nis2-supply-chain":          now.AddDate(0, -2, 0), // expired 2 months
		"nis2-management":            now.AddDate(0, 9, 0),
		"iso-isms":                   now.AddDate(1, 0, 0),
		"iso-risikobewertung":        now.AddDate(0, 4, 0),
		"iso-zugangssteuerung":       now.AddDate(0, 11, 0),
		"iso-kryptografie":           now.AddDate(0, 1, 0), // expiring in 1 month
		"iso-physisch":               now.AddDate(0, 7, 0),
		"ai-risikokategorien":        now.AddDate(0, 6, 0),
		"ai-transparenz":             now.AddDate(0, 3, 0),
		"ai-hochrisiko":              now.AddDate(0, -3, 0), // expired 3 months
		"ai-governance":              now.AddDate(0, 5, 0),
	}
}
// MockIncidentData returns mock incident counts for adjustment.
func MockIncidentData() (openIncidents int, criticalIncidents int) {
	// Fixed fixture: 3 open incidents, 1 of them critical.
	openIncidents = 3
	criticalIncidents = 1
	return
}
// MockGCIHistory returns mock historical GCI snapshots.
//
// Produces three snapshots for the given tenant, oldest first (3, 2 and 1
// months before now), with a steadily improving score — suitable for
// rendering trend charts in demos.
func MockGCIHistory(tenantID string) []GCISnapshot {
	now := time.Now()
	return []GCISnapshot{
		{TenantID: tenantID, Score: 58.2, MaturityLevel: MaturityReactive, AreaScores: map[string]float64{"dsgvo": 62, "nis2": 48, "iso27001": 60, "ai_act": 55}, CalculatedAt: now.AddDate(0, -3, 0)},
		{TenantID: tenantID, Score: 62.5, MaturityLevel: MaturityDefined, AreaScores: map[string]float64{"dsgvo": 65, "nis2": 55, "iso27001": 63, "ai_act": 58}, CalculatedAt: now.AddDate(0, -2, 0)},
		{TenantID: tenantID, Score: 67.8, MaturityLevel: MaturityDefined, AreaScores: map[string]float64{"dsgvo": 70, "nis2": 60, "iso27001": 68, "ai_act": 62}, CalculatedAt: now.AddDate(0, -1, 0)},
	}
}

View File

@@ -1,104 +0,0 @@
package gci
import "time"
// Level 1: Module Score.
// ModuleScore captures per-module training completion plus the factors
// that turn it into a weighted score.
type ModuleScore struct {
	ModuleID       string  `json:"module_id"`
	ModuleName     string  `json:"module_name"`
	Assigned       int     `json:"assigned"`        // users assigned to the module
	Completed      int     `json:"completed"`       // users who completed it
	RawScore       float64 `json:"raw_score"`       // completions/assigned
	ValidityFactor float64 `json:"validity_factor"` // 0.0-1.0
	FinalScore     float64 `json:"final_score"`     // RawScore * ValidityFactor
	RiskWeight     float64 `json:"risk_weight"`     // module criticality weight
	Category       string  `json:"category"`        // dsgvo, nis2, iso27001, ai_act
}

// Level 2: Risk-weighted Module Score per regulation area.
type RiskWeightedScore struct {
	AreaID      string        `json:"area_id"`
	AreaName    string        `json:"area_name"`
	Modules     []ModuleScore `json:"modules"`
	WeightedSum float64       `json:"weighted_sum"`
	TotalWeight float64       `json:"total_weight"`
	AreaScore   float64       `json:"area_score"` // WeightedSum / TotalWeight
}

// Level 3: Regulation Area Score.
type RegulationAreaScore struct {
	RegulationID   string  `json:"regulation_id"`   // dsgvo, nis2, iso27001, ai_act
	RegulationName string  `json:"regulation_name"` // Display name
	Score          float64 `json:"score"`           // 0-100
	Weight         float64 `json:"weight"`          // regulation weight in GCI
	WeightedScore  float64 `json:"weighted_score"`  // Score * Weight
	ModuleCount    int     `json:"module_count"`
	CompletedCount int     `json:"completed_count"`
}

// Level 4: GCI Result — the tenant-level aggregate.
type GCIResult struct {
	TenantID        string                `json:"tenant_id"`
	GCIScore        float64               `json:"gci_score"`      // 0-100
	MaturityLevel   string                `json:"maturity_level"` // Optimized, Managed, Defined, Reactive, HighRisk
	MaturityLabel   string                `json:"maturity_label"` // German label
	CalculatedAt    time.Time             `json:"calculated_at"`
	Profile         string                `json:"profile"` // default, nis2_relevant, ki_nutzer
	AreaScores      []RegulationAreaScore `json:"area_scores"`
	CriticalityMult float64               `json:"criticality_multiplier"`
	IncidentAdj     float64               `json:"incident_adjustment"`
	AuditTrail      []AuditEntry          `json:"audit_trail"`
}

// GCI Breakdown with all 4 levels: embeds the final result and exposes the
// intermediate level-1 and level-2 data used to compute it.
type GCIBreakdown struct {
	GCIResult
	Level1Modules []ModuleScore       `json:"level1_modules"`
	Level2Areas   []RiskWeightedScore `json:"level2_areas"`
}
// MaturityLevel constants — canonical identifiers stored in GCIResult and
// GCISnapshot. Keep in sync with MaturityLabels below.
const (
	MaturityOptimized = "OPTIMIZED"
	MaturityManaged   = "MANAGED"
	MaturityDefined   = "DEFINED"
	MaturityReactive  = "REACTIVE"
	MaturityHighRisk  = "HIGH_RISK"
)

// Maturity level labels (German), keyed by the constants above.
var MaturityLabels = map[string]string{
	MaturityOptimized: "Optimiert",
	MaturityManaged:   "Gesteuert",
	MaturityDefined:   "Definiert",
	MaturityReactive:  "Reaktiv",
	MaturityHighRisk:  "Hohes Risiko",
}

// AuditEntry for score transparency: records one factor that influenced a
// computed score and the direction of its effect.
type AuditEntry struct {
	Timestamp   time.Time `json:"timestamp"`
	Factor      string    `json:"factor"`
	Description string    `json:"description"`
	Value       float64   `json:"value"`
	Impact      string    `json:"impact"` // positive, negative, neutral
}

// ComplianceMatrixEntry maps roles to regulations.
type ComplianceMatrixEntry struct {
	Role             string             `json:"role"`
	RoleName         string             `json:"role_name"`
	Regulations      map[string]float64 `json:"regulations"` // regulation_id -> score
	OverallScore     float64            `json:"overall_score"`
	RequiredModules  int                `json:"required_modules"`
	CompletedModules int                `json:"completed_modules"`
}

// GCI History snapshot — a point-in-time record of the tenant's score,
// used for trend reporting.
type GCISnapshot struct {
	TenantID      string             `json:"tenant_id"`
	Score         float64            `json:"score"`
	MaturityLevel string             `json:"maturity_level"`
	AreaScores    map[string]float64 `json:"area_scores"` // regulation_id -> score
	CalculatedAt  time.Time          `json:"calculated_at"`
}

View File

@@ -1,118 +0,0 @@
package gci
// NIS2Role defines a NIS2 role classification.
type NIS2Role struct {
	ID               string   `json:"id"`   // N1..N5, see NIS2Roles
	Name             string   `json:"name"` // German display name
	Description      string   `json:"description"`
	MandatoryModules []string `json:"mandatory_modules"` // module IDs this role must complete
	Priority         int      `json:"priority"`          // 1=highest
}

// NIS2RoleAssignment represents a user's NIS2 role.
type NIS2RoleAssignment struct {
	TenantID   string `json:"tenant_id"`
	UserID     string `json:"user_id"`
	UserName   string `json:"user_name"`
	RoleID     string `json:"role_id"`     // key into NIS2Roles (N1..N5)
	RoleName   string `json:"role_name"`   // denormalized display name
	AssignedAt string `json:"assigned_at"` // date string, e.g. "2025-06-01"
}
// NIS2 role definitions, keyed by role ID. Module IDs in MandatoryModules
// reference the academy modules (see MockModuleData).
var NIS2Roles = map[string]NIS2Role{
	"N1": {
		ID:          "N1",
		Name:        "Geschaeftsleitung",
		Description: "Leitungsorgane mit persoenlicher Haftung gemaess NIS2 Art. 20",
		Priority:    1,
		MandatoryModules: []string{
			"nis2-management",
			"nis2-risikomanagement",
			"dsgvo-grundlagen",
			"iso-isms",
		},
	},
	"N2": {
		ID:          "N2",
		Name:        "IT-Sicherheit / CISO",
		Description: "Verantwortliche fuer IT-Sicherheit und Cybersecurity",
		Priority:    2,
		MandatoryModules: []string{
			"nis2-risikomanagement",
			"nis2-incident-response",
			"nis2-supply-chain",
			"iso-zugangssteuerung",
			"iso-kryptografie",
		},
	},
	"N3": {
		ID:          "N3",
		Name:        "Kritische Funktionen",
		Description: "Mitarbeiter in kritischen Geschaeftsprozessen",
		Priority:    3,
		MandatoryModules: []string{
			"nis2-risikomanagement",
			"nis2-incident-response",
			"dsgvo-tom",
			"iso-zugangssteuerung",
		},
	},
	"N4": {
		ID:          "N4",
		Name:        "Allgemeine Mitarbeiter",
		Description: "Alle Mitarbeiter mit IT-Zugang",
		Priority:    4,
		MandatoryModules: []string{
			"nis2-risikomanagement",
			"dsgvo-grundlagen",
			"iso-isms",
		},
	},
	"N5": {
		// Note: N5 shares priority 2 with N2; ListNIS2Roles orders it after N2.
		ID:          "N5",
		Name:        "Incident Response Team",
		Description: "Mitglieder des IRT/CSIRT gemaess NIS2 Art. 21",
		Priority:    2,
		MandatoryModules: []string{
			"nis2-incident-response",
			"nis2-risikomanagement",
			"nis2-supply-chain",
			"iso-zugangssteuerung",
			"iso-kryptografie",
			"iso-isms",
		},
	},
}
// GetNIS2Role returns the NIS2 role registered under roleID; the boolean
// reports whether the ID is known.
func GetNIS2Role(roleID string) (NIS2Role, bool) {
	role, found := NIS2Roles[roleID]
	return role, found
}
// ListNIS2Roles returns all NIS2 roles in a fixed priority order
// (N1, N2, N5, N3, N4 — N5 shares priority 2 with N2 and is listed after it).
// Always returns a non-nil slice.
func ListNIS2Roles() []NIS2Role {
	priorityOrder := [...]string{"N1", "N2", "N5", "N3", "N4"}
	result := make([]NIS2Role, 0, len(priorityOrder))
	for _, roleID := range priorityOrder {
		if role, known := NIS2Roles[roleID]; known {
			result = append(result, role)
		}
	}
	return result
}
// MockNIS2RoleAssignments returns mock role assignments.
// Seven fixture users spanning all five roles; the same set is returned for
// every tenantID (only the TenantID field varies).
func MockNIS2RoleAssignments(tenantID string) []NIS2RoleAssignment {
	return []NIS2RoleAssignment{
		{TenantID: tenantID, UserID: "user-001", UserName: "Dr. Schmidt", RoleID: "N1", RoleName: "Geschaeftsleitung", AssignedAt: "2025-06-01"},
		{TenantID: tenantID, UserID: "user-002", UserName: "M. Weber", RoleID: "N2", RoleName: "IT-Sicherheit / CISO", AssignedAt: "2025-06-01"},
		{TenantID: tenantID, UserID: "user-003", UserName: "S. Mueller", RoleID: "N5", RoleName: "Incident Response Team", AssignedAt: "2025-07-15"},
		{TenantID: tenantID, UserID: "user-004", UserName: "K. Fischer", RoleID: "N3", RoleName: "Kritische Funktionen", AssignedAt: "2025-08-01"},
		{TenantID: tenantID, UserID: "user-005", UserName: "L. Braun", RoleID: "N3", RoleName: "Kritische Funktionen", AssignedAt: "2025-08-01"},
		{TenantID: tenantID, UserID: "user-006", UserName: "A. Schwarz", RoleID: "N4", RoleName: "Allgemeine Mitarbeiter", AssignedAt: "2025-09-01"},
		{TenantID: tenantID, UserID: "user-007", UserName: "T. Wagner", RoleID: "N4", RoleName: "Allgemeine Mitarbeiter", AssignedAt: "2025-09-01"},
	}
}

View File

@@ -1,147 +0,0 @@
package gci
import "math"
// NIS2Score represents the NIS2-specific compliance score for one tenant.
type NIS2Score struct {
	TenantID       string          `json:"tenant_id"`
	OverallScore   float64         `json:"overall_score"`  // 0-100, weighted over AreaScores
	MaturityLevel  string          `json:"maturity_level"` // see MaturityLevel constants
	MaturityLabel  string          `json:"maturity_label"` // German label
	AreaScores     []NIS2AreaScore `json:"area_scores"`
	RoleCompliance []NIS2RoleScore `json:"role_compliance"`
}

// NIS2AreaScore represents a NIS2 compliance area.
type NIS2AreaScore struct {
	AreaID    string   `json:"area_id"`
	AreaName  string   `json:"area_name"`
	Score     float64  `json:"score"`      // 0-100, mean module completion
	Weight    float64  `json:"weight"`     // share in the overall score
	ModuleIDs []string `json:"module_ids"` // academy modules mapped to the area
}

// NIS2RoleScore represents completion per NIS2 role.
type NIS2RoleScore struct {
	RoleID         string  `json:"role_id"`
	RoleName       string  `json:"role_name"`
	AssignedUsers  int     `json:"assigned_users"`
	CompletionRate float64 `json:"completion_rate"` // 0-100 over mandatory modules
	MandatoryTotal int     `json:"mandatory_total"`
	MandatoryDone  int     `json:"mandatory_done"`
}
// NIS2 scoring areas with weights (weights sum to 1.0):
// NIS2Score = 25% Management + 25% Incident + 30% IT Security + 20% Supply Chain
var nis2Areas = []struct {
	ID        string
	Name      string
	Weight    float64
	ModuleIDs []string
}{
	{
		ID: "management", Name: "Management & Governance", Weight: 0.25,
		ModuleIDs: []string{"nis2-management", "dsgvo-grundlagen", "iso-isms"},
	},
	{
		ID: "incident", Name: "Vorfallsbehandlung", Weight: 0.25,
		ModuleIDs: []string{"nis2-incident-response"},
	},
	{
		ID: "it_security", Name: "IT-Sicherheit", Weight: 0.30,
		ModuleIDs: []string{"nis2-risikomanagement", "iso-zugangssteuerung", "iso-kryptografie"},
	},
	{
		ID: "supply_chain", Name: "Lieferkettensicherheit", Weight: 0.20,
		ModuleIDs: []string{"nis2-supply-chain", "dsgvo-auftragsverarbeitung"},
	},
}
// CalculateNIS2Score computes the NIS2-specific compliance score.
//
// Each area score is the mean completion percentage of its modules (from
// nis2Areas), rounded to one decimal; the overall score is the weighted sum
// of the area scores. Role compliance is delegated to
// calculateNIS2RoleScores. Data currently comes from the Mock* helpers.
func CalculateNIS2Score(tenantID string) *NIS2Score {
	modules := MockModuleData(tenantID)
	moduleMap := make(map[string]ModuleScore, len(modules))
	for _, m := range modules {
		moduleMap[m.ModuleID] = m
	}

	areaScores := make([]NIS2AreaScore, 0, len(nis2Areas))
	totalWeighted := 0.0
	for _, area := range nis2Areas {
		areaScore := NIS2AreaScore{
			AreaID:    area.ID,
			AreaName:  area.Name,
			Weight:    area.Weight,
			ModuleIDs: area.ModuleIDs,
		}
		scoreSum := 0.0
		count := 0
		for _, modID := range area.ModuleIDs {
			if m, ok := moduleMap[modID]; ok {
				if m.Assigned > 0 {
					scoreSum += float64(m.Completed) / float64(m.Assigned) * 100
				}
				// A known module with zero assignments still counts toward
				// the average (as 0%), matching the original behavior.
				count++
			}
		}
		if count > 0 {
			areaScore.Score = math.Round(scoreSum/float64(count)*10) / 10
		}
		totalWeighted += areaScore.Score * areaScore.Weight
		areaScores = append(areaScores, areaScore)
	}
	overallScore := math.Round(totalWeighted*10) / 10

	// Compute the maturity level once; the original evaluated
	// determineMaturityLevel twice for the two result fields.
	maturity := determineMaturityLevel(overallScore)

	roleAssignments := MockNIS2RoleAssignments(tenantID)
	roleScores := calculateNIS2RoleScores(roleAssignments, moduleMap)

	return &NIS2Score{
		TenantID:       tenantID,
		OverallScore:   overallScore,
		MaturityLevel:  maturity,
		MaturityLabel:  MaturityLabels[maturity],
		AreaScores:     areaScores,
		RoleCompliance: roleScores,
	}
}
// calculateNIS2RoleScores builds per-role completion statistics from the
// given role assignments and the per-module score map. A mandatory module
// counts as "done" once its completion rate reaches 80%.
func calculateNIS2RoleScores(assignments []NIS2RoleAssignment, moduleMap map[string]ModuleScore) []NIS2RoleScore {
	// Number of assigned users per role ID.
	usersPerRole := map[string]int{}
	for _, assignment := range assignments {
		usersPerRole[assignment.RoleID]++
	}

	scores := []NIS2RoleScore{}
	for roleID, role := range NIS2Roles {
		entry := NIS2RoleScore{
			RoleID:         roleID,
			RoleName:       role.Name,
			AssignedUsers:  usersPerRole[roleID],
			MandatoryTotal: len(role.MandatoryModules),
		}

		var rateSum float64
		for _, moduleID := range role.MandatoryModules {
			mod, known := moduleMap[moduleID]
			if !known || mod.Assigned <= 0 {
				continue // unknown or unassigned modules contribute nothing
			}
			rate := float64(mod.Completed) / float64(mod.Assigned)
			rateSum += rate
			if rate >= 0.8 { // 80%+ = considered done
				entry.MandatoryDone++
			}
		}
		if entry.MandatoryTotal > 0 {
			entry.CompletionRate = math.Round(rateSum/float64(entry.MandatoryTotal)*100*10) / 10
		}
		scores = append(scores, entry)
	}
	return scores
}

View File

@@ -1,59 +0,0 @@
package gci
import (
"math"
"time"
)
const (
// GracePeriodDays is the number of days after expiry during which
// the certificate still contributes (with declining factor)
GracePeriodDays = 180
// DecayStartDays is how many days before expiry the linear decay begins
DecayStartDays = 180
)
// CalculateValidityFactor computes the validity factor for a certificate
// based on its expiry date.
//
// Rules:
// - Certificate not yet expiring (>6 months): factor = 1.0
// - Certificate expiring within 6 months: linear decay from 1.0 to 0.5
// - Certificate expired: linear decay from 0.5 to 0.0 over grace period
// - Certificate expired beyond grace period: factor = 0.0
func CalculateValidityFactor(validUntil time.Time, now time.Time) float64 {
daysUntilExpiry := validUntil.Sub(now).Hours() / 24.0
if daysUntilExpiry > float64(DecayStartDays) {
// Not yet in decay window
return 1.0
}
if daysUntilExpiry > 0 {
// In pre-expiry decay window: linear from 1.0 to 0.5
fraction := daysUntilExpiry / float64(DecayStartDays)
return 0.5 + 0.5*fraction
}
// Certificate is expired
daysExpired := -daysUntilExpiry
if daysExpired > float64(GracePeriodDays) {
return 0.0
}
// In grace period: linear from 0.5 to 0.0
fraction := 1.0 - (daysExpired / float64(GracePeriodDays))
return math.Max(0, 0.5*fraction)
}
// IsExpired returns true if the certificate is past its validity date
func IsExpired(validUntil time.Time, now time.Time) bool {
return now.After(validUntil)
}
// IsExpiringSoon returns true if the certificate expires within the decay window
func IsExpiringSoon(validUntil time.Time, now time.Time) bool {
daysUntil := validUntil.Sub(now).Hours() / 24.0
return daysUntil > 0 && daysUntil <= float64(DecayStartDays)
}

View File

@@ -1,78 +0,0 @@
package gci
// WeightProfile defines regulation weights for different compliance profiles
type WeightProfile struct {
	ID          string             `json:"id"`
	Name        string             `json:"name"`
	Description string             `json:"description"`
	Weights     map[string]float64 `json:"weights"` // regulation_id -> weight (0.0-1.0)
}

// Default weight profiles
var DefaultProfiles = map[string]WeightProfile{
	"default": {
		ID:          "default",
		Name:        "Standard",
		Description: "Ausgewogenes Profil fuer allgemeine Compliance",
		Weights: map[string]float64{
			"dsgvo":    0.30,
			"nis2":     0.25,
			"iso27001": 0.25,
			"ai_act":   0.20,
		},
	},
	"nis2_relevant": {
		ID:          "nis2_relevant",
		Name:        "NIS2-relevant",
		Description: "Fuer Betreiber kritischer Infrastrukturen",
		Weights: map[string]float64{
			"dsgvo":    0.25,
			"nis2":     0.35,
			"iso27001": 0.25,
			"ai_act":   0.15,
		},
	},
	"ki_nutzer": {
		ID:          "ki_nutzer",
		Name:        "KI-Nutzer",
		Description: "Fuer Organisationen mit KI-Einsatz",
		Weights: map[string]float64{
			"dsgvo":    0.25,
			"nis2":     0.25,
			"iso27001": 0.20,
			"ai_act":   0.30,
		},
	},
}

// ModuleRiskWeights defines risk criticality per module type
var ModuleRiskWeights = map[string]float64{
	"incident_response":    3.0,
	"management_awareness": 3.0,
	"data_protection":      2.5,
	"it_security":          2.5,
	"supply_chain":         2.0,
	"risk_assessment":      2.0,
	"access_control":       2.0,
	"business_continuity":  2.0,
	"employee_training":    1.5,
	"documentation":        1.5,
	"physical_security":    1.0,
	"general":              1.0,
}

// GetProfile returns a weight profile by ID, defaulting to "default"
func GetProfile(profileID string) WeightProfile {
	profile, known := DefaultProfiles[profileID]
	if !known {
		return DefaultProfiles["default"]
	}
	return profile
}

// GetModuleRiskWeight returns the risk weight for a module category
func GetModuleRiskWeight(category string) float64 {
	weight, known := ModuleRiskWeights[category]
	if !known {
		return 1.0
	}
	return weight
}

View File

@@ -1,65 +0,0 @@
package industry
// ============================================================================
// Industry-Specific Compliance Templates (Phase 3.3)
// Static reference data — no database migration needed.
// ============================================================================
// IndustryTemplate represents a complete compliance package for a specific industry
type IndustryTemplate struct {
	Slug               string              `json:"slug"` // URL-safe identifier, used for lookups
	Name               string              `json:"name"`
	Description        string              `json:"description"`
	Icon               string              `json:"icon"` // emoji shown in the UI
	Regulations        []string            `json:"regulations"`
	VVTTemplates       []VVTTemplate       `json:"vvt_templates"`
	TOMRecommendations []TOMRecommendation `json:"tom_recommendations"`
	RiskScenarios      []RiskScenario      `json:"risk_scenarios"`
}

// VVTTemplate represents a pre-configured processing activity record template
// (VVT = Verzeichnis von Verarbeitungstaetigkeiten, Art. 30 GDPR record)
type VVTTemplate struct {
	Name            string   `json:"name"`
	Purpose         string   `json:"purpose"`
	LegalBasis      string   `json:"legal_basis"`
	DataCategories  []string `json:"data_categories"`
	DataSubjects    []string `json:"data_subjects"`
	RetentionPeriod string   `json:"retention_period"`
}

// TOMRecommendation represents a recommended technical/organizational measure
type TOMRecommendation struct {
	Category    string `json:"category"`
	Name        string `json:"name"`
	Description string `json:"description"`
	Priority    string `json:"priority"` // observed values in this package: "critical", "high"
}

// RiskScenario represents an industry-specific data protection risk scenario
type RiskScenario struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	Likelihood  string `json:"likelihood"` // observed values: "low", "medium", "high"
	Impact      string `json:"impact"`     // observed values: "high", "critical"
	Mitigation  string `json:"mitigation"`
}
// ============================================================================
// API Response Types
// ============================================================================
// IndustryListResponse is the API response for listing all industries
type IndustryListResponse struct {
	Industries []IndustrySummary `json:"industries"`
	Total      int               `json:"total"` // len(Industries)
}

// IndustrySummary is a condensed view of an industry template for list endpoints
type IndustrySummary struct {
	Slug            string `json:"slug"`
	Name            string `json:"name"`
	Description     string `json:"description"`
	Icon            string `json:"icon"`
	RegulationCount int    `json:"regulation_count"`
	TemplateCount   int    `json:"template_count"`
}

View File

@@ -1,558 +0,0 @@
package industry
// ============================================================================
// Static Industry Template Data
// ============================================================================
// allTemplates holds all pre-configured industry compliance packages.
// This is static reference data embedded in the binary — no database required.
// The slice order here is the order returned by GetAllTemplates.
var allTemplates = []IndustryTemplate{
	itSoftwareTemplate(),
	healthcareTemplate(),
	financeTemplate(),
	manufacturingTemplate(),
}
// GetAllTemplates returns all available industry templates.
// Note: this returns the package-level backing slice itself (no copy),
// so callers must treat the result as read-only.
func GetAllTemplates() []IndustryTemplate {
	return allTemplates
}
// GetTemplateBySlug returns the industry template matching the given slug,
// or nil if no match is found. The returned pointer aliases the entry in
// the package-level allTemplates slice (indexing by position deliberately
// avoids taking the address of a range-loop copy).
func GetTemplateBySlug(slug string) *IndustryTemplate {
	for idx := range allTemplates {
		candidate := &allTemplates[idx]
		if candidate.Slug == slug {
			return candidate
		}
	}
	return nil
}
// ============================================================================
// IT & Software
// ============================================================================

// itSoftwareTemplate builds the static compliance package for IT companies,
// SaaS providers and software developers (AI Act, GDPR for cloud services,
// NIS2). Pure data constructor: no I/O, returns a fresh value on each call.
func itSoftwareTemplate() IndustryTemplate {
	return IndustryTemplate{
		Slug:        "it-software",
		Name:        "IT & Software",
		Description: "Compliance-Paket fuer IT-Unternehmen, SaaS-Anbieter und Softwareentwickler mit Fokus auf AI Act, DSGVO fuer Cloud-Dienste und NIS2.",
		Icon:        "\U0001F4BB",
		Regulations: []string{"DSGVO", "AI Act", "NIS2", "ePrivacy"},
		VVTTemplates: []VVTTemplate{
			{
				Name:            "SaaS-Kundendaten",
				Purpose:         "Verarbeitung personenbezogener Daten von SaaS-Kunden zur Bereitstellung der vertraglichen Dienstleistung, einschliesslich Account-Verwaltung, Nutzungsanalyse und Abrechnung.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung)",
				DataCategories:  []string{"Name", "E-Mail-Adresse", "Unternehmenszugehoerigkeit", "Nutzungsdaten", "Rechnungsdaten", "IP-Adresse"},
				DataSubjects:    []string{"Kunden", "Endnutzer der SaaS-Plattform"},
				RetentionPeriod: "Vertragsdauer + 10 Jahre (handelsrechtliche Aufbewahrungspflicht)",
			},
			{
				Name:            "Cloud-Hosting",
				Purpose:         "Speicherung und Verarbeitung von Kundendaten in Cloud-Infrastruktur (IaaS/PaaS) zur Gewaehrleistung der Verfuegbarkeit und Skalierbarkeit der Dienste.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), Art. 28 DSGVO (Auftragsverarbeitung)",
				DataCategories:  []string{"Alle vom Kunden eingestellten Daten", "Metadaten", "Logdateien", "Zugangsdaten"},
				DataSubjects:    []string{"Kunden", "Endnutzer", "Mitarbeiter der Kunden"},
				RetentionPeriod: "Vertragsdauer + 30 Tage Backup-Retention",
			},
			{
				Name:            "KI-Modelltraining",
				Purpose:         "Verwendung von (pseudonymisierten) Daten zum Training, zur Validierung und Verbesserung von KI-/ML-Modellen unter Einhaltung des AI Act.",
				LegalBasis:      "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse), ggf. Art. 6 Abs. 1 lit. a (Einwilligung)",
				DataCategories:  []string{"Pseudonymisierte Nutzungsdaten", "Textdaten", "Interaktionsmuster", "Feedback-Daten"},
				DataSubjects:    []string{"Nutzer der KI-Funktionen", "Trainingsdaten-Quellen"},
				RetentionPeriod: "Bis Modell-Abloesung, max. 5 Jahre; Trainingsdaten nach Pseudonymisierung unbegrenzt",
			},
			{
				Name:    "Software-Analytics",
				Purpose: "Erhebung anonymisierter und pseudonymisierter Nutzungsstatistiken zur Produktverbesserung, Fehleranalyse und Performance-Monitoring.",
				LegalBasis: "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse)",
				// NOTE(review): "Geraetemertkmale" looks like a typo for
				// "Geraetemerkmale" — fix in a data-review pass (runtime string,
				// deliberately left unchanged here).
				DataCategories:  []string{"Geraetemertkmale", "Browserinformationen", "Nutzungsverhalten", "Crash-Reports", "Performance-Metriken"},
				DataSubjects:    []string{"Endnutzer der Software"},
				RetentionPeriod: "Rohdaten 90 Tage, aggregierte Daten 2 Jahre",
			},
			{
				Name:            "Newsletter/Marketing",
				Purpose:         "Versand von Produkt-Newslettern, Release-Benachrichtigungen und Marketing-Kommunikation an registrierte Nutzer und Interessenten.",
				LegalBasis:      "Art. 6 Abs. 1 lit. a DSGVO (Einwilligung)",
				DataCategories:  []string{"E-Mail-Adresse", "Name", "Unternehmen", "Oeffnungs- und Klickraten", "Abonnement-Praeferenzen"},
				DataSubjects:    []string{"Newsletter-Abonnenten", "Leads", "Bestandskunden"},
				RetentionPeriod: "Bis Widerruf der Einwilligung + 30 Tage Abwicklung",
			},
			{
				Name:            "Bewerbermanagement",
				Purpose:         "Verarbeitung von Bewerberdaten im Rahmen des Recruiting-Prozesses einschliesslich Sichtung, Kommunikation und Entscheidungsfindung.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (vorvertragliche Massnahmen), ss 26 BDSG",
				DataCategories:  []string{"Lebenslauf", "Anschreiben", "Zeugnisse", "Kontaktdaten", "Gehaltsvorstellungen", "Bewertungsnotizen"},
				DataSubjects:    []string{"Bewerber", "Empfehlungsgeber"},
				RetentionPeriod: "6 Monate nach Abschluss des Verfahrens (AGG-Frist), bei Einwilligung laenger",
			},
		},
		TOMRecommendations: []TOMRecommendation{
			{
				Category:    "encryption",
				Name:        "Verschluesselung at rest und in transit",
				Description: "Alle gespeicherten Daten mit AES-256 verschluesseln. Saemtlichen Netzwerkverkehr ueber TLS 1.3 absichern. Zertifikats-Management automatisieren.",
				Priority:    "critical",
			},
			{
				Category:    "access_control",
				Name:        "Multi-Faktor-Authentifizierung (MFA)",
				Description: "MFA fuer alle administrativen Zugaenge, Produktionssysteme und CI/CD-Pipelines erzwingen. FIDO2/WebAuthn bevorzugen.",
				Priority:    "critical",
			},
			{
				Category:    "monitoring",
				Name:        "Penetration Testing",
				Description: "Regelmaessige externe Penetrationstests (mind. jaehrlich) und kontinuierliche Schwachstellenscans der oeffentlich erreichbaren Infrastruktur durchfuehren.",
				Priority:    "high",
			},
			{
				Category:    "development",
				Name:        "Code Reviews und Secure Coding",
				Description: "Verpflichtende Code-Reviews fuer alle Aenderungen. SAST/DAST-Tools in die CI/CD-Pipeline integrieren. OWASP Top 10 als Mindeststandard.",
				Priority:    "high",
			},
			{
				Category:    "supply_chain",
				Name:        "Dependency Scanning",
				Description: "Automatisiertes Scanning aller Abhaengigkeiten (SBOM) auf bekannte Schwachstellen. Alerts bei kritischen CVEs. Regelmaessige Updates erzwingen.",
				Priority:    "high",
			},
			{
				Category:    "incident_response",
				Name:        "Incident Response Plan",
				Description: "Dokumentierter Incident-Response-Prozess mit definierten Eskalationsstufen, Meldepflichten (72h DSGVO) und regelmaessigen Uebungen (Tabletop Exercises).",
				Priority:    "critical",
			},
		},
		RiskScenarios: []RiskScenario{
			{
				Name:        "Datenleck durch Cloud-Fehlkonfiguration",
				Description: "Oeffentlich zugaengliche S3-Buckets, fehlende Netzwerk-Segmentierung oder falsch konfigurierte Firewalls legen Kundendaten offen.",
				Likelihood:  "high",
				Impact:      "critical",
				Mitigation:  "Infrastructure-as-Code mit automatisierten Compliance-Checks (z.B. Checkov, tfsec), Cloud Security Posture Management (CSPM) einsetzen, regelmaessige Audits der Cloud-Konfiguration.",
			},
			{
				Name:        "Supply-Chain-Angriff",
				Description: "Kompromittierte Abhaengigkeit (npm, PyPI, Go-Module) schleust Schadcode in den Build-Prozess ein und gelangt in die Produktionsumgebung.",
				Likelihood:  "medium",
				Impact:      "critical",
				// NOTE(review): "Signaturtruefung" looks like a typo for
				// "Signaturpruefung" — runtime string, left unchanged here.
				Mitigation: "Dependency Pinning, Signaturtruefung, SBOM-Generierung, private Registries, regelmaessige Audits aller Drittanbieter-Komponenten.",
			},
			{
				Name:        "KI-Bias und Diskriminierung",
				Description: "KI-Modelle produzieren diskriminierende Ergebnisse aufgrund verzerrter Trainingsdaten. Verstoss gegen AI Act und Gleichbehandlungsgrundsaetze.",
				Likelihood:  "medium",
				Impact:      "high",
				Mitigation:  "Bias-Audits vor und nach Deployment, diverse Trainingsdaten, Erklaerbarkeits-Dokumentation gemaess AI Act, menschliche Ueberpruefung (Human-in-the-Loop).",
			},
			{
				Name:        "Insider-Bedrohung",
				Description: "Ein Mitarbeiter mit privilegiertem Zugang exfiltriert Kundendaten, Quellcode oder Geschaeftsgeheimnisse — absichtlich oder durch Social Engineering.",
				Likelihood:  "low",
				Impact:      "critical",
				Mitigation:  "Least-Privilege-Prinzip, privilegierte Zugangssteuerung (PAM), Audit-Logging aller Admin-Aktionen, Vier-Augen-Prinzip fuer kritische Operationen, Security-Awareness-Trainings.",
			},
		},
	}
}
// ============================================================================
// Gesundheitswesen
// ============================================================================

// healthcareTemplate builds the static compliance package for the healthcare
// sector (Art. 9 GDPR special-category data, patient records, telemedicine).
// Pure data constructor: no I/O, returns a fresh value on each call.
func healthcareTemplate() IndustryTemplate {
	return IndustryTemplate{
		Slug:        "healthcare",
		Name:        "Gesundheitswesen",
		Description: "Compliance-Paket fuer Arztpraxen, Krankenhaeuser, Labore und Gesundheits-IT mit besonderem Fokus auf Art. 9 DSGVO (besondere Datenkategorien) und Patientendatenschutz.",
		Icon:        "\U0001F3E5",
		Regulations: []string{"DSGVO", "BDSG \u00a722", "SGB V", "MDR", "DiGAV"},
		VVTTemplates: []VVTTemplate{
			{
				Name:            "Patientenakte (ePA)",
				Purpose:         "Fuehrung elektronischer Patientenakten zur medizinischen Dokumentation, Behandlungsplanung und abrechnungstechnischen Erfassung.",
				LegalBasis:      "Art. 9 Abs. 2 lit. h DSGVO i.V.m. \u00a722 BDSG, \u00a7630f BGB (Dokumentationspflicht)",
				DataCategories:  []string{"Diagnosen", "Befunde", "Medikation", "Vitalwerte", "Anamnese", "Stammdaten", "Versicherungsdaten"},
				DataSubjects:    []string{"Patienten"},
				RetentionPeriod: "10 Jahre nach Abschluss der Behandlung (\u00a7630f BGB), bei Strahlentherapie 30 Jahre",
			},
			{
				Name:            "Terminverwaltung",
				Purpose:         "Planung, Vergabe und Erinnerung von Behandlungsterminen einschliesslich Online-Terminbuchung.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), Art. 9 Abs. 2 lit. h DSGVO",
				DataCategories:  []string{"Name", "Kontaktdaten", "Terminzeitpunkt", "Fachrichtung/Behandlungsgrund", "Versicherungsstatus"},
				DataSubjects:    []string{"Patienten", "Angehoerige (bei Terminerstellung fuer Dritte)"},
				RetentionPeriod: "Vergangene Termine: 1 Jahr, bei medizinischer Relevanz gemaess Patientenakte",
			},
			{
				Name:            "Labor- und Befunddaten",
				Purpose:         "Erfassung, Uebermittlung und Archivierung von Laborergebnissen, bildgebenden Befunden und pathologischen Berichten.",
				LegalBasis:      "Art. 9 Abs. 2 lit. h DSGVO, \u00a710 MBO-Ae",
				DataCategories:  []string{"Laborwerte", "Bildgebung (DICOM)", "Pathologiebefunde", "Mikrobiologische Ergebnisse", "Genetische Daten"},
				DataSubjects:    []string{"Patienten"},
				RetentionPeriod: "10 Jahre, genetische Daten 30 Jahre",
			},
			{
				Name:            "Telemedizin",
				Purpose:         "Durchfuehrung von Videosprechstunden und telemedizinischen Konsultationen einschliesslich Uebertragung medizinischer Daten.",
				LegalBasis:      "Art. 9 Abs. 2 lit. h DSGVO, \u00a7630a BGB, Fernbehandlungs-Richtlinien",
				DataCategories:  []string{"Audio-/Videodaten", "Chatprotokolle", "Uebermittelte Dokumente", "Verbindungsmetadaten", "Behandlungsnotizen"},
				DataSubjects:    []string{"Patienten", "Behandelnde Aerzte"},
				RetentionPeriod: "Aufzeichnungen gemaess Patientenakte (10 Jahre), Verbindungsdaten 90 Tage",
			},
			{
				Name:            "Forschungsdaten",
				Purpose:         "Verwendung pseudonymisierter oder anonymisierter Patientendaten fuer klinische Studien und medizinische Forschung.",
				LegalBasis:      "Art. 9 Abs. 2 lit. j DSGVO, \u00a727 BDSG, ggf. Einwilligung gemaess Art. 9 Abs. 2 lit. a",
				DataCategories:  []string{"Pseudonymisierte Diagnosen", "Behandlungsverlaeufe", "Demografische Daten", "Genetische Daten (anonymisiert)", "Studienergebnisse"},
				DataSubjects:    []string{"Studienteilnehmer", "Patienten (retrospektiv, pseudonymisiert)"},
				RetentionPeriod: "Studienende + 15 Jahre (GCP-ICH), Forschungsdaten gemaess Foerderrichtlinien",
			},
			{
				Name:            "Abrechnung (KV/Krankenversicherung)",
				Purpose:         "Erstellung und Uebermittlung von Abrechnungsdaten an Kassenaerztliche Vereinigungen und Krankenkassen.",
				LegalBasis:      "Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), \u00a7284 SGB V, \u00a7295 SGB V",
				DataCategories:  []string{"Versichertennummer", "Diagnose-Codes (ICD-10)", "Leistungsziffern (EBM/GOAe)", "Behandlungsdaten", "Zuzahlungsstatus"},
				DataSubjects:    []string{"Patienten", "Versicherte"},
				RetentionPeriod: "10 Jahre (steuerrechtlich), Abrechnungsdaten 4 Jahre (\u00a7305 SGB V)",
			},
		},
		TOMRecommendations: []TOMRecommendation{
			{
				Category:    "encryption",
				Name:        "Ende-zu-Ende-Verschluesselung",
				Description: "Saemtliche Kommunikation mit Gesundheitsdaten (E-Mail, Telemedizin, Befunduebermittlung) Ende-zu-Ende verschluesseln. Zertifizierte Loesungen gemaess gematik-Spezifikation einsetzen.",
				Priority:    "critical",
			},
			{
				Category:    "access_control",
				Name:        "Rollenbasierte Zugriffskontrolle (RBAC)",
				Description: "Feingranulare Zugriffsrechte basierend auf Behandlungskontext: Nur behandelnde Aerzte sehen relevante Patientendaten. Need-to-know-Prinzip konsequent umsetzen.",
				Priority:    "critical",
			},
			{
				Category:    "monitoring",
				Name:        "Audit-Logging",
				Description: "Lueckenloses Protokollieren aller Zugriffe auf Patientendaten mit Zeitstempel, Benutzer, Aktion und Begruendung. Logs manipulationssicher speichern (WORM).",
				Priority:    "critical",
			},
			{
				Category:    "physical_security",
				Name:        "Physische Sicherheit",
				Description: "Zutrittskontrolle zu Serverraeumen und medizinischen Arbeitsbereichen. Bildschirmsperren, Clean-Desk-Policy. Sicherer Umgang mit physischen Patientenakten.",
				Priority:    "high",
			},
			{
				Category:    "data_minimization",
				Name:        "Pseudonymisierung",
				Description: "Konsequente Pseudonymisierung bei Datenweitergabe (Forschung, Qualitaetssicherung, Abrechnung). Zuordnungstabellen separat und besonders geschuetzt speichern.",
				Priority:    "high",
			},
		},
		RiskScenarios: []RiskScenario{
			{
				Name:        "Unbefugter Zugriff auf Patientendaten",
				Description: "Mitarbeiter ohne Behandlungsbezug greifen auf Patientenakten zu (z.B. prominente Patienten). Verstoss gegen aerztliche Schweigepflicht und DSGVO.",
				Likelihood:  "high",
				Impact:      "critical",
				Mitigation:  "Striktes RBAC mit Behandlungskontext-Pruefung, automatische Anomalie-Erkennung bei ungewoehnlichen Zugriffen, regelmaessige Audit-Log-Auswertung, Sanktionskatalog.",
			},
			{
				Name:        "Ransomware-Angriff auf Krankenhaus-IT",
				Description: "Verschluesselungstrojaner legt Krankenhaus-Informationssystem lahm. Patientenversorgung gefaehrdet, Notbetrieb erforderlich.",
				Likelihood:  "medium",
				Impact:      "critical",
				Mitigation:  "Netzwerksegmentierung (Medizingeraete, Verwaltung, Gaeste), Offline-Backups, Notfallplaene fuer Papierbetrieb, regelmaessige Sicherheitsupdates, Mitarbeiterschulung gegen Phishing.",
			},
			{
				Name:        "Datenverlust bei Systemausfall",
				Description: "Hardware-Defekt oder Softwarefehler fuehrt zum Verlust aktueller Patientendaten, Befunde oder Medikationsplaene.",
				Likelihood:  "medium",
				Impact:      "high",
				Mitigation:  "Redundante Systeme (Clustering), automatische Backups mit verifizierter Wiederherstellung, unterbrechungsfreie Stromversorgung (USV), Disaster-Recovery-Plan mit RTOs unter 4 Stunden.",
			},
			{
				Name:        "Verletzung der aerztlichen Schweigepflicht",
				Description: "Versehentliche oder vorsaetzliche Weitergabe von Patientendaten an Unberechtigte (z.B. Angehoerige ohne Vollmacht, Arbeitgeber, Medien).",
				Likelihood:  "medium",
				Impact:      "high",
				Mitigation:  "Schulungen zur Schweigepflicht (\u00a7203 StGB), klare Prozesse fuer Auskunftsersuchen, Dokumentation von Einwilligungen und Vollmachten, sichere Kommunikationskanaele.",
			},
		},
	}
}
// ============================================================================
// Finanzdienstleister
// ============================================================================

// financeTemplate builds the static compliance package for banks, insurers,
// payment providers and FinTechs (BaFin requirements, PSD2, AML/KYC).
// Pure data constructor: no I/O, returns a fresh value on each call.
func financeTemplate() IndustryTemplate {
	return IndustryTemplate{
		Slug: "finance",
		Name: "Finanzdienstleister",
		// NOTE(review): trailing "Geldwaeschepraeventions" looks like a typo
		// for "Geldwaeschepraevention" — runtime string, left unchanged here.
		Description: "Compliance-Paket fuer Banken, Versicherungen, Zahlungsdienstleister und FinTechs mit Fokus auf BaFin-Anforderungen, PSD2 und Geldwaeschepraeventions.",
		Icon:        "\U0001F3E6",
		Regulations: []string{"DSGVO", "KWG", "ZAG", "GwG", "MaRisk", "BAIT/DORA", "PSD2"},
		VVTTemplates: []VVTTemplate{
			{
				Name:            "Kontoeroeffnung / KYC",
				Purpose:         "Identitaetspruefung und Legitimation von Neukunden im Rahmen der Know-Your-Customer-Pflichten gemaess Geldwaeschegesetz.",
				LegalBasis:      "Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), \u00a710 GwG, \u00a7154 AO",
				DataCategories:  []string{"Personalausweisdaten", "Adressdaten", "Geburtsdatum", "Staatsangehoerigkeit", "PEP-Status", "Wirtschaftliche Berechtigung", "Video-Identifikation"},
				DataSubjects:    []string{"Neukunden", "Wirtschaftlich Berechtigte", "Vertretungsberechtigte"},
				RetentionPeriod: "5 Jahre nach Ende der Geschaeftsbeziehung (\u00a78 GwG), Identifizierungsdaten 10 Jahre",
			},
			{
				Name:            "Zahlungsverarbeitung",
				Purpose:         "Ausfuehrung und Dokumentation von Zahlungstransaktionen (Ueberweisungen, Lastschriften, Kartenzahlungen) im Rahmen der Kontovertragserfullung.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), \u00a7675f BGB, PSD2",
				DataCategories:  []string{"IBAN/Kontonummer", "Transaktionsbetrag", "Verwendungszweck", "Empfaengerdaten", "Zeitstempel", "Autorisierungsdaten"},
				DataSubjects:    []string{"Kontoinhaber", "Zahlungsempfaenger", "Zahlungspflichtige"},
				RetentionPeriod: "10 Jahre (\u00a7257 HGB, \u00a7147 AO)",
			},
			{
				Name:            "Kreditpruefung / Scoring",
				Purpose:         "Bonitaetspruefung und Kreditwuerdigkeitsbewertung auf Basis interner und externer Daten zur Kreditentscheidung.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (vorvertragliche Massnahmen), \u00a731 BDSG (Scoring)",
				DataCategories:  []string{"Einkommensnachweise", "Schufa-Score", "Beschaeftigungsstatus", "Bestehende Verbindlichkeiten", "Sicherheiten", "Scoring-Ergebnis"},
				DataSubjects:    []string{"Kreditantragsteller", "Buergen", "Mithaftende"},
				RetentionPeriod: "Kreditlaufzeit + 3 Jahre, bei Ablehnung 6 Monate",
			},
			{
				Name:            "Wertpapierhandel",
				Purpose:         "Ausfuehrung und Dokumentation von Wertpapiergeschaeften, Anlageberatung und Geeignetheitspruefung.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO, \u00a763 WpHG (Aufzeichnungspflichten), MiFID II",
				DataCategories:  []string{"Depotdaten", "Orderdaten", "Risikoprofil", "Anlageerfahrung", "Geeignetheitserklaerung", "Telefonaufzeichnungen"},
				DataSubjects:    []string{"Depotinhaber", "Bevollmaechtigte", "Anlageberater"},
				RetentionPeriod: "10 Jahre (\u00a7257 HGB), Telefonaufzeichnungen 5 Jahre (MiFID II)",
			},
			{
				Name:            "Geldwaesche-Monitoring",
				Purpose:         "Kontinuierliche Ueberwachung von Transaktionsmustern zur Erkennung verdaechtiger Aktivitaeten und Erfuellung der Meldepflichten gegenueber der FIU.",
				LegalBasis:      "Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), \u00a325h KWG, \u00a756 GwG",
				DataCategories:  []string{"Transaktionshistorie", "Risikobewertung", "Verdachtsmeldungen (SAR)", "PEP-Screening-Ergebnisse", "Sanktionslistenabgleich"},
				DataSubjects:    []string{"Kunden", "Transaktionspartner", "Verdachtspersonen"},
				RetentionPeriod: "5 Jahre nach Ende der Geschaeftsbeziehung (\u00a78 GwG), Verdachtsmeldungen 10 Jahre",
			},
			{
				Name:            "Versicherungsantraege",
				Purpose:         "Verarbeitung von Antrags- und Risikodaten zur Pruefung, Annahme und Verwaltung von Versicherungsvertraegen.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), bei Gesundheitsdaten Art. 9 Abs. 2 lit. f DSGVO",
				DataCategories:  []string{"Antragsdaten", "Gesundheitsfragen", "Schadenhistorie", "Risikofaktoren", "Praemienberechnung", "Leistungsansprueche"},
				DataSubjects:    []string{"Versicherungsnehmer", "Versicherte Personen", "Bezugsberechtigte", "Geschaedigte"},
				RetentionPeriod: "Vertragsdauer + 10 Jahre (Verjaehrung), Lebensversicherung bis Ablauf aller Ansprueche",
			},
		},
		TOMRecommendations: []TOMRecommendation{
			{
				Category:    "encryption",
				Name:        "HSM fuer Schluesselverwaltung",
				Description: "Hardware Security Modules (HSM) fuer kryptographische Schluessel, insbesondere bei Zahlungsverkehr und digitalen Signaturen. PCI-DSS-konform.",
				Priority:    "critical",
			},
			{
				Category:    "monitoring",
				Name:        "Transaktionsmonitoring",
				Description: "Echtzeit-Ueberwachung aller Finanztransaktionen auf Anomalien, Betrugsversuche und verdaechtige Muster. Regelbasierte und KI-gestuetzte Erkennung.",
				Priority:    "critical",
			},
			{
				Category:    "access_control",
				Name:        "Vier-Augen-Prinzip",
				Description: "Kritische Transaktionen (Kreditfreigaben, Grossueberweisungen, Konfigurationsaenderungen) benoetigen Freigabe durch zwei unabhaengige Personen.",
				Priority:    "critical",
			},
			{
				Category:    "network_security",
				Name:        "DDoS-Schutz",
				Description: "Mehrstufiger DDoS-Schutz fuer Online-Banking und Zahlungsverkehr-Infrastruktur. Redundante Anbindung, Traffic-Scrubbing, automatische Skalierung.",
				Priority:    "high",
			},
			{
				Category:    "business_continuity",
				Name:        "Backup und Disaster Recovery",
				Description: "Taeglich gesicherte Datenbanken mit geografisch getrennter Aufbewahrung. RTO unter 2 Stunden fuer Kernbanksysteme, RPO unter 15 Minuten.",
				Priority:    "critical",
			},
			{
				Category:    "testing",
				Name:        "Penetration Testing (TIBER-EU)",
				Description: "Threat-Intelligence-basierte Red-Teaming-Tests gemaess TIBER-EU-Framework. Jaehrliche Durchfuehrung durch externe, BaFin-akkreditierte Tester.",
				Priority:    "high",
			},
		},
		RiskScenarios: []RiskScenario{
			{
				Name:        "Betrug und Identitaetsdiebstahl",
				Description: "Kriminelle nutzen gestohlene Identitaetsdaten zur Kontoeroeffnung, Kreditaufnahme oder fuer nicht autorisierte Transaktionen.",
				Likelihood:  "high",
				Impact:      "high",
				Mitigation:  "Starke Kundenauthentifizierung (SCA) gemaess PSD2, Echtzeit-Betrugs-Scoring, Video-Ident mit Liveness-Detection, biometrische Verifikation, Transaktionslimits.",
			},
			{
				Name:        "Insiderhandel-Datenleck",
				Description: "Vorabinformationen ueber boersenrelevante Entscheidungen (M&A, Quartalsberichte) gelangen an Unberechtigte.",
				Likelihood:  "low",
				Impact:      "critical",
				Mitigation:  "Insiderverzeichnisse fuehren, Chinese Walls zwischen Abteilungen, Kommunikations-Monitoring, Handelsverbote fuer Insider, regelmaessige Compliance-Schulungen.",
			},
			{
				Name:        "Systemausfall bei Zahlungsverkehr",
				Description: "Ausfall des Kernbanksystems oder der Zahlungsverkehrsinfrastruktur fuehrt zu Nicht-Verfuegbarkeit von Transaktionen, Geldautomaten und Online-Banking.",
				Likelihood:  "medium",
				Impact:      "critical",
				Mitigation:  "Hochverfuegbarkeits-Architektur (Active-Active), automatischer Failover, regelmaessige Disaster-Recovery-Tests, Notfall-Kommunikationsplan fuer Kunden und BaFin.",
			},
			{
				Name:        "Geldwaesche-Compliance-Verstoss",
				Description: "Mangelhafte KYC-Prozesse oder unzureichendes Transaktionsmonitoring fuehren zu einem Compliance-Verstoss mit BaFin-Sanktionen.",
				Likelihood:  "medium",
				Impact:      "critical",
				Mitigation:  "Automatisiertes Transaction-Monitoring mit regelmaessiger Kalibrierung, jaehrliche GwG-Schulungen, interne Revision der AML-Prozesse, PEP- und Sanktionslisten-Screening in Echtzeit.",
			},
		},
	}
}
// ============================================================================
// Produktion / Industrie
// ============================================================================
// manufacturingTemplate returns the pre-built compliance starter package for
// manufacturing / industrial companies. It bundles VVT templates (records of
// processing activities), TOM recommendations (technical and organisational
// measures) and typical risk scenarios, with a focus on NIS2, OT security,
// IoT and the protection of industrial control systems.
//
// All user-facing text is intentionally German (target audience); "ue"/"ae"
// spellings avoid umlaut-encoding issues in downstream exports.
func manufacturingTemplate() IndustryTemplate {
	return IndustryTemplate{
		Slug:        "manufacturing",
		Name:        "Produktion / Industrie",
		Description: "Compliance-Paket fuer produzierende Unternehmen mit Fokus auf NIS2-Anforderungen, OT-Security, IoT-Sicherheit und Schutz industrieller Steuerungssysteme.",
		Icon:        "\U0001F3ED", // factory emoji
		Regulations: []string{"DSGVO", "NIS2", "Maschinenverordnung", "BetrSichV", "IT-Sicherheitsgesetz 2.0"},
		// VVT templates: typical processing activities for a manufacturer,
		// each with purpose, legal basis, data categories, subjects and
		// statutory retention periods.
		VVTTemplates: []VVTTemplate{
			{
				Name:            "Mitarbeiterdaten / Zeiterfassung",
				Purpose:         "Erfassung von Arbeitszeiten, Schichtplanung und Anwesenheitsdaten zur Lohnabrechnung und Einhaltung des Arbeitszeitgesetzes.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), \u00a726 BDSG, \u00a716 ArbZG",
				DataCategories:  []string{"Mitarbeiterstammdaten", "Arbeitszeitdaten", "Schichtplaene", "Fehlzeiten", "Ueberstunden", "Zutrittsdaten"},
				DataSubjects:    []string{"Mitarbeiter", "Leiharbeiter", "Praktikanten"},
				RetentionPeriod: "Lohnunterlagen 6 Jahre (\u00a7257 HGB), Arbeitszeitnachweise 2 Jahre (\u00a716 ArbZG)",
			},
			{
				Name:            "Lieferantenmanagement",
				Purpose:         "Verwaltung von Lieferantendaten, Bestellprozessen und Qualitaetsbewertungen im Rahmen der Supply-Chain.",
				LegalBasis:      "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), Art. 6 Abs. 1 lit. f (berechtigtes Interesse)",
				DataCategories:  []string{"Ansprechpartner", "Kontaktdaten", "Lieferkonditionen", "Qualitaetsbewertungen", "Zertifizierungen", "Bankverbindungen"},
				DataSubjects:    []string{"Ansprechpartner der Lieferanten", "Subunternehmer"},
				RetentionPeriod: "Vertragsdauer + 10 Jahre (Gewaehrleistung und Steuerrecht)",
			},
			{
				Name:            "IoT-Sensordaten",
				Purpose:         "Erfassung und Auswertung von Sensor- und Maschinendaten fuer Produktionsoptimierung, Predictive Maintenance und Qualitaetssicherung.",
				LegalBasis:      "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse), bei Personenbezug ggf. Art. 6 Abs. 1 lit. a (Einwilligung)",
				DataCategories:  []string{"Maschinenkennung", "Temperatur/Druck/Vibration", "Produktionszaehler", "Energieverbrauch", "Standortdaten (Intralogistik)", "Bediener-ID (falls zugeordnet)"},
				DataSubjects:    []string{"Maschinenbediener (indirekt)", "Instandhalter"},
				RetentionPeriod: "Rohdaten 1 Jahr, aggregierte Daten 5 Jahre, qualitaetsrelevant 10 Jahre",
			},
			{
				Name:            "Qualitaetskontrolle",
				Purpose:         "Dokumentation von Qualitaetspruefungen, Chargenrueckverfolgbarkeit und Reklamationsmanagement.",
				LegalBasis:      "Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), Maschinenverordnung, Produkthaftung",
				DataCategories:  []string{"Pruefprotokolle", "Chargennnummern", "Messwerte", "Pruefer-ID", "Fotos/Videos der Pruefung", "Reklamationsdaten"},
				DataSubjects:    []string{"Pruefer", "Reklamierende Kunden"},
				RetentionPeriod: "Produktlebensdauer + 10 Jahre (Produkthaftung), sicherheitskritisch 30 Jahre",
			},
			{
				Name:            "Videoueberwachung",
				Purpose:         "Ueberwachung von Produktionshallen, Lagerbereichen und Aussenbereichen zum Schutz vor Diebstahl, Sabotage und zur Arbeitssicherheit.",
				LegalBasis:      "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse), Betriebsvereinbarung",
				DataCategories:  []string{"Videoaufnahmen", "Zeitstempel", "Kamerastandort", "Bewegungserkennung"},
				DataSubjects:    []string{"Mitarbeiter", "Besucher", "Lieferanten", "Unbefugte"},
				RetentionPeriod: "72 Stunden Standard, bei Vorfaellen bis Abschluss der Ermittlung (max. 10 Tage ohne konkreten Anlass)",
			},
			{
				Name:            "Zugangskontrolle (physisch und logisch)",
				Purpose:         "Steuerung und Protokollierung des Zutritts zu Produktionsbereichen, Gefahrstofflagern und IT-Raeumen mittels Chipkarten/Biometrie.",
				LegalBasis:      "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse), BetrSichV, bei Biometrie Art. 9 Abs. 2 lit. b DSGVO",
				DataCategories:  []string{"Mitarbeiter-ID", "Zutrittszeitpunkt", "Zutrittsbereich", "Chipkartennummer", "Biometrische Daten (optional)"},
				DataSubjects:    []string{"Mitarbeiter", "Externe Dienstleister", "Besucher"},
				RetentionPeriod: "Zutrittsprotokolle 90 Tage, sicherheitsrelevante Bereiche 1 Jahr",
			},
		},
		// TOM recommendations: suggested technical/organisational measures,
		// prioritised for the IT/OT threat profile of manufacturing.
		TOMRecommendations: []TOMRecommendation{
			{
				Category:    "network_security",
				Name:        "Netzwerksegmentierung (IT/OT)",
				Description: "Strikte Trennung von Office-IT und Operational Technology (OT) durch DMZ, Firewalls und unidirektionale Gateways. Purdue-Modell als Referenzarchitektur.",
				Priority:    "critical",
			},
			{
				Category:    "patch_management",
				Name:        "IoT-Patch-Management",
				Description: "Zentrales Management aller IoT-Geraete und Firmware-Versionen. Geplante Wartungsfenster fuer Updates, Risikobewertung vor Patches auf Produktionssystemen.",
				Priority:    "high",
			},
			{
				Category:    "physical_security",
				Name:        "Physische Zutrittskontrolle",
				Description: "Mehrstufiges Zutrittskonzept (Gelaende, Gebaeude, Produktionshalle, Leitstand). Besuchermanagement, Begleitung in Sicherheitsbereichen, Videoprotokollierung.",
				Priority:    "high",
			},
			{
				Category:    "business_continuity",
				Name:        "Backup industrieller Steuerungen",
				Description: "Regelmaessige Sicherung von SPS-Programmen, SCADA-Konfigurationen und Roboterprogrammen. Offline-Aufbewahrung der Backups, dokumentierte Restore-Prozeduren.",
				Priority:    "critical",
			},
			{
				Category:    "incident_response",
				Name:        "Notfallplaene fuer Produktionsausfall",
				Description: "Dokumentierte Notfallplaene fuer Cyber-Angriffe auf OT-Systeme. Manuelle Rueckfallebenen, Kommunikationsketten, Kontakt zu BSI und CERT. Jaehrliche Uebungen.",
				Priority:    "critical",
			},
		},
		// Risk scenarios: likelihood/impact-rated threats with suggested
		// mitigations, used to seed the tenant's risk register.
		RiskScenarios: []RiskScenario{
			{
				Name:        "OT-Cyberangriff auf Produktionsanlage",
				Description: "Angreifer kompromittiert SCADA/SPS-Systeme und manipuliert Produktionsprozesse. Moegliche Folgen: Produktionsausfall, Qualitaetsmaengel, Personengefaehrdung.",
				Likelihood:  "medium",
				Impact:      "critical",
				Mitigation:  "Netzwerksegmentierung (IT/OT), Anomalie-Erkennung im OT-Netzwerk, Haertung der Steuerungssysteme, Deaktivierung nicht benoetigter Dienste und Ports, regelmaessige Sicherheitsaudits.",
			},
			{
				Name:        "Ausfall der Lieferkette durch Cybervorfall",
				Description: "Ein Cyberangriff auf einen kritischen Zulieferer fuehrt zum Stillstand der eigenen Produktion mangels Materialverfuegbarkeit oder kompromittierter Daten.",
				Likelihood:  "medium",
				Impact:      "high",
				Mitigation:  "Diversifikation der Lieferantenbasis, vertragliche Cybersecurity-Anforderungen an Zulieferer, regelmaessige Risikobewertung der Supply Chain, Notfallbestaende fuer kritische Komponenten.",
			},
			{
				Name:        "Industriespionage",
				Description: "Wettbewerber oder staatliche Akteure greifen Konstruktionsdaten, Fertigungsverfahren oder strategische Planungen ab.",
				Likelihood:  "medium",
				Impact:      "critical",
				Mitigation:  "DLP-Loesungen (Data Loss Prevention), Verschluesselung von CAD/CAM-Daten, Geheimhaltungsvereinbarungen, Informationsklassifizierung, USB-Port-Kontrolle, Mitarbeiter-Sensibilisierung.",
			},
			{
				Name:        "IoT-Botnet-Kompromittierung",
				Description: "Ungepatchte IoT-Sensoren und Aktoren werden Teil eines Botnets und dienen als Angriffsinfrastruktur oder Einfallstor ins Unternehmensnetz.",
				Likelihood:  "high",
				Impact:      "high",
				Mitigation:  "Default-Passwoerter aendern, Firmware-Updates automatisieren, IoT-Geraete in eigenem VLAN isolieren, Netzwerk-Traffic-Monitoring, Geraete-Inventar fuehren, unsichere Geraete ersetzen.",
			},
		},
	}
}

View File

@@ -1,77 +0,0 @@
package multitenant
import (
"time"
"github.com/google/uuid"
)
// TenantOverview provides a consolidated view of a tenant's compliance status
// including scores, module highlights, and namespace information.
// It is assembled by Store.buildTenantOverview from RBAC tenant data, an
// executive report, and per-module SQL counts.
type TenantOverview struct {
	ID              uuid.UUID `json:"id"`
	Name            string    `json:"name"`
	Slug            string    `json:"slug"`
	Status          string    `json:"status"`
	MaxUsers        int       `json:"max_users"`
	LLMQuotaMonthly int       `json:"llm_quota_monthly"`
	ComplianceScore int       `json:"compliance_score"` // 0-100, from the executive report
	RiskLevel       string    `json:"risk_level"`       // overall level from the report's risk overview
	NamespaceCount  int       `json:"namespace_count"`
	// Module highlights (zero when the underlying query fails)
	OpenIncidents int     `json:"open_incidents"`
	OpenReports   int     `json:"open_reports"` // whistleblower
	PendingDSRs   int     `json:"pending_dsrs"`
	TrainingRate  float64 `json:"training_completion_rate"` // 0-100 average enrollment progress
	VendorRiskHigh int    `json:"vendor_risk_high"`
	CreatedAt     time.Time `json:"created_at"`
	UpdatedAt     time.Time `json:"updated_at"`
}
// MultiTenantOverviewResponse wraps the list of tenant overviews with aggregate metrics.
type MultiTenantOverviewResponse struct {
	Tenants      []TenantOverview `json:"tenants"`
	Total        int              `json:"total"`         // len(Tenants)
	AverageScore int              `json:"average_score"` // integer mean of per-tenant compliance scores
	GeneratedAt  time.Time        `json:"generated_at"`  // UTC
}
// CreateTenantRequest represents a request to create a new tenant.
// Name and Slug are mandatory (Gin binding); quota fields default to zero
// when omitted.
type CreateTenantRequest struct {
	Name            string `json:"name" binding:"required"`
	Slug            string `json:"slug" binding:"required"`
	MaxUsers        int    `json:"max_users"`
	LLMQuotaMonthly int    `json:"llm_quota_monthly"`
}
// UpdateTenantRequest represents a partial update to an existing tenant.
// Pointer fields allow distinguishing between "not provided" and "zero value".
type UpdateTenantRequest struct {
	Name            *string `json:"name"`
	MaxUsers        *int    `json:"max_users"`
	LLMQuotaMonthly *int    `json:"llm_quota_monthly"`
	Status          *string `json:"status"`
}
// CreateNamespaceRequest represents a request to create a new namespace within a tenant.
// IsolationLevel and DataClassification are optional; valid values are
// defined by the namespace service (not visible here).
type CreateNamespaceRequest struct {
	Name               string `json:"name" binding:"required"`
	Slug               string `json:"slug" binding:"required"`
	IsolationLevel     string `json:"isolation_level"`
	DataClassification string `json:"data_classification"`
}
// SwitchTenantRequest represents a request to switch the active tenant context.
// TenantID is a string (not uuid.UUID) so malformed IDs surface as a
// validation error rather than a JSON unmarshal failure.
type SwitchTenantRequest struct {
	TenantID string `json:"tenant_id" binding:"required"`
}
// SwitchTenantResponse contains the tenant info needed for the frontend to switch context.
type SwitchTenantResponse struct {
	TenantID   uuid.UUID `json:"tenant_id"`
	TenantName string    `json:"tenant_name"`
	TenantSlug string    `json:"tenant_slug"`
	Status     string    `json:"status"`
}

View File

@@ -1,148 +0,0 @@
package multitenant
import (
"context"
"fmt"
"log"
"time"
"github.com/breakpilot/ai-compliance-sdk/internal/rbac"
"github.com/breakpilot/ai-compliance-sdk/internal/reporting"
"github.com/google/uuid"
"github.com/jackc/pgx/v5/pgxpool"
)
// Store provides aggregated multi-tenant views by combining data from the
// existing RBAC store, reporting store, and direct SQL queries for module highlights.
type Store struct {
	pool           *pgxpool.Pool    // direct SQL access for per-module counts
	rbacStore      *rbac.Store      // tenant listing / lookup
	reportingStore *reporting.Store // compliance score + risk level source
}
// NewStore creates a multi-tenant store that aggregates data from the
// connection pool, the RBAC store, and the reporting store.
func NewStore(pool *pgxpool.Pool, rbacStore *rbac.Store, reportingStore *reporting.Store) *Store {
	s := Store{
		pool:           pool,
		rbacStore:      rbacStore,
		reportingStore: reportingStore,
	}
	return &s
}
// GetOverview retrieves every tenant together with its compliance score and
// module highlights, plus the aggregate average score across all tenants.
// Per-tenant data gaps are tolerated (buildTenantOverview defaults missing
// values to zero); only the initial tenant listing can fail the call.
func (s *Store) GetOverview(ctx context.Context) (*MultiTenantOverviewResponse, error) {
	tenantList, err := s.rbacStore.ListTenants(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list tenants: %w", err)
	}
	items := make([]TenantOverview, 0, len(tenantList))
	scoreSum := 0
	for _, t := range tenantList {
		ov := s.buildTenantOverview(ctx, t)
		scoreSum += ov.ComplianceScore
		items = append(items, ov)
	}
	// Integer mean; zero when there are no tenants.
	avg := 0
	if n := len(items); n > 0 {
		avg = scoreSum / n
	}
	return &MultiTenantOverviewResponse{
		Tenants:      items,
		Total:        len(items),
		AverageScore: avg,
		GeneratedAt:  time.Now().UTC(),
	}, nil
}
// GetTenantDetail returns the detailed compliance overview for one tenant.
// It fails only when the tenant itself cannot be loaded; missing module data
// is tolerated by buildTenantOverview.
func (s *Store) GetTenantDetail(ctx context.Context, tenantID uuid.UUID) (*TenantOverview, error) {
	t, err := s.rbacStore.GetTenant(ctx, tenantID)
	if err != nil {
		return nil, fmt.Errorf("failed to get tenant: %w", err)
	}
	ov := s.buildTenantOverview(ctx, t)
	return &ov, nil
}
// buildTenantOverview constructs a TenantOverview by fetching compliance scores
// and module highlights for a single tenant. Errors are logged but do not
// propagate -- missing data defaults to zero values.
//
// NOTE(review): the module-highlight queries here use unprefixed table names
// (incidents, dsr_requests, ...) while the reporting store queries the
// "compliance." schema -- confirm both resolve to the intended tables via
// search_path.
func (s *Store) buildTenantOverview(ctx context.Context, tenant *rbac.Tenant) TenantOverview {
	// Base fields copied straight from the RBAC tenant record.
	overview := TenantOverview{
		ID:              tenant.ID,
		Name:            tenant.Name,
		Slug:            tenant.Slug,
		Status:          string(tenant.Status),
		MaxUsers:        tenant.MaxUsers,
		LLMQuotaMonthly: tenant.LLMQuotaMonthly,
		CreatedAt:       tenant.CreatedAt,
		UpdatedAt:       tenant.UpdatedAt,
	}
	// Compliance score and risk level derived from an executive report.
	// GenerateReport computes the compliance score and risk overview internally.
	// On failure the score/level stay at their zero values.
	report, err := s.reportingStore.GenerateReport(ctx, tenant.ID)
	if err != nil {
		log.Printf("multitenant: failed to generate report for tenant %s: %v", tenant.ID, err)
	} else {
		overview.ComplianceScore = report.ComplianceScore
		overview.RiskLevel = report.RiskOverview.OverallLevel
	}
	// Namespace count
	overview.NamespaceCount = s.countSafe(ctx, tenant.ID,
		"SELECT COUNT(*) FROM compliance_namespaces WHERE tenant_id = $1")
	// Open incidents
	overview.OpenIncidents = s.countSafe(ctx, tenant.ID,
		"SELECT COUNT(*) FROM incidents WHERE tenant_id = $1 AND status IN ('new', 'investigating', 'containment')")
	// Open whistleblower reports
	overview.OpenReports = s.countSafe(ctx, tenant.ID,
		"SELECT COUNT(*) FROM whistleblower_reports WHERE tenant_id = $1 AND status IN ('new', 'acknowledged', 'investigating')")
	// Pending DSR requests
	overview.PendingDSRs = s.countSafe(ctx, tenant.ID,
		"SELECT COUNT(*) FROM dsr_requests WHERE tenant_id = $1 AND status IN ('new', 'in_progress')")
	// Training completion rate (average progress, 0-100); completed
	// enrollments count as 100 regardless of stored progress.
	overview.TrainingRate = s.avgSafe(ctx, tenant.ID,
		"SELECT COALESCE(AVG(CASE WHEN status = 'completed' THEN 100.0 ELSE progress END), 0) FROM academy_enrollments WHERE tenant_id = $1")
	// High-risk vendors
	overview.VendorRiskHigh = s.countSafe(ctx, tenant.ID,
		"SELECT COUNT(*) FROM vendors WHERE tenant_id = $1 AND risk_level = 'high'")
	return overview
}
// countSafe runs a COUNT(*) query parameterized by a single tenant_id and
// returns the count. Any failure (missing table, scan error, cancelled
// context) is swallowed and reported as 0 -- callers treat absent data as
// "nothing to show".
func (s *Store) countSafe(ctx context.Context, tenantID uuid.UUID, query string) int {
	var n int
	if err := s.pool.QueryRow(ctx, query, tenantID).Scan(&n); err != nil {
		return 0
	}
	return n
}
// avgSafe runs an AVG query parameterized by a single tenant_id and returns
// the result as float64. Like countSafe, any failure yields 0 instead of an
// error.
func (s *Store) avgSafe(ctx context.Context, tenantID uuid.UUID, query string) float64 {
	var result float64
	if err := s.pool.QueryRow(ctx, query, tenantID).Scan(&result); err != nil {
		return 0
	}
	return result
}

View File

@@ -1,97 +0,0 @@
package reporting
import "time"
// ExecutiveReport is the cross-module compliance snapshot served to the
// executive dashboard: per-module summaries, a derived risk overview, a
// weighted overall score, plus upcoming deadlines and recent activity.
type ExecutiveReport struct {
	GeneratedAt     time.Time `json:"generated_at"` // UTC
	TenantID        string    `json:"tenant_id"`
	ComplianceScore int       `json:"compliance_score"` // 0-100 overall score
	// Module summaries
	DSGVO         DSGVOSummary         `json:"dsgvo"`
	Vendors       VendorSummary        `json:"vendors"`
	Incidents     IncidentSummary      `json:"incidents"`
	Whistleblower WhistleblowerSummary `json:"whistleblower"`
	Academy       AcademySummary       `json:"academy"`
	// Cross-module metrics
	RiskOverview      RiskOverview    `json:"risk_overview"`
	UpcomingDeadlines []Deadline      `json:"upcoming_deadlines"`
	RecentActivity    []ActivityEntry `json:"recent_activity"`
}
// DSGVOSummary aggregates GDPR (DSGVO) compliance counters: processing
// activities (VVT), technical/organisational measures (TOM), data-subject
// requests (DSR), impact assessments (DSFA) and retention policies.
type DSGVOSummary struct {
	ProcessingActivities int `json:"processing_activities"`
	ActiveProcessings    int `json:"active_processings"`
	TOMsImplemented      int `json:"toms_implemented"`
	TOMsPlanned          int `json:"toms_planned"`
	TOMsTotal            int `json:"toms_total"` // implemented + planned
	CompletionPercent    int `json:"completion_percent"` // TOMsImplemented / total * 100
	OpenDSRs             int `json:"open_dsrs"`
	OverdueDSRs          int `json:"overdue_dsrs"` // open and past deadline
	DSFAsCompleted       int `json:"dsfas_completed"`
	RetentionPolicies    int `json:"retention_policies"`
}
// VendorSummary aggregates vendor-compliance counters, including a breakdown
// by risk level (keys come from the DB risk_level column, e.g. "HIGH").
type VendorSummary struct {
	TotalVendors     int            `json:"total_vendors"`
	ActiveVendors    int            `json:"active_vendors"`
	ByRiskLevel      map[string]int `json:"by_risk_level"`
	PendingReviews   int            `json:"pending_reviews"`   // next_review_date in the past
	ExpiredContracts int            `json:"expired_contracts"` // contract_end in the past
}
// IncidentSummary aggregates incident counters and the mean time to
// resolution over resolved incidents.
type IncidentSummary struct {
	TotalIncidents       int     `json:"total_incidents"`
	OpenIncidents        int     `json:"open_incidents"`
	CriticalIncidents    int     `json:"critical_incidents"` // open and severity CRITICAL
	NotificationsPending int     `json:"notifications_pending"`
	AvgResolutionHours   float64 `json:"avg_resolution_hours"`
}
// WhistleblowerSummary aggregates whistleblower-report counters, including
// statutory-deadline violations (overdue acknowledgments/feedbacks).
type WhistleblowerSummary struct {
	TotalReports           int     `json:"total_reports"`
	OpenReports            int     `json:"open_reports"` // not CLOSED / ARCHIVED
	OverdueAcknowledgments int     `json:"overdue_acknowledgments"`
	OverdueFeedbacks       int     `json:"overdue_feedbacks"`
	AvgResolutionDays      float64 `json:"avg_resolution_days"`
}
// AcademySummary aggregates training-platform counters.
type AcademySummary struct {
	TotalCourses      int     `json:"total_courses"`
	TotalEnrollments  int     `json:"total_enrollments"`
	CompletionRate    float64 `json:"completion_rate"` // 0-100
	OverdueCount      int     `json:"overdue_count"`
	AvgCompletionDays float64 `json:"avg_completion_days"`
}
// RiskOverview is the cross-module risk roll-up: an overall level, one
// ModuleRisk per module, and total/critical open finding counts.
type RiskOverview struct {
	OverallLevel     string       `json:"overall_level"` // LOW, MEDIUM, HIGH, CRITICAL
	ModuleRisks      []ModuleRisk `json:"module_risks"`
	OpenFindings     int          `json:"open_findings"`
	CriticalFindings int          `json:"critical_findings"` // findings in CRITICAL-level modules
}
// ModuleRisk is the risk rating of a single module; Level is derived from
// Score via riskLevel (higher score = lower risk).
type ModuleRisk struct {
	Module string `json:"module"`
	Level  string `json:"level"` // LOW, MEDIUM, HIGH, CRITICAL
	Score  int    `json:"score"` // 0-100
	Issues int    `json:"issues"`
}
// Deadline is a single upcoming (or overdue) compliance due date surfaced on
// the dashboard; DaysLeft is negative when the deadline has passed.
type Deadline struct {
	Module      string    `json:"module"`
	Type        string    `json:"type"`
	Description string    `json:"description"`
	DueDate     time.Time `json:"due_date"`
	DaysLeft    int       `json:"days_left"`
	Severity    string    `json:"severity"` // INFO, WARNING, URGENT, OVERDUE
}
// ActivityEntry is one row of the recent-activity feed (creations, updates,
// new reports) across modules, newest first.
type ActivityEntry struct {
	Timestamp   time.Time `json:"timestamp"`
	Module      string    `json:"module"`
	Action      string    `json:"action"`
	Description string    `json:"description"`
	UserID      string    `json:"user_id,omitempty"`
}

View File

@@ -1,516 +0,0 @@
package reporting
import (
"context"
"math"
"sort"
"time"
"github.com/breakpilot/ai-compliance-sdk/internal/academy"
"github.com/breakpilot/ai-compliance-sdk/internal/whistleblower"
"github.com/google/uuid"
"github.com/jackc/pgx/v5/pgxpool"
)
// Store generates executive compliance reports by combining direct SQL
// queries (DSGVO, vendors, incidents) with the whistleblower and academy
// module stores.
type Store struct {
	pool         *pgxpool.Pool
	whistleStore *whistleblower.Store
	academyStore *academy.Store
}
// NewStore creates a reporting store backed by the given connection pool and
// the whistleblower/academy module stores.
func NewStore(pool *pgxpool.Pool, ws *whistleblower.Store, as *academy.Store) *Store {
	s := Store{
		pool:         pool,
		whistleStore: ws,
		academyStore: as,
	}
	return &s
}
// GenerateReport assembles the full ExecutiveReport for a tenant: it gathers
// per-module summaries, derives the risk overview and weighted compliance
// score, then attaches upcoming deadlines and recent activity. Failures of
// individual module queries are tolerated -- the affected summary simply
// keeps its zero values -- so the method itself never returns an error in
// the current implementation.
func (s *Store) GenerateReport(ctx context.Context, tenantID uuid.UUID) (*ExecutiveReport, error) {
	report := &ExecutiveReport{
		GeneratedAt: time.Now().UTC(),
		TenantID:    tenantID.String(),
	}
	// 1. Gather DSGVO stats via direct SQL (Python is now primary for DSGVO)
	report.DSGVO = s.getDSGVOStats(ctx, tenantID)
	// 2. Gather vendor stats via direct SQL (Python is now primary for vendors)
	report.Vendors = s.getVendorStats(ctx, tenantID)
	// 3. Gather incident stats via direct SQL (Python is now primary for incidents)
	report.Incidents = s.getIncidentStats(ctx, tenantID)
	// 4. Gather whistleblower stats (skipped silently on error)
	whistleStats, err := s.whistleStore.GetStatistics(ctx, tenantID)
	if err == nil && whistleStats != nil {
		// "Open" means any status other than CLOSED/ARCHIVED.
		openReports := 0
		for status, count := range whistleStats.ByStatus {
			if status != "CLOSED" && status != "ARCHIVED" {
				openReports += count
			}
		}
		report.Whistleblower = WhistleblowerSummary{
			TotalReports:           whistleStats.TotalReports,
			OpenReports:            openReports,
			OverdueAcknowledgments: whistleStats.OverdueAcknowledgments,
			OverdueFeedbacks:       whistleStats.OverdueFeedbacks,
			AvgResolutionDays:      whistleStats.AvgResolutionDays,
		}
	}
	// 5. Gather academy stats (skipped silently on error)
	academyStats, err := s.academyStore.GetStatistics(ctx, tenantID)
	if err == nil && academyStats != nil {
		report.Academy = AcademySummary{
			TotalCourses:      academyStats.TotalCourses,
			TotalEnrollments:  academyStats.TotalEnrollments,
			CompletionRate:    academyStats.CompletionRate,
			OverdueCount:      academyStats.OverdueCount,
			AvgCompletionDays: academyStats.AvgCompletionDays,
		}
	}
	// 6. Calculate risk overview (depends on summaries above)
	report.RiskOverview = s.calculateRiskOverview(report)
	// 7. Calculate compliance score (0-100)
	report.ComplianceScore = s.calculateComplianceScore(report)
	// 8. Gather upcoming deadlines from DB
	report.UpcomingDeadlines = s.getUpcomingDeadlines(ctx, tenantID)
	// 9. Gather recent activity from DB
	report.RecentActivity = s.getRecentActivity(ctx, tenantID)
	return report, nil
}
// getDSGVOStats queries DSGVO tables directly (previously via dsgvo.Store).
// Every QueryRow error is deliberately discarded -- a failed query leaves the
// corresponding counters at zero.
func (s *Store) getDSGVOStats(ctx context.Context, tenantID uuid.UUID) DSGVOSummary {
	summary := DSGVOSummary{}
	// Processing activities (total and ACTIVE)
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*), COUNT(*) FILTER (WHERE status = 'ACTIVE') FROM compliance.vvt_entries WHERE tenant_id = $1`, tenantID,
	).Scan(&summary.ProcessingActivities, &summary.ActiveProcessings)
	// TOMs (implemented vs planned); completion percentage derived below
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*) FILTER (WHERE status = 'IMPLEMENTED'), COUNT(*) FILTER (WHERE status = 'PLANNED') FROM compliance.tom_entries WHERE tenant_id = $1`, tenantID,
	).Scan(&summary.TOMsImplemented, &summary.TOMsPlanned)
	summary.TOMsTotal = summary.TOMsImplemented + summary.TOMsPlanned
	if summary.TOMsTotal > 0 {
		summary.CompletionPercent = int(math.Round(float64(summary.TOMsImplemented) / float64(summary.TOMsTotal) * 100))
	}
	// DSRs (open, and open past deadline)
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*) FILTER (WHERE status NOT IN ('COMPLETED','REJECTED')), COUNT(*) FILTER (WHERE deadline < NOW() AND status NOT IN ('COMPLETED','REJECTED')) FROM compliance.dsr_requests WHERE tenant_id = $1`, tenantID,
	).Scan(&summary.OpenDSRs, &summary.OverdueDSRs)
	// DSFAs (completed data protection impact assessments)
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*) FROM compliance.dsfa_entries WHERE tenant_id = $1 AND status = 'COMPLETED'`, tenantID,
	).Scan(&summary.DSFAsCompleted)
	// Retention policies (Loeschfristen)
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*) FROM compliance.loeschfristen WHERE tenant_id = $1`, tenantID,
	).Scan(&summary.RetentionPolicies)
	return summary
}
// getVendorStats queries vendor tables directly (previously via vendor.Store).
// QueryRow errors are discarded (zero-value counters); the risk-level
// breakdown is skipped entirely when its query fails.
func (s *Store) getVendorStats(ctx context.Context, tenantID uuid.UUID) VendorSummary {
	summary := VendorSummary{ByRiskLevel: map[string]int{}}
	// Totals (all and ACTIVE)
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*), COUNT(*) FILTER (WHERE status = 'ACTIVE') FROM compliance.vendor_compliance WHERE tenant_id = $1`, tenantID,
	).Scan(&summary.TotalVendors, &summary.ActiveVendors)
	// Breakdown per risk level; NULL levels are bucketed as 'UNKNOWN'
	rows, err := s.pool.Query(ctx,
		`SELECT COALESCE(risk_level, 'UNKNOWN'), COUNT(*) FROM compliance.vendor_compliance WHERE tenant_id = $1 GROUP BY risk_level`, tenantID,
	)
	if err == nil {
		defer rows.Close()
		for rows.Next() {
			var level string
			var count int
			// Scan errors drop the row but keep iterating
			if rows.Scan(&level, &count) == nil {
				summary.ByRiskLevel[level] = count
			}
		}
	}
	// Reviews whose next_review_date has passed
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*) FROM compliance.vendor_compliance WHERE tenant_id = $1 AND next_review_date < NOW()`, tenantID,
	).Scan(&summary.PendingReviews)
	// Contracts whose end date has passed
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*) FROM compliance.vendor_compliance WHERE tenant_id = $1 AND contract_end < NOW()`, tenantID,
	).Scan(&summary.ExpiredContracts)
	return summary
}
// getIncidentStats queries incident tables directly (previously via
// incidents.Store). QueryRow errors are discarded (zero-value counters).
func (s *Store) getIncidentStats(ctx context.Context, tenantID uuid.UUID) IncidentSummary {
	summary := IncidentSummary{}
	// Totals: all incidents, open ones, and open CRITICAL ones
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*), COUNT(*) FILTER (WHERE status NOT IN ('RESOLVED','CLOSED')), COUNT(*) FILTER (WHERE severity = 'CRITICAL' AND status NOT IN ('RESOLVED','CLOSED')) FROM compliance.incidents WHERE tenant_id = $1`, tenantID,
	).Scan(&summary.TotalIncidents, &summary.OpenIncidents, &summary.CriticalIncidents)
	// Regulator notifications that are required but not yet sent
	_ = s.pool.QueryRow(ctx,
		`SELECT COUNT(*) FROM compliance.incidents WHERE tenant_id = $1 AND notification_required = true AND notification_sent = false`, tenantID,
	).Scan(&summary.NotificationsPending)
	// Mean hours from creation to resolution over resolved incidents
	_ = s.pool.QueryRow(ctx,
		`SELECT COALESCE(AVG(EXTRACT(EPOCH FROM (resolved_at - created_at))/3600), 0) FROM compliance.incidents WHERE tenant_id = $1 AND resolved_at IS NOT NULL`, tenantID,
	).Scan(&summary.AvgResolutionHours)
	return summary
}
// calculateRiskOverview derives per-module risk scores (0-100, higher is
// better) from the gathered summaries and rolls them up into an overall
// level plus finding counts.
//
// Fix: the DSGVO overdue-DSR penalty was previously dead code whenever
// TOMsTotal > 0, because the score was unconditionally overwritten by the
// completion percentage afterwards. The completion percentage is now the
// base score and the penalty is applied on top. Also dropped a pointless
// int(math.Round(float64(...))) round-trip on a value that is already int.
func (s *Store) calculateRiskOverview(report *ExecutiveReport) RiskOverview {
	modules := []ModuleRisk{}
	// DSGVO: base is TOM completion (100 when no TOMs exist yet), minus
	// 15 points per overdue DSR.
	dsgvoScore := 100
	if report.DSGVO.TOMsTotal > 0 {
		dsgvoScore = report.DSGVO.CompletionPercent
	}
	dsgvoScore -= report.DSGVO.OverdueDSRs * 15
	if dsgvoScore < 0 {
		dsgvoScore = 0
	}
	dsgvoIssues := report.DSGVO.OverdueDSRs + report.DSGVO.TOMsPlanned
	modules = append(modules, ModuleRisk{Module: "DSGVO", Level: riskLevel(dsgvoScore), Score: dsgvoScore, Issues: dsgvoIssues})
	// Vendors: base is the inverse share of HIGH/CRITICAL-risk vendors,
	// minus penalties for pending reviews and expired contracts.
	vendorScore := 100
	vendorIssues := report.Vendors.PendingReviews + report.Vendors.ExpiredContracts
	highRisk := 0
	if v, ok := report.Vendors.ByRiskLevel["HIGH"]; ok {
		highRisk += v
	}
	if v, ok := report.Vendors.ByRiskLevel["CRITICAL"]; ok {
		highRisk += v
	}
	if report.Vendors.TotalVendors > 0 {
		vendorScore = 100 - int(math.Round(float64(highRisk)/float64(report.Vendors.TotalVendors)*100))
	}
	vendorScore -= report.Vendors.PendingReviews * 5
	vendorScore -= report.Vendors.ExpiredContracts * 10
	if vendorScore < 0 {
		vendorScore = 0
	}
	modules = append(modules, ModuleRisk{Module: "Vendors", Level: riskLevel(vendorScore), Score: vendorScore, Issues: vendorIssues})
	// Incidents: penalties for critical incidents, open incidents and
	// pending regulator notifications.
	incidentScore := 100
	incidentIssues := report.Incidents.OpenIncidents
	incidentScore -= report.Incidents.CriticalIncidents * 20
	incidentScore -= report.Incidents.OpenIncidents * 5
	incidentScore -= report.Incidents.NotificationsPending * 15
	if incidentScore < 0 {
		incidentScore = 0
	}
	modules = append(modules, ModuleRisk{Module: "Incidents", Level: riskLevel(incidentScore), Score: incidentScore, Issues: incidentIssues})
	// Whistleblower: penalties for overdue acknowledgments and feedbacks.
	whistleScore := 100
	whistleIssues := report.Whistleblower.OverdueAcknowledgments + report.Whistleblower.OverdueFeedbacks
	whistleScore -= report.Whistleblower.OverdueAcknowledgments * 20
	whistleScore -= report.Whistleblower.OverdueFeedbacks * 10
	if whistleScore < 0 {
		whistleScore = 0
	}
	modules = append(modules, ModuleRisk{Module: "Whistleblower", Level: riskLevel(whistleScore), Score: whistleScore, Issues: whistleIssues})
	// Academy: training completion rate used directly as the score.
	academyScore := int(math.Round(report.Academy.CompletionRate))
	academyIssues := report.Academy.OverdueCount
	modules = append(modules, ModuleRisk{Module: "Academy", Level: riskLevel(academyScore), Score: academyScore, Issues: academyIssues})
	// Overall score is the (integer) average across modules.
	totalScore := 0
	for _, m := range modules {
		totalScore += m.Score
	}
	if len(modules) > 0 {
		totalScore = totalScore / len(modules)
	}
	// Findings: all module issues; critical findings are those in modules
	// rated CRITICAL.
	totalFindings := 0
	criticalFindings := 0
	for _, m := range modules {
		totalFindings += m.Issues
		if m.Level == "CRITICAL" {
			criticalFindings += m.Issues
		}
	}
	return RiskOverview{
		OverallLevel:     riskLevel(totalScore),
		ModuleRisks:      modules,
		OpenFindings:     totalFindings,
		CriticalFindings: criticalFindings,
	}
}
// riskLevel maps a 0-100 compliance score onto a risk-level label: a higher
// score means lower risk. Thresholds: >=75 LOW, >=50 MEDIUM, >=25 HIGH,
// otherwise CRITICAL.
func riskLevel(score int) string {
	if score >= 75 {
		return "LOW"
	}
	if score >= 50 {
		return "MEDIUM"
	}
	if score >= 25 {
		return "HIGH"
	}
	return "CRITICAL"
}
// calculateComplianceScore computes the weighted overall compliance score
// (0-100) from the module summaries. Weights: DSGVO 30, vendors 20,
// incidents 20, whistleblower 15, academy 15. The *Store receiver carries no
// state used here; the method is pure over the report argument.
func (s *Store) calculateComplianceScore(report *ExecutiveReport) int {
	// Parallel slices: scores[i] is weighted by weights[i].
	scores := []int{}
	weights := []int{}
	// DSGVO: weight 30 (most important); no TOMs at all scores 0
	if report.DSGVO.TOMsTotal > 0 {
		scores = append(scores, report.DSGVO.CompletionPercent)
	} else {
		scores = append(scores, 0)
	}
	weights = append(weights, 30)
	// Vendor compliance: weight 20; penalties only when vendors exist
	vendorScore := 100
	if report.Vendors.TotalVendors > 0 {
		vendorScore -= report.Vendors.PendingReviews * 10
		vendorScore -= report.Vendors.ExpiredContracts * 15
	}
	if vendorScore < 0 {
		vendorScore = 0
	}
	scores = append(scores, vendorScore)
	weights = append(weights, 20)
	// Incident handling: weight 20
	incidentScore := 100
	incidentScore -= report.Incidents.OpenIncidents * 10
	incidentScore -= report.Incidents.NotificationsPending * 20
	if incidentScore < 0 {
		incidentScore = 0
	}
	scores = append(scores, incidentScore)
	weights = append(weights, 20)
	// Whistleblower: weight 15
	whistleScore := 100
	whistleScore -= report.Whistleblower.OverdueAcknowledgments * 25
	whistleScore -= report.Whistleblower.OverdueFeedbacks * 15
	if whistleScore < 0 {
		whistleScore = 0
	}
	scores = append(scores, whistleScore)
	weights = append(weights, 15)
	// Academy: weight 15; completion rate used directly
	academyScore := int(math.Round(report.Academy.CompletionRate))
	scores = append(scores, academyScore)
	weights = append(weights, 15)
	// Weighted average, rounded to the nearest integer
	totalWeight := 0
	weightedSum := 0
	for i, sc := range scores {
		weightedSum += sc * weights[i]
		totalWeight += weights[i]
	}
	if totalWeight == 0 {
		return 0
	}
	return int(math.Round(float64(weightedSum) / float64(totalWeight)))
}
// getUpcomingDeadlines collects the nearest compliance due dates (vendor
// reviews and open DSR responses), classifies each by urgency, and returns
// up to 15 entries sorted by due date. Query errors silently yield fewer
// (or no) entries. Note the two sources use different urgency thresholds:
// vendor reviews 7/30 days, DSRs 3/14 days.
func (s *Store) getUpcomingDeadlines(ctx context.Context, tenantID uuid.UUID) []Deadline {
	deadlines := []Deadline{}
	now := time.Now().UTC()
	// Vendor reviews due (next 10 by review date, including past-due)
	rows, err := s.pool.Query(ctx, `
		SELECT name, next_review_date FROM compliance.vendor_compliance
		WHERE tenant_id = $1 AND next_review_date IS NOT NULL
		ORDER BY next_review_date ASC LIMIT 10
	`, tenantID)
	if err == nil {
		defer rows.Close()
		for rows.Next() {
			var name string
			var dueDate time.Time
			if err := rows.Scan(&name, &dueDate); err != nil {
				continue
			}
			// Truncating division: partial days count as 0
			daysLeft := int(dueDate.Sub(now).Hours() / 24)
			severity := "INFO"
			if daysLeft < 0 {
				severity = "OVERDUE"
			} else if daysLeft <= 7 {
				severity = "URGENT"
			} else if daysLeft <= 30 {
				severity = "WARNING"
			}
			deadlines = append(deadlines, Deadline{
				Module:      "Vendors",
				Type:        "REVIEW",
				Description: "Vendor-Review: " + name,
				DueDate:     dueDate,
				DaysLeft:    daysLeft,
				Severity:    severity,
			})
		}
	}
	// DSR deadlines for open requests (next 10 by deadline)
	rows2, err := s.pool.Query(ctx, `
		SELECT request_type, deadline FROM compliance.dsr_requests
		WHERE tenant_id = $1 AND status NOT IN ('COMPLETED', 'REJECTED')
		AND deadline IS NOT NULL
		ORDER BY deadline ASC LIMIT 10
	`, tenantID)
	if err == nil {
		defer rows2.Close()
		for rows2.Next() {
			var reqType string
			var dueDate time.Time
			if err := rows2.Scan(&reqType, &dueDate); err != nil {
				continue
			}
			daysLeft := int(dueDate.Sub(now).Hours() / 24)
			// Tighter thresholds than vendor reviews: DSRs have statutory
			// response windows
			severity := "INFO"
			if daysLeft < 0 {
				severity = "OVERDUE"
			} else if daysLeft <= 3 {
				severity = "URGENT"
			} else if daysLeft <= 14 {
				severity = "WARNING"
			}
			deadlines = append(deadlines, Deadline{
				Module:      "DSR",
				Type:        "RESPONSE",
				Description: "Betroffenenrecht: " + reqType,
				DueDate:     dueDate,
				DaysLeft:    daysLeft,
				Severity:    severity,
			})
		}
	}
	// Sort the merged list by due date ascending and cap at 15 entries
	sort.Slice(deadlines, func(i, j int) bool {
		return deadlines[i].DueDate.Before(deadlines[j].DueDate)
	})
	if len(deadlines) > 15 {
		deadlines = deadlines[:15]
	}
	return deadlines
}
// getRecentActivity collects the last 30 days of notable events (vendor
// creations/updates, incidents, whistleblower reports), merges them, and
// returns up to 20 entries newest-first. Query errors are ignored (the
// error returns are discarded with _), yielding fewer entries.
//
// NOTE(review): whistleblower_reports is queried without the "compliance."
// schema prefix used by the other queries here -- confirm it lives in the
// default search_path.
func (s *Store) getRecentActivity(ctx context.Context, tenantID uuid.UUID) []ActivityEntry {
	activities := []ActivityEntry{}
	// Recent vendors created/updated (UNION so an update after creation
	// appears as its own event; ORDER BY 2 sorts on the timestamp column)
	rows, _ := s.pool.Query(ctx, `
		SELECT name, created_at, 'CREATED' as action FROM compliance.vendor_compliance
		WHERE tenant_id = $1 AND created_at > NOW() - INTERVAL '30 days'
		UNION ALL
		SELECT name, updated_at, 'UPDATED' FROM compliance.vendor_compliance
		WHERE tenant_id = $1 AND updated_at > created_at AND updated_at > NOW() - INTERVAL '30 days'
		ORDER BY 2 DESC LIMIT 5
	`, tenantID)
	if rows != nil {
		defer rows.Close()
		for rows.Next() {
			var name, action string
			var ts time.Time
			if err := rows.Scan(&name, &ts, &action); err != nil {
				continue
			}
			desc := "Vendor "
			if action == "CREATED" {
				desc += "angelegt: "
			} else {
				desc += "aktualisiert: "
			}
			activities = append(activities, ActivityEntry{
				Timestamp:   ts,
				Module:      "Vendors",
				Action:      action,
				Description: desc + name,
			})
		}
	}
	// Recent incidents (data breaches), newest first
	rows2, _ := s.pool.Query(ctx, `
		SELECT title, created_at, severity FROM compliance.incidents
		WHERE tenant_id = $1 AND created_at > NOW() - INTERVAL '30 days'
		ORDER BY created_at DESC LIMIT 5
	`, tenantID)
	if rows2 != nil {
		defer rows2.Close()
		for rows2.Next() {
			var title, severity string
			var ts time.Time
			if err := rows2.Scan(&title, &ts, &severity); err != nil {
				continue
			}
			activities = append(activities, ActivityEntry{
				Timestamp:   ts,
				Module:      "Incidents",
				Action:      "CREATED",
				Description: "Datenpanne (" + severity + "): " + title,
			})
		}
	}
	// Recent whistleblower reports (admin view; only the category is
	// exposed, never report contents)
	rows3, _ := s.pool.Query(ctx, `
		SELECT category, created_at FROM whistleblower_reports
		WHERE tenant_id = $1 AND created_at > NOW() - INTERVAL '30 days'
		ORDER BY created_at DESC LIMIT 5
	`, tenantID)
	if rows3 != nil {
		defer rows3.Close()
		for rows3.Next() {
			var category string
			var ts time.Time
			if err := rows3.Scan(&category, &ts); err != nil {
				continue
			}
			activities = append(activities, ActivityEntry{
				Timestamp:   ts,
				Module:      "Whistleblower",
				Action:      "REPORT",
				Description: "Neue Meldung: " + category,
			})
		}
	}
	// Sort by timestamp descending (most recent first) and cap at 20
	sort.Slice(activities, func(i, j int) bool {
		return activities[i].Timestamp.After(activities[j].Timestamp)
	})
	if len(activities) > 20 {
		activities = activities[:20]
	}
	return activities
}

View File

@@ -1,158 +0,0 @@
// Package sso defines the data model for per-tenant single sign-on:
// provider configurations (OIDC today, SAML prepared), JIT-provisioned
// users, JWT claims, and the request/response types of the admin API.
package sso

import (
	"time"

	"github.com/google/uuid"
)

// ============================================================================
// Constants / Enums
// ============================================================================

// ProviderType represents the SSO authentication protocol.
type ProviderType string

const (
	// ProviderTypeOIDC represents OpenID Connect authentication.
	ProviderTypeOIDC ProviderType = "oidc"
	// ProviderTypeSAML represents SAML 2.0 authentication.
	ProviderTypeSAML ProviderType = "saml"
)

// ============================================================================
// Main Entities
// ============================================================================

// SSOConfig represents a per-tenant SSO provider configuration supporting
// OIDC and SAML authentication protocols. Depending on ProviderType, only
// one of the OIDC / SAML field groups is expected to be populated.
type SSOConfig struct {
	ID           uuid.UUID    `json:"id" db:"id"`
	TenantID     uuid.UUID    `json:"tenant_id" db:"tenant_id"`
	ProviderType ProviderType `json:"provider_type" db:"provider_type"`
	Name         string       `json:"name" db:"name"`
	Enabled      bool         `json:"enabled" db:"enabled"`
	// OIDC settings
	// NOTE(review): OIDCClientSecret has no `json:"-"` tag, so it is
	// serialized into API responses — confirm it is redacted upstream.
	OIDCIssuerURL    string   `json:"oidc_issuer_url,omitempty" db:"oidc_issuer_url"`
	OIDCClientID     string   `json:"oidc_client_id,omitempty" db:"oidc_client_id"`
	OIDCClientSecret string   `json:"oidc_client_secret,omitempty" db:"oidc_client_secret"`
	OIDCRedirectURI  string   `json:"oidc_redirect_uri,omitempty" db:"oidc_redirect_uri"`
	OIDCScopes       []string `json:"oidc_scopes,omitempty" db:"oidc_scopes"`
	// SAML settings (for future use)
	SAMLEntityID    string `json:"saml_entity_id,omitempty" db:"saml_entity_id"`
	SAMLSSOURL      string `json:"saml_sso_url,omitempty" db:"saml_sso_url"`
	SAMLCertificate string `json:"saml_certificate,omitempty" db:"saml_certificate"`
	SAMLACS_URL     string `json:"saml_acs_url,omitempty" db:"saml_acs_url"`
	// Role mapping: maps SSO group/role names to internal role IDs.
	// DefaultRoleID is applied when no mapping entry matches; AutoProvision
	// controls whether unknown users are created on first login (JIT).
	RoleMapping   map[string]string `json:"role_mapping" db:"role_mapping"`
	DefaultRoleID *uuid.UUID        `json:"default_role_id,omitempty" db:"default_role_id"`
	AutoProvision bool              `json:"auto_provision" db:"auto_provision"`
	// Audit
	CreatedAt time.Time `json:"created_at" db:"created_at"`
	UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}

// SSOUser represents a JIT-provisioned user authenticated via an SSO provider.
// ExternalID is the identity-provider-side subject; the (tenant, config,
// external_id) triple identifies a user uniquely (see UpsertUser's ON CONFLICT).
type SSOUser struct {
	ID          uuid.UUID  `json:"id" db:"id"`
	TenantID    uuid.UUID  `json:"tenant_id" db:"tenant_id"`
	SSOConfigID uuid.UUID  `json:"sso_config_id" db:"sso_config_id"`
	ExternalID  string     `json:"external_id" db:"external_id"`
	Email       string     `json:"email" db:"email"`
	DisplayName string     `json:"display_name" db:"display_name"`
	Groups      []string   `json:"groups" db:"groups"`
	LastLogin   *time.Time `json:"last_login,omitempty" db:"last_login"`
	IsActive    bool       `json:"is_active" db:"is_active"`
	// Audit
	CreatedAt time.Time `json:"created_at" db:"created_at"`
	UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}

// ============================================================================
// API Request Types
// ============================================================================

// CreateSSOConfigRequest is the API request for creating an SSO configuration.
// Omitted OIDCScopes / RoleMapping receive defaults at store level.
type CreateSSOConfigRequest struct {
	ProviderType     ProviderType      `json:"provider_type" binding:"required"`
	Name             string            `json:"name" binding:"required"`
	Enabled          bool              `json:"enabled"`
	OIDCIssuerURL    string            `json:"oidc_issuer_url"`
	OIDCClientID     string            `json:"oidc_client_id"`
	OIDCClientSecret string            `json:"oidc_client_secret"`
	OIDCRedirectURI  string            `json:"oidc_redirect_uri"`
	OIDCScopes       []string          `json:"oidc_scopes"`
	RoleMapping      map[string]string `json:"role_mapping"`
	DefaultRoleID    *uuid.UUID        `json:"default_role_id"`
	AutoProvision    bool              `json:"auto_provision"`
}

// UpdateSSOConfigRequest is the API request for partially updating an SSO
// configuration. Pointer fields allow distinguishing between "not provided"
// (nil) and "set to zero value". Slice/map fields use nil as "not provided".
type UpdateSSOConfigRequest struct {
	Name             *string           `json:"name"`
	Enabled          *bool             `json:"enabled"`
	OIDCIssuerURL    *string           `json:"oidc_issuer_url"`
	OIDCClientID     *string           `json:"oidc_client_id"`
	OIDCClientSecret *string           `json:"oidc_client_secret"`
	OIDCRedirectURI  *string           `json:"oidc_redirect_uri"`
	OIDCScopes       []string          `json:"oidc_scopes"`
	RoleMapping      map[string]string `json:"role_mapping"`
	DefaultRoleID    *uuid.UUID        `json:"default_role_id"`
	AutoProvision    *bool             `json:"auto_provision"`
}

// ============================================================================
// JWT / Session Types
// ============================================================================

// SSOClaims holds the claims embedded in JWT tokens issued after successful
// SSO authentication. These are used for downstream authorization decisions.
type SSOClaims struct {
	UserID      uuid.UUID `json:"user_id"`
	TenantID    uuid.UUID `json:"tenant_id"`
	Email       string    `json:"email"`
	DisplayName string    `json:"display_name"`
	Roles       []string  `json:"roles"`
	SSOConfigID uuid.UUID `json:"sso_config_id"`
}

// ============================================================================
// List / Filter Types
// ============================================================================

// SSOConfigFilters defines filters for listing SSO configurations.
// Zero values mean "no filter"; Limit/Offset drive pagination.
type SSOConfigFilters struct {
	ProviderType ProviderType
	Enabled      *bool
	Search       string
	Limit        int
	Offset       int
}

// SSOUserFilters defines filters for listing SSO users.
type SSOUserFilters struct {
	SSOConfigID *uuid.UUID
	Email       string
	IsActive    *bool
	Limit       int
	Offset      int
}

// SSOConfigListResponse is the API response for listing SSO configurations.
type SSOConfigListResponse struct {
	Configs []SSOConfig `json:"configs"`
	Total   int         `json:"total"`
}

// SSOUserListResponse is the API response for listing SSO users.
type SSOUserListResponse struct {
	Users []SSOUser `json:"users"`
	Total int       `json:"total"`
}

View File

@@ -1,477 +0,0 @@
package sso
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)
// Store handles SSO configuration and user data persistence.
// All queries are tenant-scoped; the underlying pgx pool is shared and
// safe for concurrent use.
type Store struct {
	pool *pgxpool.Pool // connection pool; lifecycle owned by the caller
}

// NewStore creates a new SSO store backed by the given connection pool.
// The store never closes the pool.
func NewStore(pool *pgxpool.Pool) *Store {
	return &Store{pool: pool}
}
// ============================================================================
// SSO Configuration CRUD Operations
// ============================================================================
// CreateConfig creates a new SSO configuration for a tenant.
//
// Defaults: when the request omits OIDC scopes they fall back to
// "openid profile email", and a nil role mapping becomes an empty map.
// Returns the stored configuration, or an error if JSON marshalling of the
// role mapping or the INSERT fails.
func (s *Store) CreateConfig(ctx context.Context, tenantID uuid.UUID, req *CreateSSOConfigRequest) (*SSOConfig, error) {
	// Resolve request defaults up front so the struct literal below is final.
	scopes := req.OIDCScopes
	if len(scopes) == 0 {
		scopes = []string{"openid", "profile", "email"}
	}
	mapping := req.RoleMapping
	if mapping == nil {
		mapping = map[string]string{}
	}

	ts := time.Now().UTC()
	cfg := &SSOConfig{
		ID:               uuid.New(),
		TenantID:         tenantID,
		ProviderType:     req.ProviderType,
		Name:             req.Name,
		Enabled:          req.Enabled,
		OIDCIssuerURL:    req.OIDCIssuerURL,
		OIDCClientID:     req.OIDCClientID,
		OIDCClientSecret: req.OIDCClientSecret,
		OIDCRedirectURI:  req.OIDCRedirectURI,
		OIDCScopes:       scopes,
		RoleMapping:      mapping,
		DefaultRoleID:    req.DefaultRoleID,
		AutoProvision:    req.AutoProvision,
		CreatedAt:        ts,
		UpdatedAt:        ts,
	}

	// role_mapping is a JSONB column; persist it as serialized JSON.
	mappingJSON, err := json.Marshal(cfg.RoleMapping)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal role_mapping: %w", err)
	}

	if _, err = s.pool.Exec(ctx, `
		INSERT INTO sso_configurations (
			id, tenant_id, provider_type, name, enabled,
			oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes,
			saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url,
			role_mapping, default_role_id, auto_provision,
			created_at, updated_at
		) VALUES (
			$1, $2, $3, $4, $5,
			$6, $7, $8, $9, $10,
			$11, $12, $13, $14,
			$15, $16, $17,
			$18, $19
		)
	`,
		cfg.ID, cfg.TenantID, string(cfg.ProviderType), cfg.Name, cfg.Enabled,
		cfg.OIDCIssuerURL, cfg.OIDCClientID, cfg.OIDCClientSecret, cfg.OIDCRedirectURI, cfg.OIDCScopes,
		cfg.SAMLEntityID, cfg.SAMLSSOURL, cfg.SAMLCertificate, cfg.SAMLACS_URL,
		mappingJSON, cfg.DefaultRoleID, cfg.AutoProvision,
		cfg.CreatedAt, cfg.UpdatedAt,
	); err != nil {
		return nil, fmt.Errorf("failed to insert sso configuration: %w", err)
	}
	return cfg, nil
}
// GetConfig retrieves an SSO configuration by ID and tenant.
// Returns (nil, nil) when no matching row exists, so callers must check
// for a nil config before dereferencing.
func (s *Store) GetConfig(ctx context.Context, tenantID, configID uuid.UUID) (*SSOConfig, error) {
	var cfg SSOConfig
	var providerType string
	var roleMappingJSON []byte
	err := s.pool.QueryRow(ctx, `
		SELECT
			id, tenant_id, provider_type, name, enabled,
			oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes,
			saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url,
			role_mapping, default_role_id, auto_provision,
			created_at, updated_at
		FROM sso_configurations
		WHERE id = $1 AND tenant_id = $2
	`, configID, tenantID).Scan(
		&cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled,
		&cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes,
		&cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL,
		&roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision,
		&cfg.CreatedAt, &cfg.UpdatedAt,
	)
	// Use errors.Is: pgx may wrap ErrNoRows, which a plain == comparison misses.
	if errors.Is(err, pgx.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get sso configuration: %w", err)
	}
	cfg.ProviderType = ProviderType(providerType)
	cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON)
	return &cfg, nil
}
// GetConfigByName retrieves an SSO configuration by name and tenant.
// Returns (nil, nil) when no matching row exists.
func (s *Store) GetConfigByName(ctx context.Context, tenantID uuid.UUID, name string) (*SSOConfig, error) {
	var cfg SSOConfig
	var providerType string
	var roleMappingJSON []byte
	err := s.pool.QueryRow(ctx, `
		SELECT
			id, tenant_id, provider_type, name, enabled,
			oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes,
			saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url,
			role_mapping, default_role_id, auto_provision,
			created_at, updated_at
		FROM sso_configurations
		WHERE tenant_id = $1 AND name = $2
	`, tenantID, name).Scan(
		&cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled,
		&cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes,
		&cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL,
		&roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision,
		&cfg.CreatedAt, &cfg.UpdatedAt,
	)
	// Use errors.Is: pgx may wrap ErrNoRows, which a plain == comparison misses.
	if errors.Is(err, pgx.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get sso configuration by name: %w", err)
	}
	cfg.ProviderType = ProviderType(providerType)
	cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON)
	return &cfg, nil
}
// ListConfigs lists all SSO configurations for a tenant, ordered by name.
// Returns a nil slice when the tenant has no configurations.
func (s *Store) ListConfigs(ctx context.Context, tenantID uuid.UUID) ([]SSOConfig, error) {
	rows, err := s.pool.Query(ctx, `
		SELECT
			id, tenant_id, provider_type, name, enabled,
			oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes,
			saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url,
			role_mapping, default_role_id, auto_provision,
			created_at, updated_at
		FROM sso_configurations
		WHERE tenant_id = $1
		ORDER BY name ASC
	`, tenantID)
	if err != nil {
		return nil, fmt.Errorf("failed to list sso configurations: %w", err)
	}
	defer rows.Close()
	var configs []SSOConfig
	for rows.Next() {
		cfg, err := scanSSOConfig(rows)
		if err != nil {
			return nil, err
		}
		configs = append(configs, *cfg)
	}
	// rows.Next() returns false both at EOF and on error (e.g. a connection
	// dropped mid-stream); rows.Err() distinguishes the two.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to list sso configurations: %w", err)
	}
	return configs, nil
}
// UpdateConfig updates an existing SSO configuration with partial updates.
// Only non-nil request fields overwrite the stored values; nil means
// "leave unchanged". Returns the updated configuration, or an error if the
// configuration does not exist or the write fails.
//
// NOTE(review): this is a read-modify-write without a transaction or row
// lock — concurrent updates to the same config can lose writes. Confirm
// whether that matters for this admin-only path.
func (s *Store) UpdateConfig(ctx context.Context, tenantID, configID uuid.UUID, req *UpdateSSOConfigRequest) (*SSOConfig, error) {
	// Load the current state; partial updates are applied in memory below.
	cfg, err := s.GetConfig(ctx, tenantID, configID)
	if err != nil {
		return nil, err
	}
	if cfg == nil {
		return nil, fmt.Errorf("sso configuration not found")
	}
	// Apply partial updates (pointer fields: nil = not provided).
	if req.Name != nil {
		cfg.Name = *req.Name
	}
	if req.Enabled != nil {
		cfg.Enabled = *req.Enabled
	}
	if req.OIDCIssuerURL != nil {
		cfg.OIDCIssuerURL = *req.OIDCIssuerURL
	}
	if req.OIDCClientID != nil {
		cfg.OIDCClientID = *req.OIDCClientID
	}
	if req.OIDCClientSecret != nil {
		cfg.OIDCClientSecret = *req.OIDCClientSecret
	}
	if req.OIDCRedirectURI != nil {
		cfg.OIDCRedirectURI = *req.OIDCRedirectURI
	}
	// Slice/map fields: nil = not provided (an empty slice/map IS an update).
	if req.OIDCScopes != nil {
		cfg.OIDCScopes = req.OIDCScopes
	}
	if req.RoleMapping != nil {
		cfg.RoleMapping = req.RoleMapping
	}
	if req.DefaultRoleID != nil {
		cfg.DefaultRoleID = req.DefaultRoleID
	}
	if req.AutoProvision != nil {
		cfg.AutoProvision = *req.AutoProvision
	}
	cfg.UpdatedAt = time.Now().UTC()
	// role_mapping is a JSONB column; persist the merged map as JSON.
	roleMappingJSON, err := json.Marshal(cfg.RoleMapping)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal role_mapping: %w", err)
	}
	_, err = s.pool.Exec(ctx, `
		UPDATE sso_configurations SET
			name = $3, enabled = $4,
			oidc_issuer_url = $5, oidc_client_id = $6, oidc_client_secret = $7,
			oidc_redirect_uri = $8, oidc_scopes = $9,
			saml_entity_id = $10, saml_sso_url = $11, saml_certificate = $12, saml_acs_url = $13,
			role_mapping = $14, default_role_id = $15, auto_provision = $16,
			updated_at = $17
		WHERE id = $1 AND tenant_id = $2
	`,
		cfg.ID, cfg.TenantID,
		cfg.Name, cfg.Enabled,
		cfg.OIDCIssuerURL, cfg.OIDCClientID, cfg.OIDCClientSecret,
		cfg.OIDCRedirectURI, cfg.OIDCScopes,
		cfg.SAMLEntityID, cfg.SAMLSSOURL, cfg.SAMLCertificate, cfg.SAMLACS_URL,
		roleMappingJSON, cfg.DefaultRoleID, cfg.AutoProvision,
		cfg.UpdatedAt,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to update sso configuration: %w", err)
	}
	return cfg, nil
}
// DeleteConfig deletes an SSO configuration by ID and tenant.
// Deleting a configuration that does not exist is not an error.
func (s *Store) DeleteConfig(ctx context.Context, tenantID, configID uuid.UUID) error {
	const stmt = "DELETE FROM sso_configurations WHERE id = $1 AND tenant_id = $2"
	if _, err := s.pool.Exec(ctx, stmt, configID, tenantID); err != nil {
		return fmt.Errorf("failed to delete sso configuration: %w", err)
	}
	return nil
}
// GetEnabledConfig retrieves the active/enabled SSO configuration for a
// tenant. Returns (nil, nil) when no enabled configuration exists.
//
// NOTE(review): LIMIT 1 without ORDER BY — if multiple configs are enabled,
// which one is returned is unspecified. Confirm a uniqueness constraint or
// add a deterministic ordering.
func (s *Store) GetEnabledConfig(ctx context.Context, tenantID uuid.UUID) (*SSOConfig, error) {
	var cfg SSOConfig
	var providerType string
	var roleMappingJSON []byte
	err := s.pool.QueryRow(ctx, `
		SELECT
			id, tenant_id, provider_type, name, enabled,
			oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes,
			saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url,
			role_mapping, default_role_id, auto_provision,
			created_at, updated_at
		FROM sso_configurations
		WHERE tenant_id = $1 AND enabled = true
		LIMIT 1
	`, tenantID).Scan(
		&cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled,
		&cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes,
		&cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL,
		&roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision,
		&cfg.CreatedAt, &cfg.UpdatedAt,
	)
	// Use errors.Is: pgx may wrap ErrNoRows, which a plain == comparison misses.
	if errors.Is(err, pgx.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get enabled sso configuration: %w", err)
	}
	cfg.ProviderType = ProviderType(providerType)
	cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON)
	return &cfg, nil
}
// ============================================================================
// SSO User Operations
// ============================================================================
// UpsertUser inserts or updates an SSO user via JIT provisioning.
// On conflict (tenant_id, sso_config_id, external_id), the user's email,
// display name, groups, and last login timestamp are updated and the user
// is reactivated (is_active = true). Returns the row as stored, including
// the original created_at when the user already existed.
func (s *Store) UpsertUser(ctx context.Context, tenantID, ssoConfigID uuid.UUID, externalID, email, displayName string, groups []string) (*SSOUser, error) {
	now := time.Now().UTC()
	// Candidate ID for the insert path; on conflict the existing row's ID
	// is kept and returned instead.
	id := uuid.New()
	var user SSOUser
	// Parameter $8 (now) is deliberately reused for last_login, created_at
	// and updated_at on insert; on update only last_login/updated_at change.
	err := s.pool.QueryRow(ctx, `
		INSERT INTO sso_users (
			id, tenant_id, sso_config_id,
			external_id, email, display_name, groups,
			last_login, is_active,
			created_at, updated_at
		) VALUES (
			$1, $2, $3,
			$4, $5, $6, $7,
			$8, true,
			$8, $8
		)
		ON CONFLICT (tenant_id, sso_config_id, external_id) DO UPDATE SET
			email = EXCLUDED.email,
			display_name = EXCLUDED.display_name,
			groups = EXCLUDED.groups,
			last_login = EXCLUDED.last_login,
			is_active = true,
			updated_at = EXCLUDED.updated_at
		RETURNING
			id, tenant_id, sso_config_id,
			external_id, email, display_name, groups,
			last_login, is_active,
			created_at, updated_at
	`,
		id, tenantID, ssoConfigID,
		externalID, email, displayName, groups,
		now,
	).Scan(
		&user.ID, &user.TenantID, &user.SSOConfigID,
		&user.ExternalID, &user.Email, &user.DisplayName, &user.Groups,
		&user.LastLogin, &user.IsActive,
		&user.CreatedAt, &user.UpdatedAt,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to upsert sso user: %w", err)
	}
	return &user, nil
}
// GetUserByExternalID looks up an SSO user by their external identity
// provider ID, scoped to a tenant and configuration. Returns (nil, nil)
// when no matching user exists.
func (s *Store) GetUserByExternalID(ctx context.Context, tenantID, ssoConfigID uuid.UUID, externalID string) (*SSOUser, error) {
	var user SSOUser
	err := s.pool.QueryRow(ctx, `
		SELECT
			id, tenant_id, sso_config_id,
			external_id, email, display_name, groups,
			last_login, is_active,
			created_at, updated_at
		FROM sso_users
		WHERE tenant_id = $1 AND sso_config_id = $2 AND external_id = $3
	`, tenantID, ssoConfigID, externalID).Scan(
		&user.ID, &user.TenantID, &user.SSOConfigID,
		&user.ExternalID, &user.Email, &user.DisplayName, &user.Groups,
		&user.LastLogin, &user.IsActive,
		&user.CreatedAt, &user.UpdatedAt,
	)
	// Use errors.Is: pgx may wrap ErrNoRows, which a plain == comparison misses.
	if errors.Is(err, pgx.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get sso user by external id: %w", err)
	}
	return &user, nil
}
// ListUsers lists all SSO-provisioned users for a tenant, ordered by
// display name. Returns a nil slice when the tenant has no SSO users.
func (s *Store) ListUsers(ctx context.Context, tenantID uuid.UUID) ([]SSOUser, error) {
	rows, err := s.pool.Query(ctx, `
		SELECT
			id, tenant_id, sso_config_id,
			external_id, email, display_name, groups,
			last_login, is_active,
			created_at, updated_at
		FROM sso_users
		WHERE tenant_id = $1
		ORDER BY display_name ASC
	`, tenantID)
	if err != nil {
		return nil, fmt.Errorf("failed to list sso users: %w", err)
	}
	defer rows.Close()
	var users []SSOUser
	for rows.Next() {
		user, err := scanSSOUser(rows)
		if err != nil {
			return nil, err
		}
		users = append(users, *user)
	}
	// rows.Next() returns false both at EOF and on error (e.g. a connection
	// dropped mid-stream); rows.Err() distinguishes the two.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to list sso users: %w", err)
	}
	return users, nil
}
// ============================================================================
// Row Scanning Helpers
// ============================================================================
// scanSSOConfig scans an SSO configuration row from pgx.Rows.
// The scan target order MUST match the SELECT column order used by
// ListConfigs (and the other config queries); do not reorder.
func scanSSOConfig(rows pgx.Rows) (*SSOConfig, error) {
	var cfg SSOConfig
	var providerType string
	var roleMappingJSON []byte
	err := rows.Scan(
		&cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled,
		&cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes,
		&cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL,
		&roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision,
		&cfg.CreatedAt, &cfg.UpdatedAt,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to scan sso configuration: %w", err)
	}
	// provider_type and role_mapping need post-scan conversion (enum cast
	// and JSONB unmarshal respectively).
	cfg.ProviderType = ProviderType(providerType)
	cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON)
	return &cfg, nil
}
// scanSSOUser scans an SSO user row from pgx.Rows.
// The scan target order MUST match the SELECT column order used by
// ListUsers; do not reorder.
func scanSSOUser(rows pgx.Rows) (*SSOUser, error) {
	var user SSOUser
	err := rows.Scan(
		&user.ID, &user.TenantID, &user.SSOConfigID,
		&user.ExternalID, &user.Email, &user.DisplayName, &user.Groups,
		&user.LastLogin, &user.IsActive,
		&user.CreatedAt, &user.UpdatedAt,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to scan sso user: %w", err)
	}
	return &user, nil
}
// unmarshalRoleMapping safely unmarshals JSONB role_mapping bytes into a map.
// It always returns a non-nil (possibly empty) map: nil/empty input, invalid
// JSON, and the JSON literal "null" (which json.Unmarshal decodes into a nil
// map) all yield an empty map, so callers never need a nil check.
func unmarshalRoleMapping(data []byte) map[string]string {
	if len(data) == 0 {
		return map[string]string{}
	}
	var m map[string]string
	if err := json.Unmarshal(data, &m); err != nil || m == nil {
		return map[string]string{}
	}
	return m
}

View File

@@ -36,7 +36,6 @@ async def list_ai_systems(
db: Session = Depends(get_db),
):
"""List all registered AI systems."""
import uuid as _uuid
query = db.query(AISystemDB)
if classification:
@@ -88,7 +87,6 @@ async def create_ai_system(
):
"""Register a new AI system."""
import uuid as _uuid
from datetime import datetime
try:
cls_enum = AIClassificationEnum(data.classification) if data.classification else AIClassificationEnum.UNCLASSIFIED

View File

@@ -26,7 +26,7 @@ from ..db.models import (
)
from .schemas import (
CreateAuditSessionRequest, AuditSessionResponse, AuditSessionSummary, AuditSessionDetailResponse,
AuditSessionListResponse, SignOffRequest, SignOffResponse,
SignOffRequest, SignOffResponse,
AuditChecklistItem, AuditChecklistResponse, AuditStatistics,
PaginationMeta,
)
@@ -164,7 +164,7 @@ async def get_audit_session(
completion_percentage=session.completion_percentage,
)
return AuditSessionDetail(
return AuditSessionDetailResponse(
id=session.id,
name=session.name,
description=session.description,

View File

@@ -12,7 +12,6 @@ from typing import Optional, List
from fastapi import APIRouter, Depends, HTTPException, Query, Header
from pydantic import BaseModel
from sqlalchemy.orm import Session
from sqlalchemy import func
from classroom_engine.database import get_db
from ..db.banner_models import (
@@ -317,12 +316,12 @@ async def get_site_config(
categories = db.query(BannerCategoryConfigDB).filter(
BannerCategoryConfigDB.site_config_id == config.id,
BannerCategoryConfigDB.is_active == True,
BannerCategoryConfigDB.is_active,
).order_by(BannerCategoryConfigDB.sort_order).all()
vendors = db.query(BannerVendorConfigDB).filter(
BannerVendorConfigDB.site_config_id == config.id,
BannerVendorConfigDB.is_active == True,
BannerVendorConfigDB.is_active,
).all()
result = _site_config_to_dict(config)

View File

@@ -96,8 +96,8 @@ def generate_change_requests_for_use_case(
trigger_type="use_case_high_risk",
target_document_type="dsfa",
proposal_title=f"DSFA erstellen für '{title}' (Risiko: {risk_level})",
proposal_body=f"Ein neuer Use Case mit hohem Risiko wurde erstellt. "
f"Art. 35 DSGVO verlangt eine DSFA für Hochrisiko-Verarbeitungen.",
proposal_body="Ein neuer Use Case mit hohem Risiko wurde erstellt. "
"Art. 35 DSGVO verlangt eine DSFA für Hochrisiko-Verarbeitungen.",
proposed_changes={
"source": "use_case",
"title": title,

View File

@@ -14,8 +14,7 @@ Endpoints:
import json
import logging
from datetime import datetime
from typing import Optional, List
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, Query, Header
from pydantic import BaseModel

View File

@@ -11,7 +11,6 @@ Endpoints:
import json
import logging
import uuid
from typing import Optional
from fastapi import APIRouter, HTTPException, Header
@@ -127,16 +126,68 @@ class AuditListResponse(BaseModel):
# SQL column lists — keep in sync with SELECT/INSERT
# =============================================================================
_BASE_COLUMNS = """id, tenant_id, company_name, legal_form, industry, founded_year,
business_model, offerings, company_size, employee_count, annual_revenue,
headquarters_country, headquarters_city, has_international_locations,
international_countries, target_markets, primary_jurisdiction,
is_data_controller, is_data_processor, uses_ai, ai_use_cases,
dpo_name, dpo_email, legal_contact_name, legal_contact_email,
machine_builder, is_complete, completed_at, created_at, updated_at,
repos, document_sources, processing_systems, ai_systems, technical_contacts,
subject_to_nis2, subject_to_ai_act, subject_to_iso27001,
supervisory_authority, review_cycle_months"""
_BASE_COLUMNS_LIST = [
"id", "tenant_id", "company_name", "legal_form", "industry", "founded_year",
"business_model", "offerings", "company_size", "employee_count", "annual_revenue",
"headquarters_country", "headquarters_city", "has_international_locations",
"international_countries", "target_markets", "primary_jurisdiction",
"is_data_controller", "is_data_processor", "uses_ai", "ai_use_cases",
"dpo_name", "dpo_email", "legal_contact_name", "legal_contact_email",
"machine_builder", "is_complete", "completed_at", "created_at", "updated_at",
"repos", "document_sources", "processing_systems", "ai_systems", "technical_contacts",
"subject_to_nis2", "subject_to_ai_act", "subject_to_iso27001",
"supervisory_authority", "review_cycle_months",
]
_BASE_COLUMNS = ", ".join(_BASE_COLUMNS_LIST)
# Per-field defaults and type coercions for row_to_response.
# Each entry is (field_name, default_value, expected_type_or_None).
# - expected_type: if set, the value is checked with isinstance; if it fails,
# default_value is used instead.
# - Special sentinels: "STR" means str(value), "STR_OR_NONE" means str(v) if v else None.
_FIELD_DEFAULTS = {
"id": (None, "STR"),
"tenant_id": (None, None),
"company_name": ("", None),
"legal_form": ("GmbH", None),
"industry": ("", None),
"founded_year": (None, None),
"business_model": ("B2B", None),
"offerings": ([], list),
"company_size": ("small", None),
"employee_count": ("1-9", None),
"annual_revenue": ("< 2 Mio", None),
"headquarters_country": ("DE", None),
"headquarters_city": ("", None),
"has_international_locations": (False, None),
"international_countries": ([], list),
"target_markets": (["DE"], list),
"primary_jurisdiction": ("DE", None),
"is_data_controller": (True, None),
"is_data_processor": (False, None),
"uses_ai": (False, None),
"ai_use_cases": ([], list),
"dpo_name": (None, None),
"dpo_email": (None, None),
"legal_contact_name": (None, None),
"legal_contact_email": (None, None),
"machine_builder": (None, dict),
"is_complete": (False, None),
"completed_at": (None, "STR_OR_NONE"),
"created_at": (None, "STR"),
"updated_at": (None, "STR"),
"repos": ([], list),
"document_sources": ([], list),
"processing_systems": ([], list),
"ai_systems": ([], list),
"technical_contacts": ([], list),
"subject_to_nis2": (False, None),
"subject_to_ai_act": (False, None),
"subject_to_iso27001": (False, None),
"supervisory_authority": (None, None),
"review_cycle_months": (12, None),
}
# =============================================================================
@@ -144,50 +195,29 @@ _BASE_COLUMNS = """id, tenant_id, company_name, legal_form, industry, founded_ye
# =============================================================================
def row_to_response(row) -> CompanyProfileResponse:
"""Convert a DB row to response model."""
return CompanyProfileResponse(
id=str(row[0]),
tenant_id=row[1],
company_name=row[2] or "",
legal_form=row[3] or "GmbH",
industry=row[4] or "",
founded_year=row[5],
business_model=row[6] or "B2B",
offerings=row[7] if isinstance(row[7], list) else [],
company_size=row[8] or "small",
employee_count=row[9] or "1-9",
annual_revenue=row[10] or "< 2 Mio",
headquarters_country=row[11] or "DE",
headquarters_city=row[12] or "",
has_international_locations=row[13] or False,
international_countries=row[14] if isinstance(row[14], list) else [],
target_markets=row[15] if isinstance(row[15], list) else ["DE"],
primary_jurisdiction=row[16] or "DE",
is_data_controller=row[17] if row[17] is not None else True,
is_data_processor=row[18] or False,
uses_ai=row[19] or False,
ai_use_cases=row[20] if isinstance(row[20], list) else [],
dpo_name=row[21],
dpo_email=row[22],
legal_contact_name=row[23],
legal_contact_email=row[24],
machine_builder=row[25] if isinstance(row[25], dict) else None,
is_complete=row[26] or False,
completed_at=str(row[27]) if row[27] else None,
created_at=str(row[28]),
updated_at=str(row[29]),
# Phase 2 fields (indices 30-39)
repos=row[30] if isinstance(row[30], list) else [],
document_sources=row[31] if isinstance(row[31], list) else [],
processing_systems=row[32] if isinstance(row[32], list) else [],
ai_systems=row[33] if isinstance(row[33], list) else [],
technical_contacts=row[34] if isinstance(row[34], list) else [],
subject_to_nis2=row[35] or False,
subject_to_ai_act=row[36] or False,
subject_to_iso27001=row[37] or False,
supervisory_authority=row[38],
review_cycle_months=row[39] or 12,
)
"""Convert a DB row to response model using zip-based column mapping."""
raw = dict(zip(_BASE_COLUMNS_LIST, row))
coerced: dict = {}
for col in _BASE_COLUMNS_LIST:
default, expected_type = _FIELD_DEFAULTS[col]
value = raw[col]
if expected_type == "STR":
coerced[col] = str(value)
elif expected_type == "STR_OR_NONE":
coerced[col] = str(value) if value else None
elif expected_type is not None:
# Type-checked field (list / dict): use value only if it matches
coerced[col] = value if isinstance(value, expected_type) else default
else:
# is_data_controller needs special None-check (True when NULL)
if col == "is_data_controller":
coerced[col] = value if value is not None else default
else:
coerced[col] = value or default if default is not None else value
return CompanyProfileResponse(**coerced)
def log_audit(db, tenant_id: str, action: str, changed_fields: Optional[dict], changed_by: Optional[str]):

View File

@@ -12,7 +12,7 @@ Endpoints:
import logging
from datetime import datetime
from typing import Optional, List
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, Header
from pydantic import BaseModel

View File

@@ -21,7 +21,7 @@ Usage:
import logging
from datetime import datetime
from typing import Any, Dict, List, Optional, Callable
from typing import Any, Dict, List, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy import text

View File

@@ -42,7 +42,7 @@ def generate_loeschfristen_drafts(ctx: dict) -> list[dict]:
"responsible": ctx.get("dpo_name", "DSB"),
"status": "draft",
"review_cycle_months": ctx.get("review_cycle_months", 12),
"notes": f"Automatisch generiert aus Stammdaten. Bitte prüfen und anpassen.",
"notes": "Automatisch generiert aus Stammdaten. Bitte prüfen und anpassen.",
}
policies.append(policy)

View File

@@ -51,7 +51,6 @@ def generate_tom_drafts(ctx: dict) -> list[dict]:
measures.extend(_AI_ACT_TOMS)
# Enrich with metadata
company = ctx.get("company_name", "")
result = []
for i, m in enumerate(measures, 1):
result.append({

View File

@@ -33,7 +33,6 @@ from classroom_engine.database import get_db
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/dsfa", tags=["compliance-dsfa"])
from .tenant_utils import get_tenant_id as _shared_get_tenant_id
# Legacy compat — still used by _get_tenant_id() below; will be removed once
# all call-sites switch to Depends(get_tenant_id).
@@ -855,7 +854,7 @@ async def approve_dsfa(
if request.approved:
new_status = "approved"
row = db.execute(
db.execute(
text("""
UPDATE compliance_dsfas
SET status = 'approved', approved_by = :approved_by, approved_at = NOW(), updated_at = NOW()
@@ -866,7 +865,7 @@ async def approve_dsfa(
).fetchone()
else:
new_status = "needs-update"
row = db.execute(
db.execute(
text("""
UPDATE compliance_dsfas
SET status = 'needs-update', updated_at = NOW()

View File

@@ -14,7 +14,7 @@ from fastapi import APIRouter, Depends, HTTPException, Query, Header
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session
from sqlalchemy import text, func, and_, or_, cast, String
from sqlalchemy import text, func, and_, or_
from classroom_engine.database import get_db
from ..db.dsr_models import (
@@ -574,7 +574,7 @@ async def get_published_templates(
"""Gibt publizierte Vorlagen zurueck."""
query = db.query(DSRTemplateDB).filter(
DSRTemplateDB.tenant_id == uuid.UUID(tenant_id),
DSRTemplateDB.is_active == True,
DSRTemplateDB.is_active,
DSRTemplateDB.language == language,
)
if request_type:

View File

@@ -6,14 +6,12 @@ Inklusive Versionierung, Approval-Workflow, Vorschau und Send-Logging.
"""
import uuid
import re
from datetime import datetime
from typing import Optional, List, Dict, Any
from typing import Optional, Dict
from fastapi import APIRouter, Depends, HTTPException, Query, Header
from pydantic import BaseModel
from sqlalchemy.orm import Session
from sqlalchemy import func
from classroom_engine.database import get_db
from ..db.email_template_models import (
@@ -182,7 +180,7 @@ async def get_stats(
base = db.query(EmailTemplateDB).filter(EmailTemplateDB.tenant_id == tid)
total = base.count()
active = base.filter(EmailTemplateDB.is_active == True).count()
active = base.filter(EmailTemplateDB.is_active).count()
# Count templates with published versions
published_count = 0

View File

@@ -248,7 +248,231 @@ async def upload_evidence(
# ============================================================================
# CI/CD Evidence Collection
# CI/CD Evidence Collection — helpers
# ============================================================================
# Map CI source names to the corresponding control IDs.
# The collect endpoint validates incoming `source` against this map and
# rejects unknown sources with HTTP 400, so every stored evidence item is
# tied to a known control.
SOURCE_CONTROL_MAP = {
    "sast": "SDLC-001",
    "dependency_scan": "SDLC-002",
    "secret_scan": "SDLC-003",
    "code_review": "SDLC-004",
    "sbom": "SDLC-005",
    "container_scan": "SDLC-006",
    "test_results": "AUD-001",
}
def _parse_ci_evidence(data: dict) -> dict:
"""
Parse and validate incoming CI evidence data.
Returns a dict with:
- report_json: str (serialised JSON)
- report_hash: str (SHA-256 hex digest)
- evidence_status: str ("valid" or "failed")
- findings_count: int
- critical_findings: int
"""
report_json = json.dumps(data) if data else "{}"
report_hash = hashlib.sha256(report_json.encode()).hexdigest()
findings_count = 0
critical_findings = 0
if data and isinstance(data, dict):
# Semgrep format
if "results" in data:
findings_count = len(data.get("results", []))
critical_findings = len([
r for r in data.get("results", [])
if r.get("extra", {}).get("severity", "").upper() in ["CRITICAL", "HIGH"]
])
# Trivy format
elif "Results" in data:
for result in data.get("Results", []):
vulns = result.get("Vulnerabilities", [])
findings_count += len(vulns)
critical_findings += len([
v for v in vulns
if v.get("Severity", "").upper() in ["CRITICAL", "HIGH"]
])
# Generic findings array
elif "findings" in data:
findings_count = len(data.get("findings", []))
# SBOM format - just count components
elif "components" in data:
findings_count = len(data.get("components", []))
evidence_status = "failed" if critical_findings > 0 else "valid"
return {
"report_json": report_json,
"report_hash": report_hash,
"evidence_status": evidence_status,
"findings_count": findings_count,
"critical_findings": critical_findings,
}
def _store_evidence(
    db: Session,
    *,
    control_db_id: str,
    source: str,
    parsed: dict,
    ci_job_id: str,
    ci_job_url: str,
    report_data: dict,
) -> EvidenceDB:
    """
    Persist a CI evidence item to the database and write the report file.

    Writes the raw report JSON under /tmp/compliance_evidence/ci/<source>/
    and creates a committed EvidenceDB row pointing at it; the evidence is
    marked valid for 90 days from collection.

    Returns the created EvidenceDB instance (already committed and refreshed).
    """
    total = parsed["findings_count"]
    critical = parsed["critical_findings"]

    # Human-readable summary of the CI run for the evidence record.
    title = f"{source.upper()} Report - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
    detail_lines = ["Automatically collected from CI/CD pipeline"]
    if total > 0:
        detail_lines.append(f"- Total findings: {total}")
    if critical > 0:
        detail_lines.append(f"- Critical/High findings: {critical}")
    if ci_job_id:
        detail_lines.append(f"- CI Job ID: {ci_job_id}")
    if ci_job_url:
        detail_lines.append(f"- CI Job URL: {ci_job_url}")
    description = "\n".join(detail_lines)

    # Persist the raw report; file name embeds timestamp + hash prefix for uniqueness.
    report_dir = f"/tmp/compliance_evidence/ci/{source}"
    os.makedirs(report_dir, exist_ok=True)
    report_name = f"{source}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{parsed['report_hash'][:8]}.json"
    report_path = os.path.join(report_dir, report_name)
    with open(report_path, "w") as fh:
        json.dump(report_data or {}, fh, indent=2)

    record = EvidenceDB(
        id=str(uuid_module.uuid4()),
        control_id=control_db_id,
        evidence_type=f"ci_{source}",
        title=title,
        description=description,
        artifact_path=report_path,
        artifact_hash=parsed["report_hash"],
        file_size_bytes=len(parsed["report_json"]),
        mime_type="application/json",
        source="ci_pipeline",
        ci_job_id=ci_job_id,
        valid_from=datetime.utcnow(),
        valid_until=datetime.utcnow() + timedelta(days=90),
        status=EvidenceStatusEnum(parsed["evidence_status"]),
    )
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
def _extract_findings_detail(report_data: dict) -> dict:
"""
Extract severity-bucketed finding counts from report data.
Returns dict with keys: critical, high, medium, low.
"""
findings_detail = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
}
if not report_data:
return findings_detail
# Semgrep format
if "results" in report_data:
for r in report_data.get("results", []):
severity = r.get("extra", {}).get("severity", "").upper()
if severity == "CRITICAL":
findings_detail["critical"] += 1
elif severity == "HIGH":
findings_detail["high"] += 1
elif severity == "MEDIUM":
findings_detail["medium"] += 1
elif severity in ["LOW", "INFO"]:
findings_detail["low"] += 1
# Trivy format
elif "Results" in report_data:
for result in report_data.get("Results", []):
for v in result.get("Vulnerabilities", []):
severity = v.get("Severity", "").upper()
if severity == "CRITICAL":
findings_detail["critical"] += 1
elif severity == "HIGH":
findings_detail["high"] += 1
elif severity == "MEDIUM":
findings_detail["medium"] += 1
elif severity == "LOW":
findings_detail["low"] += 1
# Generic findings with severity
elif "findings" in report_data:
for f in report_data.get("findings", []):
severity = f.get("severity", "").upper()
if severity == "CRITICAL":
findings_detail["critical"] += 1
elif severity == "HIGH":
findings_detail["high"] += 1
elif severity == "MEDIUM":
findings_detail["medium"] += 1
else:
findings_detail["low"] += 1
return findings_detail
def _update_risks(db: Session, *, source: str, control_id: str, ci_job_id: str, report_data: dict):
    """
    Update risk status based on new evidence.

    Feeds severity-bucketed findings into AutoRiskUpdater so the Control
    status and any linked Risks reflect the latest CI scan. Any failure is
    logged and swallowed so evidence collection itself never fails because
    of a risk-update error.

    Returns the updater's result object, or None when the update failed.
    """
    buckets = _extract_findings_detail(report_data)
    try:
        updater = AutoRiskUpdater(db)
        result = updater.process_evidence_collect_request(
            tool=source,
            control_id=control_id,
            evidence_type=f"ci_{source}",
            timestamp=datetime.utcnow().isoformat(),
            commit_sha=report_data.get("commit_sha", "unknown") if report_data else "unknown",
            ci_job_id=ci_job_id,
            findings=buckets,
        )
        logger.info(f"Auto-risk update completed for {control_id}: "
                    f"control_updated={result.control_updated}, "
                    f"risks_affected={len(result.risks_affected)}")
        return result
    except Exception as e:
        logger.error(f"Auto-risk update failed for {control_id}: {str(e)}")
        return None
# ============================================================================
# CI/CD Evidence Collection — endpoint
# ============================================================================
@router.post("/evidence/collect")
@@ -274,17 +498,6 @@ async def collect_ci_evidence(
- secret_scan: Secret detection (Gitleaks, TruffleHog)
- code_review: Code review metrics
"""
# Map source to control_id
SOURCE_CONTROL_MAP = {
"sast": "SDLC-001",
"dependency_scan": "SDLC-002",
"secret_scan": "SDLC-003",
"code_review": "SDLC-004",
"sbom": "SDLC-005",
"container_scan": "SDLC-006",
"test_results": "AUD-001",
}
if source not in SOURCE_CONTROL_MAP:
raise HTTPException(
status_code=400,
@@ -302,173 +515,38 @@ async def collect_ci_evidence(
detail=f"Control {control_id} not found. Please seed the database first."
)
# Parse and validate report data
report_json = json.dumps(report_data) if report_data else "{}"
report_hash = hashlib.sha256(report_json.encode()).hexdigest()
# --- 1. Parse and validate report data ---
parsed = _parse_ci_evidence(report_data)
# Determine evidence status based on report content
evidence_status = "valid"
findings_count = 0
critical_findings = 0
if report_data:
# Try to extract findings from common report formats
if isinstance(report_data, dict):
# Semgrep format
if "results" in report_data:
findings_count = len(report_data.get("results", []))
critical_findings = len([
r for r in report_data.get("results", [])
if r.get("extra", {}).get("severity", "").upper() in ["CRITICAL", "HIGH"]
])
# Trivy format
elif "Results" in report_data:
for result in report_data.get("Results", []):
vulns = result.get("Vulnerabilities", [])
findings_count += len(vulns)
critical_findings += len([
v for v in vulns
if v.get("Severity", "").upper() in ["CRITICAL", "HIGH"]
])
# Generic findings array
elif "findings" in report_data:
findings_count = len(report_data.get("findings", []))
# SBOM format - just count components
elif "components" in report_data:
findings_count = len(report_data.get("components", []))
# If critical findings exist, mark as failed
if critical_findings > 0:
evidence_status = "failed"
# Create evidence title
title = f"{source.upper()} Report - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
description = f"Automatically collected from CI/CD pipeline"
if findings_count > 0:
description += f"\n- Total findings: {findings_count}"
if critical_findings > 0:
description += f"\n- Critical/High findings: {critical_findings}"
if ci_job_id:
description += f"\n- CI Job ID: {ci_job_id}"
if ci_job_url:
description += f"\n- CI Job URL: {ci_job_url}"
# Store report file
upload_dir = f"/tmp/compliance_evidence/ci/{source}"
os.makedirs(upload_dir, exist_ok=True)
file_name = f"{source}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{report_hash[:8]}.json"
file_path = os.path.join(upload_dir, file_name)
with open(file_path, "w") as f:
json.dump(report_data or {}, f, indent=2)
# Create evidence record directly
evidence = EvidenceDB(
id=str(uuid_module.uuid4()),
control_id=control.id,
evidence_type=f"ci_{source}",
title=title,
description=description,
artifact_path=file_path,
artifact_hash=report_hash,
file_size_bytes=len(report_json),
mime_type="application/json",
source="ci_pipeline",
# --- 2. Store evidence in DB and write report file ---
evidence = _store_evidence(
db,
control_db_id=control.id,
source=source,
parsed=parsed,
ci_job_id=ci_job_id,
valid_from=datetime.utcnow(),
valid_until=datetime.utcnow() + timedelta(days=90),
status=EvidenceStatusEnum(evidence_status),
ci_job_url=ci_job_url,
report_data=report_data,
)
db.add(evidence)
db.commit()
db.refresh(evidence)
# =========================================================================
# AUTOMATIC RISK UPDATE
# Update Control status and linked Risks based on findings
# =========================================================================
risk_update_result = None
try:
# Extract detailed findings for risk assessment
findings_detail = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
}
if report_data:
# Semgrep format
if "results" in report_data:
for r in report_data.get("results", []):
severity = r.get("extra", {}).get("severity", "").upper()
if severity == "CRITICAL":
findings_detail["critical"] += 1
elif severity == "HIGH":
findings_detail["high"] += 1
elif severity == "MEDIUM":
findings_detail["medium"] += 1
elif severity in ["LOW", "INFO"]:
findings_detail["low"] += 1
# Trivy format
elif "Results" in report_data:
for result in report_data.get("Results", []):
for v in result.get("Vulnerabilities", []):
severity = v.get("Severity", "").upper()
if severity == "CRITICAL":
findings_detail["critical"] += 1
elif severity == "HIGH":
findings_detail["high"] += 1
elif severity == "MEDIUM":
findings_detail["medium"] += 1
elif severity == "LOW":
findings_detail["low"] += 1
# Generic findings with severity
elif "findings" in report_data:
for f in report_data.get("findings", []):
severity = f.get("severity", "").upper()
if severity == "CRITICAL":
findings_detail["critical"] += 1
elif severity == "HIGH":
findings_detail["high"] += 1
elif severity == "MEDIUM":
findings_detail["medium"] += 1
else:
findings_detail["low"] += 1
# Use AutoRiskUpdater to update Control status and Risks
auto_updater = AutoRiskUpdater(db)
risk_update_result = auto_updater.process_evidence_collect_request(
tool=source,
control_id=control_id,
evidence_type=f"ci_{source}",
timestamp=datetime.utcnow().isoformat(),
commit_sha=report_data.get("commit_sha", "unknown") if report_data else "unknown",
ci_job_id=ci_job_id,
findings=findings_detail,
)
logger.info(f"Auto-risk update completed for {control_id}: "
f"control_updated={risk_update_result.control_updated}, "
f"risks_affected={len(risk_update_result.risks_affected)}")
except Exception as e:
logger.error(f"Auto-risk update failed for {control_id}: {str(e)}")
# --- 3. Automatic risk update ---
risk_update_result = _update_risks(
db,
source=source,
control_id=control_id,
ci_job_id=ci_job_id,
report_data=report_data,
)
return {
"success": True,
"evidence_id": evidence.id,
"control_id": control_id,
"source": source,
"status": evidence_status,
"findings_count": findings_count,
"critical_findings": critical_findings,
"artifact_path": file_path,
"status": parsed["evidence_status"],
"findings_count": parsed["findings_count"],
"critical_findings": parsed["critical_findings"],
"artifact_path": evidence.artifact_path,
"message": f"Evidence collected successfully for control {control_id}",
"auto_risk_update": {
"enabled": True,

View File

@@ -20,13 +20,13 @@ import asyncio
from typing import Optional, List, Dict
from datetime import datetime
from fastapi import APIRouter, Depends, Query
from fastapi import APIRouter, Depends
from pydantic import BaseModel
from sqlalchemy.orm import Session
from classroom_engine.database import get_db
from ..db import RegulationRepository, RequirementRepository
from ..db.models import RegulationDB, RequirementDB, RegulationTypeEnum
from ..db.models import RegulationDB, RegulationTypeEnum
from ..services.rag_client import get_rag_client, RAGSearchResult
logger = logging.getLogger(__name__)
@@ -185,6 +185,169 @@ def _build_existing_articles(
return {r.article for r in existing}
# ---------------------------------------------------------------------------
# Extraction helpers — independently testable
# ---------------------------------------------------------------------------
def _parse_rag_results(
    all_results: List[RAGSearchResult],
    regulation_codes: Optional[List[str]] = None,
) -> dict:
    """
    Filter, deduplicate, and group RAG search results by regulation code.

    Results without a normalisable article reference are collected in
    ``skipped_no_article``. Within each regulation group only the
    highest-scoring result per article is kept. A missing regulation code
    is grouped under "UNKNOWN".

    Returns a dict with:
    - deduped_by_reg: Dict[str, List[tuple[str, RAGSearchResult]]]
    - skipped_no_article: List[RAGSearchResult]
    - unique_count: int (distinct (regulation_code, article) pairs)
    """
    if regulation_codes:
        all_results = [res for res in all_results if res.regulation_code in regulation_codes]

    # Count distinct (regulation_code, article) pairs across all results.
    seen_pairs: set[tuple[str, str]] = set()
    for res in sorted(all_results, key=lambda x: x.score, reverse=True):
        art = _normalize_article(res)
        if not art:
            continue
        seen_pairs.add((res.regulation_code, art))

    # Group by regulation code; collect results lacking an article reference.
    grouped: Dict[str, List[tuple[str, RAGSearchResult]]] = {}
    skipped_no_article: List[RAGSearchResult] = []
    for res in all_results:
        art = _normalize_article(res)
        if not art:
            skipped_no_article.append(res)
            continue
        grouped.setdefault(res.regulation_code or "UNKNOWN", []).append((art, res))

    # Keep only the best-scoring entry per article within each group.
    deduped_by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] = {}
    for reg_code, entries in grouped.items():
        seen_articles: set[str] = set()
        kept: List[tuple[str, RAGSearchResult]] = []
        for art, res in sorted(entries, key=lambda x: x[1].score, reverse=True):
            if art not in seen_articles:
                seen_articles.add(art)
                kept.append((art, res))
        deduped_by_reg[reg_code] = kept

    return {
        "deduped_by_reg": deduped_by_reg,
        "skipped_no_article": skipped_no_article,
        "unique_count": len(seen_pairs),
    }
def _store_requirements(
    db: Session,
    deduped_by_reg: Dict[str, List[tuple[str, "RAGSearchResult"]]],
    dry_run: bool,
) -> dict:
    """
    Persist extracted requirements to the database (or simulate in dry_run mode).

    For each regulation group the regulation row is found or created (in
    dry_run a fake id is used and no lookup happens), then one requirement
    per (article, result) pair is created unless the article already exists.
    A failure to resolve the regulation fails the whole group; a failure to
    create a single requirement only counts that one item as failed.

    Returns a dict with:
    - created_count: int (in dry_run: number that WOULD be created)
    - skipped_dup_count: int
    - failed_count: int
    - result_items: List[ExtractedRequirement]
    """
    req_repo = RequirementRepository(db)
    created_count = 0
    skipped_dup_count = 0
    failed_count = 0
    result_items: List[ExtractedRequirement] = []
    for reg_code, items in deduped_by_reg.items():
        if not items:
            continue
        # Find or create regulation
        try:
            first_result = items[0][1]
            regulation_name = first_result.regulation_name or first_result.regulation_short or reg_code
            if dry_run:
                # For dry_run, fake a regulation id
                regulation_id = f"dry-run-{reg_code}"
                existing_articles: set[str] = set()
            else:
                reg = _get_or_create_regulation(db, reg_code, regulation_name)
                regulation_id = reg.id
                existing_articles = _build_existing_articles(db, regulation_id)
        except Exception as e:
            logger.error("Failed to get/create regulation %s: %s", reg_code, e)
            # Every item in this group depends on the regulation row,
            # so the whole group is counted as failed.
            failed_count += len(items)
            continue
        for article, r in items:
            title = _derive_title(r.text, article)
            if article in existing_articles:
                # A requirement for this regulation/article already exists.
                skipped_dup_count += 1
                result_items.append(ExtractedRequirement(
                    regulation_code=reg_code,
                    article=article,
                    title=title,
                    requirement_text=r.text[:1000],
                    source_url=r.source_url,
                    score=r.score,
                    action="skipped_duplicate",
                ))
                continue
            if not dry_run:
                try:
                    req_repo.create(
                        regulation_id=regulation_id,
                        article=article,
                        title=title,
                        description=f"Extrahiert aus RAG-Korpus (Collection: {r.category or r.regulation_code}). Score: {r.score:.2f}",
                        requirement_text=r.text[:2000],
                        breakpilot_interpretation=None,
                        is_applicable=True,
                        priority=2,
                    )
                    existing_articles.add(article)  # prevent intra-batch duplication
                    created_count += 1
                except Exception as e:
                    logger.error("Failed to create requirement %s/%s: %s", reg_code, article, e)
                    failed_count += 1
                    continue
            else:
                created_count += 1  # dry_run: count as would-create
            # Reached for both real creates and dry_run would-creates.
            result_items.append(ExtractedRequirement(
                regulation_code=reg_code,
                article=article,
                title=title,
                requirement_text=r.text[:1000],
                source_url=r.source_url,
                score=r.score,
                action="created" if not dry_run else "would_create",
            ))
    return {
        "created_count": created_count,
        "skipped_dup_count": skipped_dup_count,
        "failed_count": failed_count,
        "result_items": result_items,
    }
# ---------------------------------------------------------------------------
# Endpoint
# ---------------------------------------------------------------------------
@@ -225,126 +388,19 @@ async def extract_requirements_from_rag(
logger.info("RAG extraction: %d raw results from %d collections", len(all_results), len(collections))
# --- 2. Filter by regulation_codes if requested ---
if body.regulation_codes:
all_results = [
r for r in all_results
if r.regulation_code in body.regulation_codes
]
# --- 2. Parse, filter, deduplicate, and group ---
parsed = _parse_rag_results(all_results, body.regulation_codes)
deduped_by_reg = parsed["deduped_by_reg"]
skipped_no_article = parsed["skipped_no_article"]
# --- 3. Deduplicate at result level (regulation_code + article) ---
seen: set[tuple[str, str]] = set()
unique_results: List[RAGSearchResult] = []
for r in sorted(all_results, key=lambda x: x.score, reverse=True):
article = _normalize_article(r)
if not article:
continue
key = (r.regulation_code, article)
if key not in seen:
seen.add(key)
unique_results.append(r)
logger.info("RAG extraction: %d unique (regulation, article) pairs", parsed["unique_count"])
logger.info("RAG extraction: %d unique (regulation, article) pairs", len(unique_results))
# --- 4. Group by regulation_code and process ---
by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] = {}
skipped_no_article: List[RAGSearchResult] = []
for r in all_results:
article = _normalize_article(r)
if not article:
skipped_no_article.append(r)
continue
key_r = r.regulation_code or "UNKNOWN"
if key_r not in by_reg:
by_reg[key_r] = []
by_reg[key_r].append((article, r))
# Deduplicate within groups
deduped_by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] = {}
for reg_code, items in by_reg.items():
seen_articles: set[str] = set()
deduped: List[tuple[str, RAGSearchResult]] = []
for art, r in sorted(items, key=lambda x: x[1].score, reverse=True):
if art not in seen_articles:
seen_articles.add(art)
deduped.append((art, r))
deduped_by_reg[reg_code] = deduped
# --- 5. Create requirements ---
req_repo = RequirementRepository(db)
created_count = 0
skipped_dup_count = 0
failed_count = 0
result_items: List[ExtractedRequirement] = []
for reg_code, items in deduped_by_reg.items():
if not items:
continue
# Find or create regulation
try:
first_result = items[0][1]
regulation_name = first_result.regulation_name or first_result.regulation_short or reg_code
if body.dry_run:
# For dry_run, fake a regulation id
regulation_id = f"dry-run-{reg_code}"
existing_articles: set[str] = set()
else:
reg = _get_or_create_regulation(db, reg_code, regulation_name)
regulation_id = reg.id
existing_articles = _build_existing_articles(db, regulation_id)
except Exception as e:
logger.error("Failed to get/create regulation %s: %s", reg_code, e)
failed_count += len(items)
continue
for article, r in items:
title = _derive_title(r.text, article)
if article in existing_articles:
skipped_dup_count += 1
result_items.append(ExtractedRequirement(
regulation_code=reg_code,
article=article,
title=title,
requirement_text=r.text[:1000],
source_url=r.source_url,
score=r.score,
action="skipped_duplicate",
))
continue
if not body.dry_run:
try:
req_repo.create(
regulation_id=regulation_id,
article=article,
title=title,
description=f"Extrahiert aus RAG-Korpus (Collection: {r.category or r.regulation_code}). Score: {r.score:.2f}",
requirement_text=r.text[:2000],
breakpilot_interpretation=None,
is_applicable=True,
priority=2,
)
existing_articles.add(article) # prevent intra-batch duplication
created_count += 1
except Exception as e:
logger.error("Failed to create requirement %s/%s: %s", reg_code, article, e)
failed_count += 1
continue
else:
created_count += 1 # dry_run: count as would-create
result_items.append(ExtractedRequirement(
regulation_code=reg_code,
article=article,
title=title,
requirement_text=r.text[:1000],
source_url=r.source_url,
score=r.score,
action="created" if not body.dry_run else "would_create",
))
# --- 3. Create requirements ---
store_result = _store_requirements(db, deduped_by_reg, body.dry_run)
created_count = store_result["created_count"]
skipped_dup_count = store_result["skipped_dup_count"]
failed_count = store_result["failed_count"]
result_items = store_result["result_items"]
message = (
f"{'[DRY RUN] ' if body.dry_run else ''}"

View File

@@ -24,7 +24,7 @@ Endpoints:
import json
import logging
from datetime import datetime, timedelta, timezone
from typing import Optional, List, Any
from typing import Optional, List
from uuid import UUID, uuid4
from fastapi import APIRouter, Depends, HTTPException, Query, Header

View File

@@ -14,7 +14,7 @@ Provides endpoints for ISO 27001 certification-ready ISMS management:
import uuid
import hashlib
from datetime import datetime, date
from typing import Optional, List
from typing import Optional
from fastapi import APIRouter, HTTPException, Query, Depends
from sqlalchemy.orm import Session
@@ -53,7 +53,7 @@ from .schemas import (
# Readiness
ISMSReadinessCheckResponse, ISMSReadinessCheckRequest, PotentialFinding,
# Audit Trail
AuditTrailResponse, AuditTrailEntry, PaginationMeta,
AuditTrailResponse, PaginationMeta,
# Overview
ISO27001OverviewResponse, ISO27001ChapterStatus
)
@@ -673,10 +673,6 @@ async def list_findings(
ofi_count = sum(1 for f in findings if f.finding_type == FindingTypeEnum.OFI)
open_count = sum(1 for f in findings if f.status != FindingStatusEnum.CLOSED)
# Add is_blocking property to each finding
for f in findings:
f.is_blocking = f.finding_type == FindingTypeEnum.MAJOR and f.status != FindingStatusEnum.CLOSED
return AuditFindingListResponse(
findings=findings,
total=len(findings),
@@ -746,7 +742,6 @@ async def create_finding(data: AuditFindingCreate, db: Session = Depends(get_db)
db.commit()
db.refresh(finding)
finding.is_blocking = finding.finding_type == FindingTypeEnum.MAJOR
return finding
@@ -775,7 +770,6 @@ async def update_finding(
db.commit()
db.refresh(finding)
finding.is_blocking = finding.finding_type == FindingTypeEnum.MAJOR and finding.status != FindingStatusEnum.CLOSED
return finding
@@ -824,7 +818,6 @@ async def close_finding(
db.commit()
db.refresh(finding)
finding.is_blocking = False
return finding
@@ -1271,10 +1264,9 @@ async def run_readiness_check(
# Chapter 6: Planning - Risk Assessment
from ..db.models import RiskDB
risks = db.query(RiskDB).filter(RiskDB.status == "open").count()
risks_without_treatment = db.query(RiskDB).filter(
RiskDB.status == "open",
RiskDB.treatment_plan == None
RiskDB.treatment_plan.is_(None)
).count()
if risks_without_treatment > 0:
potential_majors.append(PotentialFinding(
@@ -1299,7 +1291,7 @@ async def run_readiness_check(
# SoA
soa_total = db.query(StatementOfApplicabilityDB).count()
soa_unapproved = db.query(StatementOfApplicabilityDB).filter(
StatementOfApplicabilityDB.approved_at == None
StatementOfApplicabilityDB.approved_at.is_(None)
).count()
if soa_total == 0:
potential_majors.append(PotentialFinding(
@@ -1525,7 +1517,7 @@ async def get_iso27001_overview(db: Session = Depends(get_db)):
soa_total = db.query(StatementOfApplicabilityDB).count()
soa_approved = db.query(StatementOfApplicabilityDB).filter(
StatementOfApplicabilityDB.approved_at != None
StatementOfApplicabilityDB.approved_at.isnot(None)
).count()
soa_all_approved = soa_total > 0 and soa_approved == soa_total

View File

@@ -671,7 +671,7 @@ async def get_my_consents(
.filter(
UserConsentDB.tenant_id == tid,
UserConsentDB.user_id == user_id,
UserConsentDB.withdrawn_at == None,
UserConsentDB.withdrawn_at.is_(None),
)
.order_by(UserConsentDB.consented_at.desc())
.all()
@@ -694,8 +694,8 @@ async def check_consent(
UserConsentDB.tenant_id == tid,
UserConsentDB.user_id == user_id,
UserConsentDB.document_type == document_type,
UserConsentDB.consented == True,
UserConsentDB.withdrawn_at == None,
UserConsentDB.consented,
UserConsentDB.withdrawn_at.is_(None),
)
.order_by(UserConsentDB.consented_at.desc())
.first()
@@ -757,10 +757,10 @@ async def get_consent_stats(
total = base.count()
active = base.filter(
UserConsentDB.consented == True,
UserConsentDB.withdrawn_at == None,
UserConsentDB.consented,
UserConsentDB.withdrawn_at.is_(None),
).count()
withdrawn = base.filter(UserConsentDB.withdrawn_at != None).count()
withdrawn = base.filter(UserConsentDB.withdrawn_at.isnot(None)).count()
# By document type
by_type = {}

View File

@@ -314,9 +314,9 @@ async def update_legal_template(
raise HTTPException(status_code=400, detail="No fields to update")
if "document_type" in updates and updates["document_type"] not in VALID_DOCUMENT_TYPES:
raise HTTPException(status_code=400, detail=f"Invalid document_type")
raise HTTPException(status_code=400, detail="Invalid document_type")
if "status" in updates and updates["status"] not in VALID_STATUSES:
raise HTTPException(status_code=400, detail=f"Invalid status")
raise HTTPException(status_code=400, detail="Invalid status")
set_clauses = ["updated_at = :updated_at"]
params: Dict[str, Any] = {

View File

@@ -16,11 +16,10 @@ import logging
logger = logging.getLogger(__name__)
import os
from datetime import datetime, timedelta
from typing import Optional, List
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Query, BackgroundTasks
from fastapi import APIRouter, Depends, HTTPException, Query, BackgroundTasks
from fastapi.responses import FileResponse
from sqlalchemy.orm import Session
@@ -31,22 +30,16 @@ from ..db import (
RequirementRepository,
ControlRepository,
EvidenceRepository,
RiskRepository,
AuditExportRepository,
ControlStatusEnum,
ControlDomainEnum,
RiskLevelEnum,
EvidenceStatusEnum,
)
from ..db.models import EvidenceDB, ControlDB
from ..services.seeder import ComplianceSeeder
from ..services.export_generator import AuditExportGenerator
from ..services.auto_risk_updater import AutoRiskUpdater, ScanType
from .schemas import (
RegulationCreate, RegulationResponse, RegulationListResponse,
RegulationResponse, RegulationListResponse,
RequirementCreate, RequirementResponse, RequirementListResponse,
ControlCreate, ControlUpdate, ControlResponse, ControlListResponse, ControlReviewRequest,
MappingCreate, MappingResponse, MappingListResponse,
ControlUpdate, ControlResponse, ControlListResponse, ControlReviewRequest,
ExportRequest, ExportResponse, ExportListResponse,
SeedRequest, SeedResponse,
# Pagination schemas
@@ -381,7 +374,6 @@ async def delete_requirement(requirement_id: str, db: Session = Depends(get_db))
async def update_requirement(requirement_id: str, updates: dict, db: Session = Depends(get_db)):
"""Update a requirement with implementation/audit details."""
from ..db.models import RequirementDB
from datetime import datetime
requirement = db.query(RequirementDB).filter(RequirementDB.id == requirement_id).first()
if not requirement:
@@ -870,8 +862,8 @@ async def init_tables(db: Session = Depends(get_db)):
"""Create compliance tables if they don't exist."""
from classroom_engine.database import engine
from ..db.models import (
RegulationDB, RequirementDB, ControlDB, ControlMappingDB,
EvidenceDB, RiskDB, AuditExportDB, AISystemDB
RegulationDB, RequirementDB, ControlMappingDB,
RiskDB, AuditExportDB, AISystemDB
)
try:
@@ -971,8 +963,8 @@ async def seed_database(
"""Seed the compliance database with initial data."""
from classroom_engine.database import engine
from ..db.models import (
RegulationDB, RequirementDB, ControlDB, ControlMappingDB,
EvidenceDB, RiskDB, AuditExportDB
RegulationDB, RequirementDB, ControlMappingDB,
RiskDB, AuditExportDB
)
try:

View File

@@ -496,57 +496,6 @@ class SeedResponse(BaseModel):
counts: Dict[str, int]
# ============================================================================
# PDF Extraction Schemas
# ============================================================================
class BSIAspectResponse(BaseModel):
"""Response schema for an extracted BSI-TR Pruefaspekt."""
aspect_id: str
title: str
full_text: str
category: str
page_number: int
section: str
requirement_level: str
source_document: str
keywords: List[str] = []
related_aspects: List[str] = []
class PDFExtractionResponse(BaseModel):
"""Response for PDF extraction operation."""
success: bool
source_document: str
total_aspects: int
aspects: List[BSIAspectResponse]
statistics: Dict[str, Any]
requirements_created: int = 0
class PDFExtractionRequest(BaseModel):
"""Request to extract requirements from a PDF."""
document_code: str # e.g., "BSI-TR-03161-2"
save_to_db: bool = True
force: bool = False
# ============================================================================
# Paginated Response Schemas (after all Response classes are defined)
# ============================================================================
class PaginatedRequirementResponse(BaseModel):
"""Paginated response for requirements."""
data: List[RequirementResponse]
pagination: PaginationMeta
class PaginatedControlResponse(BaseModel):
"""Paginated response for controls."""
data: List[ControlResponse]
pagination: PaginationMeta
class PaginatedEvidenceResponse(BaseModel):
"""Paginated response for evidence."""
data: List[EvidenceResponse]

View File

@@ -257,18 +257,6 @@ def map_osv_severity(vuln: dict) -> tuple[str, float]:
severity = "MEDIUM"
cvss = 5.0
# Check severity array
for sev in vuln.get("severity", []):
if sev.get("type") == "CVSS_V3":
score_str = sev.get("score", "")
# Extract base score from CVSS vector
try:
import re as _re
# CVSS vectors don't contain the score directly, try database_specific
pass
except Exception:
pass
# Check database_specific for severity
db_specific = vuln.get("database_specific", {})
if "severity" in db_specific:

View File

@@ -21,9 +21,8 @@ Endpoints:
GET /api/v1/admin/compliance-report — Compliance report
"""
import uuid
from datetime import datetime
from typing import Optional, List
from typing import Optional
from fastapi import APIRouter, HTTPException, Depends, Query
from pydantic import BaseModel, Field
@@ -155,7 +154,7 @@ async def list_sources(
"""List all allowed sources with optional filters."""
query = db.query(AllowedSourceDB)
if active_only:
query = query.filter(AllowedSourceDB.active == True)
query = query.filter(AllowedSourceDB.active)
if source_type:
query = query.filter(AllowedSourceDB.source_type == source_type)
if license:
@@ -527,8 +526,8 @@ async def get_policy_audit(
async def get_policy_stats(db: Session = Depends(get_db)):
"""Get dashboard statistics for source policy."""
total_sources = db.query(AllowedSourceDB).count()
active_sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active == True).count()
pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active == True).count()
active_sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active).count()
pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active).count()
# Count blocked content entries from today
today_start = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
@@ -550,8 +549,8 @@ async def get_policy_stats(db: Session = Depends(get_db)):
@router.get("/compliance-report")
async def get_compliance_report(db: Session = Depends(get_db)):
"""Generate a compliance report for source policies."""
sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active == True).all()
pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active == True).all()
sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active).all()
pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active).all()
return {
"report_date": datetime.utcnow().isoformat(),

View File

@@ -19,11 +19,11 @@ import json
import logging
from datetime import datetime, timezone
from typing import Optional, List, Any, Dict
from uuid import UUID, uuid4
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field
from pydantic import BaseModel
from sqlalchemy import func
from sqlalchemy.orm import Session

View File

@@ -50,10 +50,9 @@ import json
import logging
import uuid
from datetime import datetime
from typing import Optional, List
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from pydantic import BaseModel
from sqlalchemy import text
from sqlalchemy.orm import Session

View File

@@ -7,7 +7,6 @@ with all 5 version tables (DSFA, VVT, TOM, Loeschfristen, Obligations).
import json
import logging
from datetime import datetime
from typing import Optional, List
from fastapi import APIRouter, Depends, HTTPException, Request

View File

@@ -19,7 +19,6 @@ import io
import logging
from datetime import datetime, timezone
from typing import Optional, List
from uuid import uuid4
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import StreamingResponse

View File

@@ -13,7 +13,7 @@ import uuid
from datetime import datetime
from sqlalchemy import (
Column, String, Text, Boolean, Integer, DateTime, Index, JSON
Column, Text, Boolean, Integer, DateTime, Index, JSON
)
from sqlalchemy.dialects.postgresql import UUID

View File

@@ -14,7 +14,7 @@ import uuid
from datetime import datetime
from sqlalchemy import (
Column, String, Text, Boolean, DateTime, JSON, Index
Column, Text, Boolean, DateTime, JSON, Index
)
from sqlalchemy.dialects.postgresql import UUID

View File

@@ -13,7 +13,7 @@ import uuid
from datetime import datetime
from sqlalchemy import (
Column, String, Text, Boolean, DateTime, JSON, Index, Integer
Column, String, Text, Boolean, DateTime, JSON, Index
)
from sqlalchemy.dialects.postgresql import UUID

View File

@@ -13,7 +13,7 @@ import uuid
from datetime import datetime
from sqlalchemy import (
Column, String, Text, Boolean, Integer, DateTime, JSON, Index
Column, Text, Boolean, Integer, DateTime, JSON, Index
)
from sqlalchemy.dialects.postgresql import UUID

View File

@@ -14,10 +14,9 @@ from datetime import datetime, date
from typing import List, Optional, Dict, Any, Tuple
from sqlalchemy.orm import Session as DBSession
from sqlalchemy import func, and_, or_
from .models import (
ISMSScopeDB, ISMSContextDB, ISMSPolicyDB, SecurityObjectiveDB,
ISMSScopeDB, ISMSPolicyDB, SecurityObjectiveDB,
StatementOfApplicabilityDB, AuditFindingDB, CorrectiveActionDB,
ManagementReviewDB, InternalAuditDB, AuditTrailDB, ISMSReadinessCheckDB,
ApprovalStatusEnum, FindingTypeEnum, FindingStatusEnum, CAPATypeEnum

View File

@@ -11,7 +11,7 @@ import uuid
from datetime import datetime
from sqlalchemy import (
Column, String, Text, Boolean, Integer, DateTime, Index, JSON
Column, Text, Boolean, Integer, DateTime, Index, JSON
)
from sqlalchemy.dialects.postgresql import UUID

View File

@@ -14,7 +14,6 @@ Tables:
import enum
import uuid
from datetime import datetime, date
from typing import Optional, List
from sqlalchemy import (
Column, String, Text, Integer, Boolean, DateTime, Date,

View File

@@ -3,6 +3,7 @@ Repository layer for Compliance module.
Provides CRUD operations and business logic queries for all compliance entities.
"""
from __future__ import annotations
import uuid
from datetime import datetime, date
@@ -17,7 +18,8 @@ from .models import (
EvidenceDB, RiskDB, AuditExportDB,
AuditSessionDB, AuditSignOffDB, AuditResultEnum, AuditSessionStatusEnum,
RegulationTypeEnum, ControlDomainEnum, ControlStatusEnum,
RiskLevelEnum, EvidenceStatusEnum, ExportStatusEnum
RiskLevelEnum, EvidenceStatusEnum, ExportStatusEnum,
ServiceModuleDB, ModuleRegulationMappingDB,
)
@@ -447,7 +449,7 @@ class ControlRepository:
self.db.query(ControlDB)
.filter(
or_(
ControlDB.next_review_at == None,
ControlDB.next_review_at is None,
ControlDB.next_review_at <= datetime.utcnow()
)
)
@@ -936,7 +938,7 @@ class ServiceModuleRepository:
"""Get all modules with filters."""
from .models import ServiceModuleDB, ServiceTypeEnum
query = self.db.query(ServiceModuleDB).filter(ServiceModuleDB.is_active == True)
query = self.db.query(ServiceModuleDB).filter(ServiceModuleDB.is_active)
if service_type:
query = query.filter(ServiceModuleDB.service_type == ServiceTypeEnum(service_type))
@@ -990,8 +992,7 @@ class ServiceModuleRepository:
def get_overview(self) -> Dict[str, Any]:
"""Get overview statistics for all modules."""
from .models import ServiceModuleDB, ModuleRegulationMappingDB
from sqlalchemy import func
from .models import ModuleRegulationMappingDB
modules = self.get_all()
total = len(modules)
@@ -1035,7 +1036,6 @@ class ServiceModuleRepository:
def seed_from_data(self, services_data: List[Dict[str, Any]], force: bool = False) -> Dict[str, int]:
"""Seed modules from service_modules.py data."""
from .models import ServiceModuleDB
modules_created = 0
mappings_created = 0

View File

@@ -12,7 +12,7 @@ Checks:
import sys
from pathlib import Path
from collections import defaultdict
from typing import Dict, List, Set
from typing import Dict, List
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
@@ -182,7 +182,7 @@ def validate_data_categories():
def main():
"""Run all validations."""
print(f"{GREEN}{'='*60}")
print(f" Breakpilot Service Module Validation")
print(" Breakpilot Service Module Validation")
print(f"{'='*60}{RESET}")
all_passed = True

View File

@@ -11,11 +11,11 @@ Provides AI-powered features for:
import json
import logging
import re
from dataclasses import dataclass, field
from dataclasses import dataclass
from typing import List, Optional, Dict, Any
from enum import Enum
from .llm_provider import LLMProvider, get_shared_provider, LLMResponse
from .llm_provider import LLMProvider, get_shared_provider
from .rag_client import get_rag_client
logger = logging.getLogger(__name__)

View File

@@ -18,27 +18,23 @@ import io
import logging
from datetime import datetime
from typing import Dict, List, Any, Optional, Tuple
from uuid import uuid4
import hashlib
from sqlalchemy.orm import Session, selectinload
from sqlalchemy.orm import Session
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import mm, cm
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.units import mm
from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY
from reportlab.platypus import (
SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle,
PageBreak, Image, ListFlowable, ListItem, KeepTogether,
HRFlowable
PageBreak, HRFlowable
)
from reportlab.graphics.shapes import Drawing, Rect, String
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.piecharts import Pie
from ..db.models import (
AuditSessionDB, AuditSignOffDB, AuditResultEnum, AuditSessionStatusEnum,
RequirementDB, RegulationDB
AuditSessionDB, AuditSignOffDB, AuditResultEnum, RequirementDB, RegulationDB
)
logger = logging.getLogger(__name__)

View File

@@ -12,7 +12,7 @@ Sprint 6: CI/CD Evidence Collection (2026-01-18)
import logging
from datetime import datetime
from typing import Dict, List, Optional, Any
from typing import Dict, List, Optional
from dataclasses import dataclass
from enum import Enum
@@ -21,7 +21,7 @@ from sqlalchemy.orm import Session
from ..db.models import (
ControlDB, ControlStatusEnum,
EvidenceDB, EvidenceStatusEnum,
RiskDB, RiskLevelEnum,
RiskDB,
)
from ..db.repository import ControlRepository, EvidenceRepository, RiskRepository

View File

@@ -189,7 +189,7 @@ class AuditExportGenerator:
self, output_dir: Path, included_regulations: Optional[List[str]]
) -> None:
"""Export regulations to JSON files."""
query = self.db.query(RegulationDB).filter(RegulationDB.is_active == True)
query = self.db.query(RegulationDB).filter(RegulationDB.is_active)
if included_regulations:
query = query.filter(RegulationDB.code.in_(included_regulations))
@@ -557,7 +557,7 @@ Generiert am: """ + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
) -> Dict[str, Any]:
"""Calculate compliance statistics."""
# Count regulations
reg_query = self.db.query(RegulationDB).filter(RegulationDB.is_active == True)
reg_query = self.db.query(RegulationDB).filter(RegulationDB.is_active)
if included_regulations:
reg_query = reg_query.filter(RegulationDB.code.in_(included_regulations))
total_regulations = reg_query.count()

View File

@@ -26,7 +26,7 @@ import asyncio
import logging
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any
from dataclasses import dataclass, field
from dataclasses import dataclass
from enum import Enum
import httpx

View File

@@ -11,11 +11,9 @@ Similar pattern to edu-search and zeugnisse-crawler.
import logging
import re
import asyncio
from datetime import datetime
from typing import Dict, List, Any, Optional
from enum import Enum
import hashlib
import httpx
from bs4 import BeautifulSoup

View File

@@ -19,16 +19,11 @@ from sqlalchemy.orm import Session
from sqlalchemy import func
from ..db.models import (
RegulationDB,
RequirementDB,
ControlDB,
ControlMappingDB,
EvidenceDB,
RiskDB,
AuditExportDB,
ControlStatusEnum,
RiskLevelEnum,
EvidenceStatusEnum,
)
from ..db.repository import (
RegulationRepository,
@@ -171,7 +166,6 @@ class ComplianceReportGenerator:
# Control status findings
by_status = ctrl_stats.get("by_status", {})
passed = by_status.get("pass", 0)
failed = by_status.get("fail", 0)
planned = by_status.get("planned", 0)
@@ -200,10 +194,8 @@ class ComplianceReportGenerator:
"""Generate compliance score section with breakdown."""
stats = self.ctrl_repo.get_statistics()
by_domain = stats.get("by_domain", {})
domain_scores = {}
controls = self.ctrl_repo.get_all()
domain_scores = {}
domain_stats = {}
for ctrl in controls:

View File

@@ -5,8 +5,7 @@ Seeds the database with initial regulations, controls, and requirements.
"""
import logging
from typing import Dict, List, Optional
from datetime import datetime
from typing import Dict
from sqlalchemy.orm import Session
@@ -23,7 +22,6 @@ from ..db.models import (
ControlTypeEnum,
ControlDomainEnum,
ControlStatusEnum,
RiskLevelEnum,
ServiceTypeEnum,
RelevanceLevelEnum,
)

View File

@@ -9,10 +9,9 @@ Run with: pytest backend/compliance/tests/test_audit_routes.py -v
import pytest
import hashlib
from datetime import datetime
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock
from uuid import uuid4
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
# Import the app and dependencies

View File

@@ -4,10 +4,8 @@ Tests for the AutoRiskUpdater Service.
Sprint 6: CI/CD Evidence Collection & Automatic Risk Updates (2026-01-18)
"""
import pytest
from datetime import datetime
from unittest.mock import MagicMock, patch
from uuid import uuid4
from unittest.mock import MagicMock
from ..services.auto_risk_updater import (
AutoRiskUpdater,
@@ -18,9 +16,7 @@ from ..services.auto_risk_updater import (
CONTROL_SCAN_MAPPING,
)
from ..db.models import (
ControlDB, ControlStatusEnum,
EvidenceDB, EvidenceStatusEnum,
RiskDB, RiskLevelEnum,
ControlStatusEnum,
)

View File

@@ -16,7 +16,6 @@ from compliance.db.models import (
RequirementDB, RegulationDB,
AISystemDB, AIClassificationEnum, AISystemStatusEnum,
RiskDB, RiskLevelEnum,
EvidenceDB, EvidenceStatusEnum,
)
from compliance.db.repository import RequirementRepository

View File

@@ -16,7 +16,7 @@ Run with: pytest backend/compliance/tests/test_isms_routes.py -v
import pytest
from datetime import datetime, date
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock
from uuid import uuid4
from sqlalchemy.orm import Session
@@ -25,7 +25,7 @@ import sys
sys.path.insert(0, '/Users/benjaminadmin/Projekte/breakpilot-pwa/backend')
from compliance.db.models import (
ISMSScopeDB, ISMSContextDB, ISMSPolicyDB, SecurityObjectiveDB,
ISMSScopeDB, ISMSPolicyDB, SecurityObjectiveDB,
StatementOfApplicabilityDB, AuditFindingDB, CorrectiveActionDB,
ManagementReviewDB, InternalAuditDB, AuditTrailDB, ISMSReadinessCheckDB,
ApprovalStatusEnum, FindingTypeEnum, FindingStatusEnum, CAPATypeEnum
@@ -393,7 +393,7 @@ class TestAuditFinding:
# is_blocking is a property, so we check the type
is_blocking = (sample_major_finding.finding_type == FindingTypeEnum.MAJOR and
sample_major_finding.status != FindingStatusEnum.CLOSED)
assert is_blocking == True
assert is_blocking
def test_finding_has_objective_evidence(self, sample_finding):
"""Findings should have objective evidence."""
@@ -524,7 +524,7 @@ class TestISMSReadinessCheck:
readiness_score=30.0,
)
assert check.certification_possible == False
assert not check.certification_possible
assert len(check.potential_majors) >= 1
assert check.readiness_score < 100
@@ -551,7 +551,7 @@ class TestISMSReadinessCheck:
assert check.chapter_4_status == "pass"
assert check.chapter_5_status == "pass"
assert check.chapter_9_status == "pass"
assert check.certification_possible == True
assert check.certification_possible
# ============================================================================
@@ -660,7 +660,7 @@ class TestCertificationBlockers:
is_blocking = (finding.finding_type == FindingTypeEnum.MAJOR and
finding.status != FindingStatusEnum.CLOSED)
assert is_blocking == True
assert is_blocking
def test_closed_major_allows_certification(self):
"""Closed major findings should not block certification."""
@@ -677,7 +677,7 @@ class TestCertificationBlockers:
is_blocking = (finding.finding_type == FindingTypeEnum.MAJOR and
finding.status != FindingStatusEnum.CLOSED)
assert is_blocking == False
assert not is_blocking
def test_minor_findings_dont_block_certification(self):
"""Minor findings should not block certification."""
@@ -693,4 +693,4 @@ class TestCertificationBlockers:
is_blocking = (finding.finding_type == FindingTypeEnum.MAJOR and
finding.status != FindingStatusEnum.CLOSED)
assert is_blocking == False
assert not is_blocking

View File

@@ -1,415 +0,0 @@
"""
Data Subject Request (DSR) Admin API - Betroffenenanfragen-Verwaltung
Admin-Endpunkte für die Verwaltung von Betroffenenanfragen nach DSGVO
"""
from fastapi import APIRouter, HTTPException, Header, Query
from typing import Optional, List, Dict, Any
from pydantic import BaseModel
import httpx
import os
from consent_client import generate_jwt_token, JWT_SECRET
# Consent Service URL
CONSENT_SERVICE_URL = os.getenv("CONSENT_SERVICE_URL", "http://localhost:8081")
router = APIRouter(prefix="/v1/admin/dsr", tags=["dsr-admin"])
# Admin User UUID (muss in der DB existieren!)
ADMIN_USER_UUID = "a0000000-0000-0000-0000-000000000001"
# Request Models
class CreateDSRRequest(BaseModel):
"""Admin-Anfrage zum manuellen Erstellen einer Betroffenenanfrage"""
request_type: str
requester_email: str
requester_name: Optional[str] = None
requester_phone: Optional[str] = None
request_details: Optional[Dict[str, Any]] = None
priority: Optional[str] = None # normal, high, expedited
source: Optional[str] = "admin_panel"
class UpdateDSRRequest(BaseModel):
"""Anfrage zum Aktualisieren einer Betroffenenanfrage"""
status: Optional[str] = None
priority: Optional[str] = None
processing_notes: Optional[str] = None
class UpdateStatusRequest(BaseModel):
"""Anfrage zum Ändern des Status"""
status: str
comment: Optional[str] = None
class VerifyIdentityRequest(BaseModel):
"""Anfrage zur Identitätsverifizierung"""
method: str # id_card, passport, video_call, email, phone, other
class AssignRequest(BaseModel):
"""Anfrage zur Zuweisung"""
assignee_id: str
class ExtendDeadlineRequest(BaseModel):
"""Anfrage zur Fristverlängerung"""
reason: str
days: Optional[int] = 60
class CompleteDSRRequest(BaseModel):
"""Anfrage zum Abschließen einer Betroffenenanfrage"""
summary: str
result_data: Optional[Dict[str, Any]] = None
class RejectDSRRequest(BaseModel):
"""Anfrage zum Ablehnen einer Betroffenenanfrage"""
reason: str
legal_basis: str # Art. 17(3)a, Art. 17(3)b, Art. 17(3)c, Art. 17(3)d, Art. 17(3)e, Art. 12(5)
class SendCommunicationRequest(BaseModel):
"""Anfrage zum Senden einer Kommunikation"""
communication_type: str
template_version_id: Optional[str] = None
custom_subject: Optional[str] = None
custom_body: Optional[str] = None
variables: Optional[Dict[str, str]] = None
class UpdateExceptionCheckRequest(BaseModel):
"""Anfrage zum Aktualisieren einer Ausnahmeprüfung"""
applies: bool
notes: Optional[str] = None
class CreateTemplateVersionRequest(BaseModel):
"""Anfrage zum Erstellen einer Vorlagen-Version"""
version: str
language: Optional[str] = "de"
subject: str
body_html: str
body_text: Optional[str] = None
# Helper für Admin Token
def get_admin_token(authorization: Optional[str]) -> str:
if authorization:
parts = authorization.split(" ")
if len(parts) == 2 and parts[0] == "Bearer":
return parts[1]
# Für Entwicklung: Generiere einen Admin-Token
return generate_jwt_token(
user_id=ADMIN_USER_UUID,
email="admin@breakpilot.app",
role="admin"
)
async def proxy_request(method: str, path: str, token: str, json_data=None, query_params=None):
"""Proxied Anfragen an den Go Consent Service"""
url = f"{CONSENT_SERVICE_URL}/api/v1/admin{path}"
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
async with httpx.AsyncClient() as client:
try:
if method == "GET":
response = await client.get(url, headers=headers, params=query_params, timeout=30.0)
elif method == "POST":
response = await client.post(url, headers=headers, json=json_data, timeout=30.0)
elif method == "PUT":
response = await client.put(url, headers=headers, json=json_data, timeout=30.0)
elif method == "DELETE":
response = await client.delete(url, headers=headers, timeout=30.0)
else:
raise HTTPException(status_code=400, detail="Invalid method")
if response.status_code >= 400:
error_detail = response.json() if response.content else {"error": "Unknown error"}
raise HTTPException(status_code=response.status_code, detail=error_detail)
return response.json() if response.content else {"success": True}
except httpx.RequestError as e:
raise HTTPException(status_code=503, detail=f"Consent Service unavailable: {str(e)}")
# ==========================================
# DSR List & Statistics
# ==========================================
@router.get("")
async def admin_list_dsr(
status: Optional[str] = Query(None, description="Filter by status"),
request_type: Optional[str] = Query(None, description="Filter by request type"),
assigned_to: Optional[str] = Query(None, description="Filter by assignee"),
priority: Optional[str] = Query(None, description="Filter by priority"),
overdue_only: bool = Query(False, description="Only overdue requests"),
search: Optional[str] = Query(None, description="Search term"),
from_date: Optional[str] = Query(None, description="From date (YYYY-MM-DD)"),
to_date: Optional[str] = Query(None, description="To date (YYYY-MM-DD)"),
limit: int = Query(20, ge=1, le=100),
offset: int = Query(0, ge=0),
authorization: Optional[str] = Header(None)
):
"""Gibt alle Betroffenenanfragen mit Filtern zurück"""
token = get_admin_token(authorization)
params = {"limit": limit, "offset": offset}
if status:
params["status"] = status
if request_type:
params["request_type"] = request_type
if assigned_to:
params["assigned_to"] = assigned_to
if priority:
params["priority"] = priority
if overdue_only:
params["overdue_only"] = "true"
if search:
params["search"] = search
if from_date:
params["from_date"] = from_date
if to_date:
params["to_date"] = to_date
return await proxy_request("GET", "/dsr", token, query_params=params)
@router.get("/stats")
async def admin_get_dsr_stats(authorization: Optional[str] = Header(None)):
"""Gibt Dashboard-Statistiken für Betroffenenanfragen zurück"""
token = get_admin_token(authorization)
return await proxy_request("GET", "/dsr/stats", token)
# ==========================================
# Single DSR Management
# ==========================================
@router.get("/{dsr_id}")
async def admin_get_dsr(dsr_id: str, authorization: Optional[str] = Header(None)):
"""Gibt Details einer Betroffenenanfrage zurück"""
token = get_admin_token(authorization)
return await proxy_request("GET", f"/dsr/{dsr_id}", token)
@router.post("")
async def admin_create_dsr(
request: CreateDSRRequest,
authorization: Optional[str] = Header(None)
):
"""Erstellt eine Betroffenenanfrage manuell"""
token = get_admin_token(authorization)
return await proxy_request("POST", "/dsr", token, request.dict(exclude_none=True))
@router.put("/{dsr_id}")
async def admin_update_dsr(
dsr_id: str,
request: UpdateDSRRequest,
authorization: Optional[str] = Header(None)
):
"""Aktualisiert eine Betroffenenanfrage"""
token = get_admin_token(authorization)
return await proxy_request("PUT", f"/dsr/{dsr_id}", token, request.dict(exclude_none=True))
@router.post("/{dsr_id}/status")
async def admin_update_dsr_status(
dsr_id: str,
request: UpdateStatusRequest,
authorization: Optional[str] = Header(None)
):
"""Ändert den Status einer Betroffenenanfrage"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/status", token, request.dict(exclude_none=True))
# ==========================================
# DSR Workflow Actions
# ==========================================
@router.post("/{dsr_id}/verify-identity")
async def admin_verify_identity(
dsr_id: str,
request: VerifyIdentityRequest,
authorization: Optional[str] = Header(None)
):
"""Verifiziert die Identität des Antragstellers"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/verify-identity", token, request.dict())
@router.post("/{dsr_id}/assign")
async def admin_assign_dsr(
dsr_id: str,
request: AssignRequest,
authorization: Optional[str] = Header(None)
):
"""Weist eine Betroffenenanfrage einem Bearbeiter zu"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/assign", token, request.dict())
@router.post("/{dsr_id}/extend")
async def admin_extend_deadline(
dsr_id: str,
request: ExtendDeadlineRequest,
authorization: Optional[str] = Header(None)
):
"""Verlängert die Bearbeitungsfrist (max. 2 weitere Monate nach Art. 12(3))"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/extend", token, request.dict())
@router.post("/{dsr_id}/complete")
async def admin_complete_dsr(
dsr_id: str,
request: CompleteDSRRequest,
authorization: Optional[str] = Header(None)
):
"""Schließt eine Betroffenenanfrage erfolgreich ab"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/complete", token, request.dict(exclude_none=True))
@router.post("/{dsr_id}/reject")
async def admin_reject_dsr(
dsr_id: str,
request: RejectDSRRequest,
authorization: Optional[str] = Header(None)
):
"""Lehnt eine Betroffenenanfrage mit Rechtsgrundlage ab"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/reject", token, request.dict())
# ==========================================
# DSR History & Communications
# ==========================================
@router.get("/{dsr_id}/history")
async def admin_get_dsr_history(dsr_id: str, authorization: Optional[str] = Header(None)):
"""Gibt die Status-Historie einer Betroffenenanfrage zurück"""
token = get_admin_token(authorization)
return await proxy_request("GET", f"/dsr/{dsr_id}/history", token)
@router.get("/{dsr_id}/communications")
async def admin_get_dsr_communications(dsr_id: str, authorization: Optional[str] = Header(None)):
"""Gibt die Kommunikationshistorie einer Betroffenenanfrage zurück"""
token = get_admin_token(authorization)
return await proxy_request("GET", f"/dsr/{dsr_id}/communications", token)
@router.post("/{dsr_id}/communicate")
async def admin_send_communication(
dsr_id: str,
request: SendCommunicationRequest,
authorization: Optional[str] = Header(None)
):
"""Sendet eine Kommunikation zum Antragsteller"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/communicate", token, request.dict(exclude_none=True))
# ==========================================
# Exception Checks (Art. 17)
# ==========================================
@router.get("/{dsr_id}/exception-checks")
async def admin_get_exception_checks(dsr_id: str, authorization: Optional[str] = Header(None)):
"""Gibt die Ausnahmeprüfungen für Löschanfragen (Art. 17) zurück"""
token = get_admin_token(authorization)
return await proxy_request("GET", f"/dsr/{dsr_id}/exception-checks", token)
@router.post("/{dsr_id}/exception-checks/init")
async def admin_init_exception_checks(dsr_id: str, authorization: Optional[str] = Header(None)):
"""Initialisiert die Ausnahmeprüfungen für eine Löschanfrage"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/exception-checks/init", token)
@router.put("/{dsr_id}/exception-checks/{check_id}")
async def admin_update_exception_check(
dsr_id: str,
check_id: str,
request: UpdateExceptionCheckRequest,
authorization: Optional[str] = Header(None)
):
"""Aktualisiert eine einzelne Ausnahmeprüfung"""
token = get_admin_token(authorization)
return await proxy_request("PUT", f"/dsr/{dsr_id}/exception-checks/{check_id}", token, request.dict(exclude_none=True))
# ==========================================
# Deadline Processing
# ==========================================
@router.post("/deadlines/process")
async def admin_process_deadlines(authorization: Optional[str] = Header(None)):
"""Verarbeitet Fristen und sendet Warnungen (für Cronjob)"""
token = get_admin_token(authorization)
return await proxy_request("POST", "/dsr/deadlines/process", token)
# ==========================================
# DSR Templates Router
# ==========================================
templates_router = APIRouter(prefix="/v1/admin/dsr-templates", tags=["dsr-templates"])
@templates_router.get("")
async def admin_get_templates(authorization: Optional[str] = Header(None)):
"""Gibt alle DSR-Vorlagen zurück"""
token = get_admin_token(authorization)
return await proxy_request("GET", "/dsr-templates", token)
@templates_router.get("/published")
async def admin_get_published_templates(
request_type: Optional[str] = Query(None, description="Filter by request type"),
language: str = Query("de", description="Language"),
authorization: Optional[str] = Header(None)
):
"""Gibt alle veröffentlichten Vorlagen für die Auswahl zurück"""
token = get_admin_token(authorization)
params = {"language": language}
if request_type:
params["request_type"] = request_type
return await proxy_request("GET", "/dsr-templates/published", token, query_params=params)
@templates_router.get("/{template_id}/versions")
async def admin_get_template_versions(template_id: str, authorization: Optional[str] = Header(None)):
"""Gibt alle Versionen einer Vorlage zurück"""
token = get_admin_token(authorization)
return await proxy_request("GET", f"/dsr-templates/{template_id}/versions", token)
@templates_router.post("/{template_id}/versions")
async def admin_create_template_version(
template_id: str,
request: CreateTemplateVersionRequest,
authorization: Optional[str] = Header(None)
):
"""Erstellt eine neue Version einer Vorlage"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr-templates/{template_id}/versions", token, request.dict(exclude_none=True))
@templates_router.post("/versions/{version_id}/publish")
async def admin_publish_template_version(version_id: str, authorization: Optional[str] = Header(None)):
"""Veröffentlicht eine Vorlagen-Version"""
token = get_admin_token(authorization)
return await proxy_request("POST", f"/dsr-template-versions/{version_id}/publish", token)

View File

@@ -1,111 +0,0 @@
"""
Data Subject Request (DSR) API - Betroffenenanfragen nach DSGVO
Benutzer-Endpunkte zum Erstellen und Verwalten eigener Betroffenenanfragen
"""
from fastapi import APIRouter, HTTPException, Header, Query
from typing import Optional, List, Dict, Any
from pydantic import BaseModel, EmailStr
import httpx
import os
from consent_client import generate_jwt_token, JWT_SECRET
# Consent Service URL
CONSENT_SERVICE_URL = os.getenv("CONSENT_SERVICE_URL", "http://localhost:8081")
router = APIRouter(prefix="/v1/dsr", tags=["dsr"])
# Request Models
class CreateDSRRequest(BaseModel):
"""Anfrage zum Erstellen einer Betroffenenanfrage"""
request_type: str # access, rectification, erasure, restriction, portability
requester_email: Optional[str] = None
requester_name: Optional[str] = None
requester_phone: Optional[str] = None
request_details: Optional[Dict[str, Any]] = None
# Helper to extract token
def get_token(authorization: Optional[str]) -> str:
if authorization:
parts = authorization.split(" ")
if len(parts) == 2 and parts[0] == "Bearer":
return parts[1]
raise HTTPException(status_code=401, detail="Authorization required")
async def proxy_request(method: str, path: str, token: str, json_data=None, query_params=None):
"""Proxied Anfragen an den Go Consent Service"""
url = f"{CONSENT_SERVICE_URL}/api/v1{path}"
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
async with httpx.AsyncClient() as client:
try:
if method == "GET":
response = await client.get(url, headers=headers, params=query_params, timeout=10.0)
elif method == "POST":
response = await client.post(url, headers=headers, json=json_data, timeout=10.0)
elif method == "PUT":
response = await client.put(url, headers=headers, json=json_data, timeout=10.0)
elif method == "DELETE":
response = await client.delete(url, headers=headers, timeout=10.0)
else:
raise HTTPException(status_code=400, detail="Invalid method")
if response.status_code >= 400:
error_detail = response.json() if response.content else {"error": "Unknown error"}
raise HTTPException(status_code=response.status_code, detail=error_detail)
return response.json() if response.content else {"success": True}
except httpx.RequestError as e:
raise HTTPException(status_code=503, detail=f"Consent Service unavailable: {str(e)}")
# ==========================================
# User DSR Endpoints
# ==========================================
@router.post("")
async def create_dsr(
request: CreateDSRRequest,
authorization: str = Header(...)
):
"""
Erstellt eine neue Betroffenenanfrage.
request_type muss einer der folgenden Werte sein:
- access: Auskunftsrecht (Art. 15 DSGVO)
- rectification: Recht auf Berichtigung (Art. 16 DSGVO)
- erasure: Recht auf Löschung (Art. 17 DSGVO)
- restriction: Recht auf Einschränkung (Art. 18 DSGVO)
- portability: Recht auf Datenübertragbarkeit (Art. 20 DSGVO)
"""
token = get_token(authorization)
return await proxy_request("POST", "/dsr", token, request.dict(exclude_none=True))
@router.get("")
async def get_my_dsrs(authorization: str = Header(...)):
"""Gibt alle eigenen Betroffenenanfragen zurück"""
token = get_token(authorization)
return await proxy_request("GET", "/dsr", token)
@router.get("/{dsr_id}")
async def get_my_dsr(dsr_id: str, authorization: str = Header(...)):
"""Gibt Details einer eigenen Betroffenenanfrage zurück"""
token = get_token(authorization)
return await proxy_request("GET", f"/dsr/{dsr_id}", token)
@router.post("/{dsr_id}/cancel")
async def cancel_my_dsr(dsr_id: str, authorization: str = Header(...)):
"""Storniert eine eigene Betroffenenanfrage"""
token = get_token(authorization)
return await proxy_request("POST", f"/dsr/{dsr_id}/cancel", token)

Some files were not shown because too many files have changed in this diff Show More