From 95fcba34cd5f211184f456a377cfbcb3926f2baf Mon Sep 17 00:00:00 2001 From: Benjamin Admin Date: Sat, 7 Mar 2026 19:00:33 +0100 Subject: [PATCH] fix(quality): Ruff/CVE/TS-Fixes, 104 neue Tests, Complexity-Refactoring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Ruff: 144 auto-fixes (unused imports, == None → is None), F821/F811/F841 manuell - CVEs: python-multipart>=0.0.22, weasyprint>=68.0, pillow>=12.1.1, npm audit fix (0 vulns) - TS: 5 tote Drafting-Engine-Dateien entfernt, allowed-facts/sanitizer/StepHeader/context fixes - Tests: +104 (ISMS 58, Evidence 18, VVT 14, Generation 14) → 1449 passed - Refactoring: collect_ci_evidence (F→A), row_to_response (E→A), extract_requirements (E→A) - Dead Code: pca-platform, 7 Go-Handler, dsr_api.py, duplicate Schemas entfernt Co-Authored-By: Claude Opus 4.6 --- .../sdk/document-generator/contextBridge.ts | 4 +- .../app/sdk/document-generator/page.tsx | 8 +- .../app/sdk/email-templates/page.tsx | 13 +- .../sdk/ComplianceAdvisorWidget.tsx | 9 +- .../components/sdk/DraftEditor.tsx | 300 ------ .../components/sdk/DraftingEngineWidget.tsx | 443 --------- .../components/sdk/StepHeader/StepHeader.tsx | 67 +- .../components/sdk/ValidationReport.tsx | 220 ----- .../sdk/__tests__/StepHeader.test.tsx | 17 +- .../lib/sdk/compliance-scope-golden-tests.ts | 722 -------------- .../__tests__/intent-classifier.test.ts | 153 --- .../__tests__/state-projector.test.ts | 312 ------ .../lib/sdk/drafting-engine/allowed-facts.ts | 42 +- .../sdk/drafting-engine/intent-classifier.ts | 373 -------- .../lib/sdk/drafting-engine/sanitizer.ts | 2 +- .../sdk/drafting-engine/state-projector.ts | 342 ------- .../drafting-engine/use-drafting-engine.ts | 343 ------- .../lib/sdk/dsfa/__tests__/types.test.ts | 3 + .../lib/sdk/tom-generator/context.tsx | 10 + admin-compliance/package-lock.json | 305 +++--- .../internal/api/handlers/dsb_handlers.go | 451 --------- .../internal/api/handlers/funding_handlers.go | 638 
------------- .../internal/api/handlers/gci_handlers.go | 188 ---- .../api/handlers/industry_handlers.go | 115 --- .../api/handlers/multitenant_handlers.go | 268 ------ .../api/handlers/reporting_handlers.go | 97 -- .../internal/api/handlers/sso_handlers.go | 631 ------------- ai-compliance-sdk/internal/dsb/models.go | 164 ---- ai-compliance-sdk/internal/dsb/store.go | 510 ---------- ai-compliance-sdk/internal/funding/export.go | 395 -------- ai-compliance-sdk/internal/funding/models.go | 394 -------- .../internal/funding/postgres_store.go | 652 ------------- ai-compliance-sdk/internal/funding/store.go | 81 -- ai-compliance-sdk/internal/gci/engine.go | 371 -------- .../internal/gci/iso_gap_analysis.go | 188 ---- ai-compliance-sdk/internal/gci/iso_mapping.go | 207 ---- ai-compliance-sdk/internal/gci/mock_data.go | 74 -- ai-compliance-sdk/internal/gci/models.go | 104 -- ai-compliance-sdk/internal/gci/nis2_roles.go | 118 --- .../internal/gci/nis2_scoring.go | 147 --- ai-compliance-sdk/internal/gci/validity.go | 59 -- ai-compliance-sdk/internal/gci/weights.go | 78 -- ai-compliance-sdk/internal/industry/models.go | 65 -- .../internal/industry/templates.go | 558 ----------- .../internal/multitenant/models.go | 77 -- .../internal/multitenant/store.go | 148 --- .../internal/reporting/models.go | 97 -- ai-compliance-sdk/internal/reporting/store.go | 516 ---------- ai-compliance-sdk/internal/sso/models.go | 158 ---- ai-compliance-sdk/internal/sso/store.go | 477 ---------- .../compliance/api/ai_routes.py | 2 - .../compliance/api/audit_routes.py | 4 +- .../compliance/api/banner_routes.py | 5 +- .../compliance/api/change_request_engine.py | 4 +- .../compliance/api/change_request_routes.py | 3 +- .../compliance/api/company_profile_routes.py | 140 +-- .../compliance/api/consent_template_routes.py | 2 +- .../compliance/api/crud_factory.py | 2 +- .../loeschfristen_template.py | 2 +- .../api/document_templates/tom_template.py | 1 - .../compliance/api/dsfa_routes.py | 5 +- 
.../compliance/api/dsr_routes.py | 4 +- .../compliance/api/email_template_routes.py | 6 +- .../compliance/api/evidence_routes.py | 416 ++++---- .../compliance/api/extraction_routes.py | 296 +++--- .../compliance/api/incident_routes.py | 2 +- .../compliance/api/isms_routes.py | 18 +- .../compliance/api/legal_document_routes.py | 12 +- .../compliance/api/legal_template_routes.py | 4 +- backend-compliance/compliance/api/routes.py | 26 +- backend-compliance/compliance/api/schemas.py | 51 - .../compliance/api/screening_routes.py | 12 - .../compliance/api/source_policy_router.py | 13 +- .../compliance/api/tom_routes.py | 4 +- .../api/vendor_compliance_routes.py | 3 +- .../compliance/api/versioning_utils.py | 1 - .../compliance/api/vvt_routes.py | 1 - .../compliance/db/banner_models.py | 2 +- .../compliance/db/dsr_models.py | 2 +- .../compliance/db/einwilligungen_models.py | 2 +- .../compliance/db/email_template_models.py | 2 +- .../compliance/db/isms_repository.py | 3 +- .../db/legal_document_extend_models.py | 2 +- backend-compliance/compliance/db/models.py | 1 - .../compliance/db/repository.py | 12 +- .../scripts/validate_service_modules.py | 4 +- .../services/ai_compliance_assistant.py | 4 +- .../services/audit_pdf_generator.py | 16 +- .../compliance/services/auto_risk_updater.py | 4 +- .../compliance/services/export_generator.py | 4 +- .../compliance/services/llm_provider.py | 2 +- .../compliance/services/regulation_scraper.py | 2 - .../compliance/services/report_generator.py | 10 +- .../compliance/services/seeder.py | 4 +- .../compliance/tests/test_audit_routes.py | 3 +- .../tests/test_auto_risk_updater.py | 8 +- .../tests/test_compliance_routes.py | 1 - .../compliance/tests/test_isms_routes.py | 16 +- backend-compliance/dsr_admin_api.py | 415 -------- backend-compliance/dsr_api.py | 111 --- backend-compliance/middleware/__init__.py | 10 - backend-compliance/requirements.txt | 5 +- .../tests/test_company_profile_routes.py | 13 +- 
backend-compliance/tests/test_dsfa_routes.py | 2 +- .../tests/test_evidence_routes.py | 265 ++++++ .../tests/test_generation_routes.py | 158 ++++ backend-compliance/tests/test_isms_routes.py | 886 ++++++++++++++++++ backend-compliance/tests/test_vvt_routes.py | 334 +++++++ pca-platform/README.md | 243 ----- pca-platform/ai-access.json | 82 -- pca-platform/demo/index.html | 444 --------- pca-platform/docker-compose.yml | 81 -- pca-platform/heuristic-service/Dockerfile | 41 - .../heuristic-service/cmd/server/main.go | 84 -- pca-platform/heuristic-service/go.mod | 36 - pca-platform/heuristic-service/go.sum | 89 -- .../internal/api/handlers.go | 285 ------ .../internal/config/config.go | 151 --- .../internal/heuristics/scorer.go | 340 ------- .../internal/heuristics/scorer_test.go | 250 ----- .../heuristic-service/internal/stepup/pow.go | 180 ---- .../internal/stepup/pow_test.go | 235 ----- .../internal/stepup/webauthn.go | 172 ---- pca-platform/sdk/js/src/pca-sdk.js | 473 ---------- 124 files changed, 2533 insertions(+), 15709 deletions(-) delete mode 100644 admin-compliance/components/sdk/DraftEditor.tsx delete mode 100644 admin-compliance/components/sdk/DraftingEngineWidget.tsx delete mode 100644 admin-compliance/components/sdk/ValidationReport.tsx delete mode 100644 admin-compliance/lib/sdk/compliance-scope-golden-tests.ts delete mode 100644 admin-compliance/lib/sdk/drafting-engine/__tests__/intent-classifier.test.ts delete mode 100644 admin-compliance/lib/sdk/drafting-engine/__tests__/state-projector.test.ts delete mode 100644 admin-compliance/lib/sdk/drafting-engine/intent-classifier.ts delete mode 100644 admin-compliance/lib/sdk/drafting-engine/state-projector.ts delete mode 100644 admin-compliance/lib/sdk/drafting-engine/use-drafting-engine.ts delete mode 100644 ai-compliance-sdk/internal/api/handlers/dsb_handlers.go delete mode 100644 ai-compliance-sdk/internal/api/handlers/funding_handlers.go delete mode 100644 
ai-compliance-sdk/internal/api/handlers/gci_handlers.go delete mode 100644 ai-compliance-sdk/internal/api/handlers/industry_handlers.go delete mode 100644 ai-compliance-sdk/internal/api/handlers/multitenant_handlers.go delete mode 100644 ai-compliance-sdk/internal/api/handlers/reporting_handlers.go delete mode 100644 ai-compliance-sdk/internal/api/handlers/sso_handlers.go delete mode 100644 ai-compliance-sdk/internal/dsb/models.go delete mode 100644 ai-compliance-sdk/internal/dsb/store.go delete mode 100644 ai-compliance-sdk/internal/funding/export.go delete mode 100644 ai-compliance-sdk/internal/funding/models.go delete mode 100644 ai-compliance-sdk/internal/funding/postgres_store.go delete mode 100644 ai-compliance-sdk/internal/funding/store.go delete mode 100644 ai-compliance-sdk/internal/gci/engine.go delete mode 100644 ai-compliance-sdk/internal/gci/iso_gap_analysis.go delete mode 100644 ai-compliance-sdk/internal/gci/iso_mapping.go delete mode 100644 ai-compliance-sdk/internal/gci/mock_data.go delete mode 100644 ai-compliance-sdk/internal/gci/models.go delete mode 100644 ai-compliance-sdk/internal/gci/nis2_roles.go delete mode 100644 ai-compliance-sdk/internal/gci/nis2_scoring.go delete mode 100644 ai-compliance-sdk/internal/gci/validity.go delete mode 100644 ai-compliance-sdk/internal/gci/weights.go delete mode 100644 ai-compliance-sdk/internal/industry/models.go delete mode 100644 ai-compliance-sdk/internal/industry/templates.go delete mode 100644 ai-compliance-sdk/internal/multitenant/models.go delete mode 100644 ai-compliance-sdk/internal/multitenant/store.go delete mode 100644 ai-compliance-sdk/internal/reporting/models.go delete mode 100644 ai-compliance-sdk/internal/reporting/store.go delete mode 100644 ai-compliance-sdk/internal/sso/models.go delete mode 100644 ai-compliance-sdk/internal/sso/store.go delete mode 100644 backend-compliance/dsr_admin_api.py delete mode 100644 backend-compliance/dsr_api.py create mode 100644 
backend-compliance/tests/test_isms_routes.py delete mode 100644 pca-platform/README.md delete mode 100644 pca-platform/ai-access.json delete mode 100644 pca-platform/demo/index.html delete mode 100644 pca-platform/docker-compose.yml delete mode 100644 pca-platform/heuristic-service/Dockerfile delete mode 100644 pca-platform/heuristic-service/cmd/server/main.go delete mode 100644 pca-platform/heuristic-service/go.mod delete mode 100644 pca-platform/heuristic-service/go.sum delete mode 100644 pca-platform/heuristic-service/internal/api/handlers.go delete mode 100644 pca-platform/heuristic-service/internal/config/config.go delete mode 100644 pca-platform/heuristic-service/internal/heuristics/scorer.go delete mode 100644 pca-platform/heuristic-service/internal/heuristics/scorer_test.go delete mode 100644 pca-platform/heuristic-service/internal/stepup/pow.go delete mode 100644 pca-platform/heuristic-service/internal/stepup/pow_test.go delete mode 100644 pca-platform/heuristic-service/internal/stepup/webauthn.go delete mode 100644 pca-platform/sdk/js/src/pca-sdk.js diff --git a/admin-compliance/app/sdk/document-generator/contextBridge.ts b/admin-compliance/app/sdk/document-generator/contextBridge.ts index e29bf01..a0416ae 100644 --- a/admin-compliance/app/sdk/document-generator/contextBridge.ts +++ b/admin-compliance/app/sdk/document-generator/contextBridge.ts @@ -515,7 +515,7 @@ export function setContextPath(ctx: TemplateContext, dotPath: string, value: unk return { ...ctx, [section]: { - ...(ctx[section] as Record), + ...(ctx[section] as unknown as Record), [key]: value, }, } @@ -526,6 +526,6 @@ export function setContextPath(ctx: TemplateContext, dotPath: string, value: unk */ export function getContextPath(ctx: TemplateContext, dotPath: string): unknown { const [section, ...rest] = dotPath.split('.') as [keyof TemplateContext, ...string[]] - const sectionObj = ctx[section] as Record + const sectionObj = ctx[section] as unknown as Record return 
sectionObj?.[rest.join('.')] } diff --git a/admin-compliance/app/sdk/document-generator/page.tsx b/admin-compliance/app/sdk/document-generator/page.tsx index dafc047..51a39d5 100644 --- a/admin-compliance/app/sdk/document-generator/page.tsx +++ b/admin-compliance/app/sdk/document-generator/page.tsx @@ -313,7 +313,7 @@ function ContextSectionForm({ onChange: (section: keyof TemplateContext, key: string, value: unknown) => void }) { const fields = SECTION_FIELDS[section] - const sectionData = context[section] as Record + const sectionData = context[section] as unknown as Record return (
@@ -523,7 +523,7 @@ function GeneratorSection({ }, [template.id]) // eslint-disable-line react-hooks/exhaustive-deps // Computed flags pills config - const flagPills: { key: keyof typeof ruleResult.computedFlags; label: string; color: string }[] = ruleResult ? [ + const flagPills: { key: string; label: string; color: string }[] = ruleResult ? [ { key: 'IS_B2C', label: 'B2C', color: 'bg-blue-100 text-blue-700' }, { key: 'SERVICE_IS_SAAS', label: 'SaaS', color: 'bg-green-100 text-green-700' }, { key: 'HAS_PENALTY', label: 'Vertragsstrafe', color: 'bg-orange-100 text-orange-700' }, @@ -842,7 +842,7 @@ function DocumentGeneratorPageInner() { useEffect(() => { if (state?.companyProfile) { const profile = state.companyProfile - const p = profile as Record + const p = profile as unknown as Record setContext((prev) => ({ ...prev, PROVIDER: { @@ -919,7 +919,7 @@ function DocumentGeneratorPageInner() { (section: keyof TemplateContext, key: string, value: unknown) => { setContext((prev) => ({ ...prev, - [section]: { ...(prev[section] as Record), [key]: value }, + [section]: { ...(prev[section] as unknown as Record), [key]: value }, })) }, [] diff --git a/admin-compliance/app/sdk/email-templates/page.tsx b/admin-compliance/app/sdk/email-templates/page.tsx index 552d2af..99a7a76 100644 --- a/admin-compliance/app/sdk/email-templates/page.tsx +++ b/admin-compliance/app/sdk/email-templates/page.tsx @@ -2,7 +2,7 @@ import React, { useState, useEffect, useCallback } from 'react' import { useSDK } from '@/lib/sdk' -import { StepHeader, STEP_EXPLANATIONS } from '@/components/sdk/StepHeader' +import { StepHeader } from '@/components/sdk/StepHeader' // ============================================================================= // TYPES @@ -321,16 +321,7 @@ export default function EmailTemplatesPage() { return (
- + {error && (
diff --git a/admin-compliance/components/sdk/ComplianceAdvisorWidget.tsx b/admin-compliance/components/sdk/ComplianceAdvisorWidget.tsx index 5fb92ce..93b8b6d 100644 --- a/admin-compliance/components/sdk/ComplianceAdvisorWidget.tsx +++ b/admin-compliance/components/sdk/ComplianceAdvisorWidget.tsx @@ -15,7 +15,6 @@ interface Message { interface ComplianceAdvisorWidgetProps { currentStep?: string - enableDraftingEngine?: boolean } // ============================================================================= @@ -68,13 +67,7 @@ const COUNTRIES: { code: Country; label: string }[] = [ { code: 'EU', label: 'EU' }, ] -export function ComplianceAdvisorWidget({ currentStep = 'default', enableDraftingEngine = false }: ComplianceAdvisorWidgetProps) { - // Feature-flag: If Drafting Engine enabled, render DraftingEngineWidget instead - if (enableDraftingEngine) { - const { DraftingEngineWidget } = require('./DraftingEngineWidget') - return - } - +export function ComplianceAdvisorWidget({ currentStep = 'default' }: ComplianceAdvisorWidgetProps) { const [isOpen, setIsOpen] = useState(false) const [isExpanded, setIsExpanded] = useState(false) const [messages, setMessages] = useState([]) diff --git a/admin-compliance/components/sdk/DraftEditor.tsx b/admin-compliance/components/sdk/DraftEditor.tsx deleted file mode 100644 index d459780..0000000 --- a/admin-compliance/components/sdk/DraftEditor.tsx +++ /dev/null @@ -1,300 +0,0 @@ -'use client' - -/** - * DraftEditor - Split-Pane Editor fuer Compliance-Dokument-Entwuerfe - * - * Links (2/3): Gerenderter Draft mit Section-Headern - * Rechts (1/3): Chat-Panel fuer iterative Verfeinerung - * Oben: Document-Type Label, Depth-Level Badge, Constraint-Compliance - */ - -import { useState, useRef, useCallback } from 'react' -import { DOCUMENT_TYPE_LABELS } from '@/lib/sdk/compliance-scope-types' -import type { ScopeDocumentType } from '@/lib/sdk/compliance-scope-types' -import type { - DraftRevision, - ConstraintCheckResult, - 
ValidationResult, -} from '@/lib/sdk/drafting-engine/types' - -interface DraftEditorProps { - draft: DraftRevision - documentType: ScopeDocumentType | null - constraintCheck: ConstraintCheckResult | null - validationResult: ValidationResult | null - isTyping: boolean - onAccept: () => void - onValidate: () => void - onClose: () => void - onRefine: (instruction: string) => void -} - -export function DraftEditor({ - draft, - documentType, - constraintCheck, - validationResult, - isTyping, - onAccept, - onValidate, - onClose, - onRefine, -}: DraftEditorProps) { - const [refineInput, setRefineInput] = useState('') - const [activeSection, setActiveSection] = useState(null) - const contentRef = useRef(null) - - const handleRefine = useCallback(() => { - if (!refineInput.trim() || isTyping) return - onRefine(refineInput.trim()) - setRefineInput('') - }, [refineInput, isTyping, onRefine]) - - const handleRefineKeyDown = (e: React.KeyboardEvent) => { - if (e.key === 'Enter' && !e.shiftKey) { - e.preventDefault() - handleRefine() - } - } - - const docLabel = documentType - ? DOCUMENT_TYPE_LABELS[documentType]?.split(' (')[0] || documentType - : 'Dokument' - - return ( -
-
- {/* Header */} -
-
- - - -
-
{docLabel} - Entwurf
-
- {draft.sections.length} Sections | Erstellt {new Date(draft.createdAt).toLocaleTimeString('de-DE', { hour: '2-digit', minute: '2-digit' })} -
-
-
- -
- {/* Constraint Badge */} - {constraintCheck && ( - - {constraintCheck.allowed ? 'Constraints OK' : 'Constraint-Verletzung'} - - )} - - {/* Validation Badge */} - {validationResult && ( - - {validationResult.passed ? 'Validiert' : `${validationResult.errors.length} Fehler`} - - )} - - -
-
- - {/* Adjustment Warnings */} - {constraintCheck && constraintCheck.adjustments.length > 0 && ( -
- {constraintCheck.adjustments.map((adj, i) => ( -

- - - - {adj} -

- ))} -
- )} - - {/* Main Content: 2/3 Editor + 1/3 Chat */} -
- {/* Left: Draft Content (2/3) */} -
- {/* Section Navigation */} -
- {draft.sections.map((section) => ( - - ))} -
- - {/* Sections */} -
- {draft.sections.map((section) => ( -
-
-

{section.title}

- {section.schemaField && ( - {section.schemaField} - )} -
-
-
- {section.content} -
-
-
- ))} -
-
- - {/* Right: Refinement Chat (1/3) */} -
-
-

Verfeinerung

-

Geben Sie Anweisungen zur Verbesserung

-
- - {/* Validation Summary (if present) */} - {validationResult && ( -
-
- {validationResult.errors.length > 0 && ( -
- - {validationResult.errors.length} Fehler -
- )} - {validationResult.warnings.length > 0 && ( -
- - {validationResult.warnings.length} Warnungen -
- )} - {validationResult.suggestions.length > 0 && ( -
- - {validationResult.suggestions.length} Vorschlaege -
- )} -
-
- )} - - {/* Refinement Area */} -
-
-

- Beschreiben Sie, was geaendert werden soll. Der Agent erstellt eine ueberarbeitete Version unter Beachtung der Scope-Constraints. -

- - {/* Quick Refinement Buttons */} -
- {[ - 'Mehr Details hinzufuegen', - 'Platzhalter ausfuellen', - 'Rechtliche Referenzen ergaenzen', - 'Sprache vereinfachen', - ].map((suggestion) => ( - - ))} -
-
-
- - {/* Refinement Input */} -
-
- setRefineInput(e.target.value)} - onKeyDown={handleRefineKeyDown} - placeholder="Anweisung eingeben..." - disabled={isTyping} - className="flex-1 px-3 py-2 text-sm border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent disabled:opacity-50" - /> - -
-
-
-
- - {/* Footer Actions */} -
-
- -
- -
- - -
-
-
-
- ) -} diff --git a/admin-compliance/components/sdk/DraftingEngineWidget.tsx b/admin-compliance/components/sdk/DraftingEngineWidget.tsx deleted file mode 100644 index 11ab5d0..0000000 --- a/admin-compliance/components/sdk/DraftingEngineWidget.tsx +++ /dev/null @@ -1,443 +0,0 @@ -'use client' - -/** - * DraftingEngineWidget - Erweitert den ComplianceAdvisor um 4 Modi - * - * Mode-Indicator Pills: Explain / Ask / Draft / Validate - * Document-Type Selector aus requiredDocuments der ScopeDecision - * Feature-Flag enableDraftingEngine fuer schrittweises Rollout - */ - -import { useState, useEffect, useRef, useCallback } from 'react' -import { useSDK } from '@/lib/sdk/context' -import { useDraftingEngine } from '@/lib/sdk/drafting-engine/use-drafting-engine' -import { DOCUMENT_TYPE_LABELS } from '@/lib/sdk/compliance-scope-types' -import type { AgentMode } from '@/lib/sdk/drafting-engine/types' -import type { ScopeDocumentType } from '@/lib/sdk/compliance-scope-types' -import { DraftEditor } from './DraftEditor' -import { ValidationReport } from './ValidationReport' - -interface DraftingEngineWidgetProps { - currentStep?: string - enableDraftingEngine?: boolean -} - -const MODE_CONFIG: Record = { - explain: { label: 'Explain', color: 'bg-gray-100 text-gray-600', activeColor: 'bg-purple-100 text-purple-700 ring-1 ring-purple-300', icon: 'M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z' }, - ask: { label: 'Ask', color: 'bg-gray-100 text-gray-600', activeColor: 'bg-amber-100 text-amber-700 ring-1 ring-amber-300', icon: 'M8.228 9c.549-1.165 2.03-2 3.772-2 2.21 0 4 1.343 4 3 0 1.4-1.278 2.575-3.006 2.907-.542.104-.994.54-.994 1.093m0 3h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z' }, - draft: { label: 'Draft', color: 'bg-gray-100 text-gray-600', activeColor: 'bg-blue-100 text-blue-700 ring-1 ring-blue-300', icon: 'M11 5H6a2 2 0 00-2 2v11a2 2 0 002 2h11a2 2 0 002-2v-5m-1.414-9.414a2 2 0 112.828 2.828L11.828 15H9v-2.828l8.586-8.586z' }, - validate: { label: 'Validate', 
color: 'bg-gray-100 text-gray-600', activeColor: 'bg-green-100 text-green-700 ring-1 ring-green-300', icon: 'M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z' }, -} - -const EXAMPLE_QUESTIONS: Record = { - explain: [ - 'Was ist ein Verarbeitungsverzeichnis?', - 'Wann brauche ich eine DSFA?', - 'Was sind TOM nach Art. 32 DSGVO?', - ], - ask: [ - 'Welche Luecken hat mein Compliance-Profil?', - 'Was fehlt noch fuer die Zertifizierung?', - 'Welche Dokumente muss ich noch erstellen?', - ], - draft: [ - 'Erstelle einen VVT-Eintrag fuer unseren Hauptprozess', - 'Erstelle TOM fuer unsere Cloud-Infrastruktur', - 'Erstelle eine Datenschutzerklaerung', - ], - validate: [ - 'Pruefe die Konsistenz meiner Dokumente', - 'Stimmen VVT und TOM ueberein?', - 'Gibt es Luecken bei den Loeschfristen?', - ], -} - -export function DraftingEngineWidget({ - currentStep = 'default', - enableDraftingEngine = true, -}: DraftingEngineWidgetProps) { - const { state } = useSDK() - const engine = useDraftingEngine() - const [isOpen, setIsOpen] = useState(false) - const [isExpanded, setIsExpanded] = useState(false) - const [inputValue, setInputValue] = useState('') - const [showDraftEditor, setShowDraftEditor] = useState(false) - const [showValidationReport, setShowValidationReport] = useState(false) - const messagesEndRef = useRef(null) - - // Available document types from scope decision - const availableDocumentTypes: ScopeDocumentType[] = - state.complianceScope?.decision?.requiredDocuments - ?.filter(d => d.required) - .map(d => d.documentType as ScopeDocumentType) ?? 
['vvt', 'tom', 'lf'] - - // Auto-scroll - useEffect(() => { - messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' }) - }, [engine.messages]) - - // Open draft editor when a new draft arrives - useEffect(() => { - if (engine.currentDraft) { - setShowDraftEditor(true) - } - }, [engine.currentDraft]) - - // Open validation report when new results arrive - useEffect(() => { - if (engine.validationResult) { - setShowValidationReport(true) - } - }, [engine.validationResult]) - - const handleSendMessage = useCallback( - (content: string) => { - if (!content.trim()) return - setInputValue('') - engine.sendMessage(content) - }, - [engine] - ) - - const handleKeyDown = (e: React.KeyboardEvent) => { - if (e.key === 'Enter' && !e.shiftKey) { - e.preventDefault() - handleSendMessage(inputValue) - } - } - - const exampleQuestions = EXAMPLE_QUESTIONS[engine.currentMode] - - if (!isOpen) { - return ( - - ) - } - - // Draft Editor full-screen overlay - if (showDraftEditor && engine.currentDraft) { - return ( - { - engine.acceptDraft() - setShowDraftEditor(false) - }} - onValidate={() => { - engine.validateDraft() - }} - onClose={() => setShowDraftEditor(false)} - onRefine={(instruction: string) => { - engine.requestDraft(instruction) - }} - validationResult={engine.validationResult} - isTyping={engine.isTyping} - /> - ) - } - - return ( -
- {/* Header */} -
-
-
- - - -
-
-
Drafting Engine
-
Compliance-Dokumententwurf
-
-
-
- - -
-
- - {/* Mode Pills */} -
- {(Object.keys(MODE_CONFIG) as AgentMode[]).map((mode) => { - const config = MODE_CONFIG[mode] - const isActive = engine.currentMode === mode - return ( - - ) - })} -
- - {/* Document Type Selector (visible in draft/validate mode) */} - {(engine.currentMode === 'draft' || engine.currentMode === 'validate') && ( -
-
- Dokument: - -
-
- )} - - {/* Gap Banner */} - {(() => { - const gaps = state.complianceScope?.decision?.gaps?.filter( - (g: { severity: string }) => g.severity === 'HIGH' || g.severity === 'CRITICAL' - ) ?? [] - if (gaps.length > 0) { - return ( -
-
- - - - {gaps.length} kritische Luecke{gaps.length !== 1 ? 'n' : ''} erkannt -
- -
- ) - } - return null - })()} - - {/* Error Banner */} - {engine.error && ( -
- {engine.error} - -
- )} - - {/* Validation Report Inline */} - {showValidationReport && engine.validationResult && ( -
- setShowValidationReport(false)} - compact - /> -
- )} - - {/* Messages Area */} -
- {engine.messages.length === 0 ? ( -
-
- - - -
-

- {engine.currentMode === 'explain' && 'Fragen beantworten'} - {engine.currentMode === 'ask' && 'Luecken erkennen'} - {engine.currentMode === 'draft' && 'Dokumente entwerfen'} - {engine.currentMode === 'validate' && 'Konsistenz pruefen'} -

-

- {engine.currentMode === 'explain' && 'Stellen Sie Fragen zu DSGVO, AI Act und Compliance.'} - {engine.currentMode === 'ask' && 'Identifiziert Luecken in Ihrem Compliance-Profil.'} - {engine.currentMode === 'draft' && 'Erstellt strukturierte Compliance-Dokumente.'} - {engine.currentMode === 'validate' && 'Prueft Cross-Dokument-Konsistenz.'} -

- -
-

Beispiele:

- {exampleQuestions.map((q, idx) => ( - - ))} -
- - {/* Quick Actions for Draft/Validate */} - {engine.currentMode === 'draft' && engine.activeDocumentType && ( - - )} - {engine.currentMode === 'validate' && ( - - )} -
- ) : ( - <> - {engine.messages.map((message, idx) => ( -
-
-

- {message.content} -

- - {/* Draft ready indicator */} - {message.metadata?.hasDraft && engine.currentDraft && ( - - )} - - {/* Validation ready indicator */} - {message.metadata?.hasValidation && engine.validationResult && ( - - )} -
-
- ))} - - {engine.isTyping && ( -
-
-
-
-
-
-
-
-
- )} - -
- - )} -
- - {/* Input Area */} -
-
- setInputValue(e.target.value)} - onKeyDown={handleKeyDown} - placeholder={ - engine.currentMode === 'draft' - ? 'Anweisung fuer den Entwurf...' - : engine.currentMode === 'validate' - ? 'Validierungsfrage...' - : 'Frage eingeben...' - } - disabled={engine.isTyping} - className="flex-1 px-3 py-2 text-sm border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-purple-500 focus:border-transparent disabled:opacity-50" - /> - {engine.isTyping ? ( - - ) : ( - - )} -
-
-
- ) -} diff --git a/admin-compliance/components/sdk/StepHeader/StepHeader.tsx b/admin-compliance/components/sdk/StepHeader/StepHeader.tsx index 483772f..8ffce1d 100644 --- a/admin-compliance/components/sdk/StepHeader/StepHeader.tsx +++ b/admin-compliance/components/sdk/StepHeader/StepHeader.tsx @@ -17,9 +17,9 @@ export interface StepTip { interface StepHeaderProps { stepId: string - title: string - description: string - explanation: string + title?: string + description?: string + explanation?: string tips?: StepTip[] showNavigation?: boolean showProgress?: boolean @@ -95,10 +95,10 @@ const tipIconColors = { export function StepHeader({ stepId, - title, - description, - explanation, - tips = [], + title: titleProp, + description: descriptionProp, + explanation: explanationProp, + tips: tipsProp, showNavigation = true, showProgress = true, onComplete, @@ -109,6 +109,13 @@ export function StepHeader({ const { state, dispatch } = useSDK() const [showHelp, setShowHelp] = useState(false) + // Look up defaults from STEP_EXPLANATIONS when props are not provided + const preset = STEP_EXPLANATIONS[stepId as keyof typeof STEP_EXPLANATIONS] + const title = titleProp ?? preset?.title ?? stepId + const description = descriptionProp ?? preset?.description ?? '' + const explanation = explanationProp ?? preset?.explanation ?? '' + const tips = tipsProp ?? preset?.tips ?? [] + const currentStep = getStepById(stepId) const prevStep = getPreviousStep(stepId) const nextStep = getNextStep(stepId) @@ -996,6 +1003,50 @@ export const STEP_EXPLANATIONS = { }, ], }, -} + 'email-templates': { + title: 'E-Mail-Templates', + description: 'Verwalten Sie Vorlagen fuer alle DSGVO-relevanten Benachrichtigungen', + explanation: 'E-Mail-Templates definieren die Texte und das Layout fuer automatisierte DSGVO-Benachrichtigungen: Einwilligungsbestaetigung, Widerrufsbestaetigung, Auskunftsantwort, Loeschbestaetigung und weitere Lifecycle-E-Mails. 
Alle 16 Template-Typen koennen individuell angepasst und mit Variablen personalisiert werden.', + tips: [ + { + icon: 'info' as const, + title: '16 Lifecycle-E-Mails', + description: 'Von der Registrierungsbestaetigung bis zur Kontoloeschung — alle relevanten Touchpoints sind mit Vorlagen abgedeckt.', + }, + { + icon: 'warning' as const, + title: 'Pflichtangaben', + description: 'Stellen Sie sicher, dass jede E-Mail die gesetzlich vorgeschriebenen Angaben enthaelt: Impressum, Datenschutzhinweis und Widerrufsmoeglichkeit.', + }, + { + icon: 'lightbulb' as const, + title: 'Variablen', + description: 'Nutzen Sie Platzhalter wie {{name}}, {{email}} und {{company}} fuer automatische Personalisierung.', + }, + ], + }, + 'use-case-workshop': { + title: 'Use Case Workshop', + description: 'Erfassen und bewerten Sie Ihre KI-Anwendungsfaelle im Workshop-Format', + explanation: 'Im Use Case Workshop erfassen Sie Ihre KI-Anwendungsfaelle strukturiert in einem gefuehrten Prozess. Der Workshop leitet Sie durch Identifikation, Beschreibung, Datenkategorien, Risikobewertung und Stakeholder-Analyse. Die Ergebnisse fliessen direkt in die Compliance-Bewertung ein.', + tips: [ + { + icon: 'lightbulb' as const, + title: 'Vollstaendigkeit', + description: 'Erfassen Sie alle KI-Anwendungsfaelle — auch solche, die nur intern genutzt werden oder sich noch in der Planungsphase befinden.', + }, + { + icon: 'info' as const, + title: 'Stakeholder einbeziehen', + description: 'Beziehen Sie Fachbereiche und IT in den Workshop ein, um alle Anwendungsfaelle zu identifizieren.', + }, + { + icon: 'warning' as const, + title: 'Risikobewertung', + description: 'Jeder Anwendungsfall wird nach EU AI Act Risikostufen klassifiziert. 
Hochrisiko-Systeme erfordern zusaetzliche Dokumentation.', + }, + ], + }, +} satisfies Record export default StepHeader diff --git a/admin-compliance/components/sdk/ValidationReport.tsx b/admin-compliance/components/sdk/ValidationReport.tsx deleted file mode 100644 index abe4a79..0000000 --- a/admin-compliance/components/sdk/ValidationReport.tsx +++ /dev/null @@ -1,220 +0,0 @@ -'use client' - -/** - * ValidationReport - Strukturierte Anzeige von Validierungsergebnissen - * - * Errors (Scope-Violations) in Rot - * Warnings (Inkonsistenzen) in Amber - * Suggestions in Blau - */ - -import { DOCUMENT_TYPE_LABELS } from '@/lib/sdk/compliance-scope-types' -import type { ValidationResult, ValidationFinding } from '@/lib/sdk/drafting-engine/types' - -interface ValidationReportProps { - result: ValidationResult - onClose: () => void - /** Compact mode for inline display in widget */ - compact?: boolean -} - -const SEVERITY_CONFIG = { - error: { - bg: 'bg-red-50', - border: 'border-red-200', - text: 'text-red-700', - icon: 'M10 14l2-2m0 0l2-2m-2 2l-2-2m2 2l2 2m7-2a9 9 0 11-18 0 9 9 0 0118 0z', - label: 'Fehler', - dotColor: 'bg-red-500', - }, - warning: { - bg: 'bg-amber-50', - border: 'border-amber-200', - text: 'text-amber-700', - icon: 'M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-2.5L13.732 4c-.77-.833-1.964-.833-2.732 0L3.732 16.5c-.77.833.192 2.5 1.732 2.5z', - label: 'Warnungen', - dotColor: 'bg-amber-500', - }, - suggestion: { - bg: 'bg-blue-50', - border: 'border-blue-200', - text: 'text-blue-700', - icon: 'M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z', - label: 'Vorschlaege', - dotColor: 'bg-blue-500', - }, -} - -function FindingCard({ finding, compact }: { finding: ValidationFinding; compact?: boolean }) { - const config = SEVERITY_CONFIG[finding.severity] - const docLabel = DOCUMENT_TYPE_LABELS[finding.documentType]?.split(' (')[0] || finding.documentType - - if (compact) { - return ( -
- -
-

{finding.title}

-

{finding.description}

-
-
- ) - } - - return ( -
-
- - - -
-
-

{finding.title}

- {docLabel} -
-

{finding.description}

- - {finding.crossReferenceType && ( -

- Cross-Referenz: {DOCUMENT_TYPE_LABELS[finding.crossReferenceType]?.split(' (')[0] || finding.crossReferenceType} -

- )} - - {finding.legalReference && ( -

{finding.legalReference}

- )} - - {finding.suggestion && ( -
- - - -

{finding.suggestion}

-
- )} -
-
-
- ) -} - -export function ValidationReport({ result, onClose, compact }: ValidationReportProps) { - const totalFindings = result.errors.length + result.warnings.length + result.suggestions.length - - if (compact) { - return ( -
-
-
- - - {result.passed ? 'Validierung bestanden' : 'Validierung fehlgeschlagen'} - - - ({totalFindings} {totalFindings === 1 ? 'Fund' : 'Funde'}) - -
- -
-
- {result.errors.map((f) => )} - {result.warnings.map((f) => )} - {result.suggestions.map((f) => )} -
-
- ) - } - - return ( -
- {/* Summary Header */} -
-
-
-
- - - -
-
-

- {result.passed ? 'Validierung bestanden' : 'Validierung fehlgeschlagen'} -

-

- Level {result.scopeLevel} | {new Date(result.timestamp).toLocaleString('de-DE')} -

-
-
- - {/* Stats */} -
- {result.errors.length > 0 && ( -
- - {result.errors.length} -
- )} - {result.warnings.length > 0 && ( -
- - {result.warnings.length} -
- )} - {result.suggestions.length > 0 && ( -
- - {result.suggestions.length} -
- )} - - -
-
-
- - {/* Errors */} - {result.errors.length > 0 && ( -
-

- Fehler ({result.errors.length}) -

-
- {result.errors.map((f) => )} -
-
- )} - - {/* Warnings */} - {result.warnings.length > 0 && ( -
-

- Warnungen ({result.warnings.length}) -

-
- {result.warnings.map((f) => )} -
-
- )} - - {/* Suggestions */} - {result.suggestions.length > 0 && ( -
-

- Vorschlaege ({result.suggestions.length}) -

-
- {result.suggestions.map((f) => )} -
-
- )} -
- ) -} diff --git a/admin-compliance/components/sdk/__tests__/StepHeader.test.tsx b/admin-compliance/components/sdk/__tests__/StepHeader.test.tsx index 87370a6..cba54f1 100644 --- a/admin-compliance/components/sdk/__tests__/StepHeader.test.tsx +++ b/admin-compliance/components/sdk/__tests__/StepHeader.test.tsx @@ -1,12 +1,14 @@ import { describe, it, expect } from 'vitest' import { STEP_EXPLANATIONS } from '../StepHeader' +type StepExplanationKey = keyof typeof STEP_EXPLANATIONS + // Focus on testing the STEP_EXPLANATIONS data structure // Component tests require more complex SDK context mocking describe('STEP_EXPLANATIONS', () => { it('should have explanations for all Phase 1 steps', () => { - const phase1Steps = [ + const phase1Steps: StepExplanationKey[] = [ 'use-case-workshop', 'screening', 'modules', @@ -29,7 +31,7 @@ describe('STEP_EXPLANATIONS', () => { }) it('should have explanations for all Phase 2 steps', () => { - const phase2Steps = [ + const phase2Steps: StepExplanationKey[] = [ 'ai-act', 'obligations', 'dsfa', @@ -93,8 +95,8 @@ describe('STEP_EXPLANATIONS', () => { expect(dsfa.explanation.length).toBeGreaterThan(50) }) - it('should cover all 19 SDK steps', () => { - const allStepIds = [ + it('should cover all core SDK steps', () => { + const coreStepIds: StepExplanationKey[] = [ // Phase 1 'use-case-workshop', 'screening', @@ -118,10 +120,11 @@ describe('STEP_EXPLANATIONS', () => { 'escalations', ] - expect(Object.keys(STEP_EXPLANATIONS).length).toBe(allStepIds.length) - - allStepIds.forEach(stepId => { + coreStepIds.forEach(stepId => { expect(STEP_EXPLANATIONS[stepId]).toBeDefined() }) + + // Ensure we have at least the core steps plus additional module explanations + expect(Object.keys(STEP_EXPLANATIONS).length).toBeGreaterThanOrEqual(coreStepIds.length) }) }) diff --git a/admin-compliance/lib/sdk/compliance-scope-golden-tests.ts b/admin-compliance/lib/sdk/compliance-scope-golden-tests.ts deleted file mode 100644 index 6a31b7c..0000000 --- 
a/admin-compliance/lib/sdk/compliance-scope-golden-tests.ts +++ /dev/null @@ -1,722 +0,0 @@ -import type { ScopeProfilingAnswer, ComplianceDepthLevel, ScopeDocumentType } from './compliance-scope-types' - -export interface GoldenTest { - id: string - name: string - description: string - answers: ScopeProfilingAnswer[] - expectedLevel: ComplianceDepthLevel | null // null for prefill tests - expectedMinDocuments?: ScopeDocumentType[] - expectedHardTriggerIds?: string[] - expectedDsfaRequired?: boolean - tags: string[] -} - -export const GOLDEN_TESTS: GoldenTest[] = [ - // GT-01: 2-Person Freelancer, nur B2B, DE-Hosting → L1 - { - id: 'GT-01', - name: '2-Person Freelancer B2B', - description: 'Kleinstes Setup ohne besondere Risiken', - answers: [ - { questionId: 'org_employee_count', value: '2' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'consulting' }, - { questionId: 'data_health', value: false }, - { questionId: 'data_genetic', value: false }, - { questionId: 'data_biometric', value: false }, - { questionId: 'data_racial_ethnic', value: false }, - { questionId: 'data_political_opinion', value: false }, - { questionId: 'data_religious', value: false }, - { questionId: 'data_union_membership', value: false }, - { questionId: 'data_sexual_orientation', value: false }, - { questionId: 'data_criminal', value: false }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - { questionId: 'process_has_dsfa', value: true }, - { questionId: 'process_has_incident_plan', value: true }, - { questionId: 'data_volume', value: '<1000' }, - { questionId: 'org_customer_count', value: '<100' }, - ], - expectedLevel: 'L1', - expectedMinDocuments: ['VVT', 'TOM', 'COOKIE_BANNER'], - expectedHardTriggerIds: [], - expectedDsfaRequired: false, - tags: ['baseline', 'freelancer', 'b2b'], - }, - - // GT-02: Solo IT-Berater → L1 - { - id: 
'GT-02', - name: 'Solo IT-Berater', - description: 'Einzelperson, minimale Datenverarbeitung', - answers: [ - { questionId: 'org_employee_count', value: '1' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'it_services' }, - { questionId: 'data_health', value: false }, - { questionId: 'data_genetic', value: false }, - { questionId: 'data_biometric', value: false }, - { questionId: 'data_volume', value: '<1000' }, - { questionId: 'org_customer_count', value: '<50' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L1', - expectedHardTriggerIds: [], - tags: ['baseline', 'solo', 'minimal'], - }, - - // GT-03: 5-Person Agentur, Website, kein Tracking → L1 - { - id: 'GT-03', - name: '5-Person Agentur ohne Tracking', - description: 'Kleine Agentur, einfache Website ohne Analytics', - answers: [ - { questionId: 'org_employee_count', value: '5' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'marketing' }, - { questionId: 'tech_has_website', value: true }, - { questionId: 'tech_has_tracking', value: false }, - { questionId: 'data_volume', value: '1000-10000' }, - { questionId: 'org_customer_count', value: '100-1000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L1', - expectedMinDocuments: ['VVT', 'TOM', 'COOKIE_BANNER'], - tags: ['baseline', 'agency', 'simple'], - }, - - // GT-04: 30-Person SaaS B2B, EU-Cloud → L2 (scale trigger) - { - id: 'GT-04', - name: '30-Person SaaS B2B', - description: 'Scale-Trigger durch Mitarbeiterzahl', - answers: [ - { questionId: 'org_employee_count', value: '30' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { 
questionId: 'org_industry', value: 'software' }, - { questionId: 'tech_has_cloud', value: true }, - { questionId: 'data_volume', value: '10000-100000' }, - { questionId: 'org_customer_count', value: '1000-10000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - { questionId: 'process_has_dsfa', value: false }, - ], - expectedLevel: 'L2', - expectedMinDocuments: ['VVT', 'TOM', 'AVV', 'COOKIE_BANNER'], - tags: ['scale', 'saas', 'growth'], - }, - - // GT-05: 50-Person Handel B2C, Webshop → L2 (B2C+Webshop) - { - id: 'GT-05', - name: '50-Person E-Commerce B2C', - description: 'B2C mit Webshop erhöht Anforderungen', - answers: [ - { questionId: 'org_employee_count', value: '50' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'retail' }, - { questionId: 'tech_has_webshop', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'org_customer_count', value: '10000-100000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L2', - expectedHardTriggerIds: ['HT-H01'], - expectedMinDocuments: ['VVT', 'TOM', 'AVV', 'COOKIE_BANNER', 'EINWILLIGUNG'], - tags: ['b2c', 'webshop', 'retail'], - }, - - // GT-06: 80-Person Dienstleister, Cloud → L2 (scale) - { - id: 'GT-06', - name: '80-Person Dienstleister', - description: 'Größerer Betrieb mit Cloud-Services', - answers: [ - { questionId: 'org_employee_count', value: '80' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'professional_services' }, - { questionId: 'tech_has_cloud', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'org_customer_count', value: '1000-10000' }, - { questionId: 'process_has_vvt', value: true }, - { 
questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L2', - expectedMinDocuments: ['VVT', 'TOM', 'AVV'], - tags: ['scale', 'services'], - }, - - // GT-07: 20-Person Startup mit GA4 Tracking → L2 (tracking) - { - id: 'GT-07', - name: 'Startup mit Google Analytics', - description: 'Tracking-Tools erhöhen Compliance-Anforderungen', - answers: [ - { questionId: 'org_employee_count', value: '20' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'technology' }, - { questionId: 'tech_has_website', value: true }, - { questionId: 'tech_has_tracking', value: true }, - { questionId: 'tech_tracking_tools', value: 'google_analytics' }, - { questionId: 'data_volume', value: '10000-100000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L2', - expectedMinDocuments: ['VVT', 'TOM', 'COOKIE_BANNER', 'EINWILLIGUNG'], - tags: ['tracking', 'analytics', 'startup'], - }, - - // GT-08: Kita-App (Minderjaehrige) → L3 (HT-B01) - { - id: 'GT-08', - name: 'Kita-App für Eltern', - description: 'Datenverarbeitung von Minderjährigen unter 16', - answers: [ - { questionId: 'org_employee_count', value: '15' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'education' }, - { questionId: 'data_subjects_minors', value: true }, - { questionId: 'data_subjects_minors_age', value: '<16' }, - { questionId: 'data_volume', value: '1000-10000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-B01'], - expectedDsfaRequired: true, - expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'EINWILLIGUNG', 'AVV'], - tags: ['hard-trigger', 'minors', 'education'], - }, - - // GT-09: Krankenhaus-Software → L3 (HT-A01) - { - id: 'GT-09', - name: 
'Krankenhaus-Verwaltungssoftware', - description: 'Gesundheitsdaten Art. 9 DSGVO', - answers: [ - { questionId: 'org_employee_count', value: '200' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'healthcare' }, - { questionId: 'data_health', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'org_customer_count', value: '10-50' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-A01'], - expectedDsfaRequired: true, - expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV'], - tags: ['hard-trigger', 'health', 'art9'], - }, - - // GT-10: HR-Scoring-Plattform → L3 (HT-C01) - { - id: 'GT-10', - name: 'HR-Scoring für Bewerbungen', - description: 'Automatisierte Entscheidungen im HR-Bereich', - answers: [ - { questionId: 'org_employee_count', value: '40' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'hr_tech' }, - { questionId: 'tech_has_adm', value: true }, - { questionId: 'tech_adm_type', value: 'profiling' }, - { questionId: 'tech_adm_impact', value: 'employment' }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-C01'], - expectedDsfaRequired: true, - expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV'], - tags: ['hard-trigger', 'adm', 'profiling'], - }, - - // GT-11: Fintech Kreditscoring → L3 (HT-H05 + C01) - { - id: 'GT-11', - name: 'Fintech Kreditscoring', - description: 'Finanzsektor mit automatisierten Entscheidungen', - answers: [ - { questionId: 'org_employee_count', value: '120' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { 
questionId: 'org_industry', value: 'finance' }, - { questionId: 'tech_has_adm', value: true }, - { questionId: 'tech_adm_type', value: 'scoring' }, - { questionId: 'tech_adm_impact', value: 'credit' }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-H05', 'HT-C01'], - expectedDsfaRequired: true, - expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV'], - tags: ['hard-trigger', 'finance', 'scoring'], - }, - - // GT-12: Bildungsplattform Minderjaehrige → L3 (HT-B01) - { - id: 'GT-12', - name: 'Online-Lernplattform für Schüler', - description: 'Bildungssektor mit minderjährigen Nutzern', - answers: [ - { questionId: 'org_employee_count', value: '35' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'education' }, - { questionId: 'data_subjects_minors', value: true }, - { questionId: 'data_subjects_minors_age', value: '<16' }, - { questionId: 'tech_has_tracking', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-B01'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'education', 'minors'], - }, - - // GT-13: Datenbroker → L3 (HT-H02) - { - id: 'GT-13', - name: 'Datenbroker / Adresshandel', - description: 'Geschäftsmodell basiert auf Datenhandel', - answers: [ - { questionId: 'org_employee_count', value: '25' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'data_broker' }, - { questionId: 'data_is_core_business', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'org_customer_count', value: '100-1000' }, - { questionId: 'process_has_vvt', 
value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-H02'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'data-broker'], - }, - - // GT-14: Video + ADM → L3 (HT-D05) - { - id: 'GT-14', - name: 'Videoüberwachung mit Gesichtserkennung', - description: 'Biometrische Daten mit automatisierter Verarbeitung', - answers: [ - { questionId: 'org_employee_count', value: '60' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'security' }, - { questionId: 'data_biometric', value: true }, - { questionId: 'tech_has_video_surveillance', value: true }, - { questionId: 'tech_has_adm', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-D05'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'biometric', 'video'], - }, - - // GT-15: 500-MA Konzern ohne Zert → L3 (HT-G04) - { - id: 'GT-15', - name: 'Großunternehmen ohne Zertifizierung', - description: 'Scale-Trigger durch Unternehmensgröße', - answers: [ - { questionId: 'org_employee_count', value: '500' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'manufacturing' }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'org_customer_count', value: '>100000' }, - { questionId: 'cert_has_iso27001', value: false }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-G04'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'scale', 'enterprise'], - }, - - // GT-16: ISO 27001 Anbieter → L4 (HT-F01) - { - id: 'GT-16', - name: 'ISO 27001 zertifizierter Cloud-Provider', - description: 'Zertifizierung erfordert höchste Compliance', - answers: [ - { 
questionId: 'org_employee_count', value: '150' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'cloud_services' }, - { questionId: 'cert_has_iso27001', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - { questionId: 'process_has_dsfa', value: true }, - ], - expectedLevel: 'L4', - expectedHardTriggerIds: ['HT-F01'], - expectedMinDocuments: ['VVT', 'TOM', 'DSFA', 'AVV', 'CERT_ISO27001'], - tags: ['hard-trigger', 'certification', 'iso'], - }, - - // GT-17: TISAX Automobilzulieferer → L4 (HT-F04) - { - id: 'GT-17', - name: 'TISAX-zertifizierter Automobilzulieferer', - description: 'Automotive-Branche mit TISAX-Anforderungen', - answers: [ - { questionId: 'org_employee_count', value: '300' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'automotive' }, - { questionId: 'cert_has_tisax', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'org_customer_count', value: '10-50' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L4', - expectedHardTriggerIds: ['HT-F04'], - tags: ['hard-trigger', 'certification', 'tisax'], - }, - - // GT-18: ISO 27701 Cloud-Provider → L4 (HT-F02) - { - id: 'GT-18', - name: 'ISO 27701 Privacy-zertifiziert', - description: 'Privacy-spezifische Zertifizierung', - answers: [ - { questionId: 'org_employee_count', value: '200' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'cloud_services' }, - { questionId: 'cert_has_iso27701', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 
'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - { questionId: 'process_has_dsfa', value: true }, - ], - expectedLevel: 'L4', - expectedHardTriggerIds: ['HT-F02'], - tags: ['hard-trigger', 'certification', 'privacy'], - }, - - // GT-19: Grosskonzern + Art.9 + >1M DS → L4 (HT-G05) - { - id: 'GT-19', - name: 'Konzern mit sensiblen Massendaten', - description: 'Kombination aus Scale und Art. 9 Daten', - answers: [ - { questionId: 'org_employee_count', value: '2000' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'insurance' }, - { questionId: 'data_health', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'org_customer_count', value: '>100000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - ], - expectedLevel: 'L4', - expectedHardTriggerIds: ['HT-G05'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'scale', 'art9'], - }, - - // GT-20: Nur B2C Webshop → L2 (HT-H01) - { - id: 'GT-20', - name: 'Reiner B2C Webshop', - description: 'B2C-Trigger ohne weitere Risiken', - answers: [ - { questionId: 'org_employee_count', value: '12' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'retail' }, - { questionId: 'tech_has_webshop', value: true }, - { questionId: 'data_volume', value: '10000-100000' }, - { questionId: 'org_customer_count', value: '1000-10000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L2', - expectedHardTriggerIds: ['HT-H01'], - tags: ['b2c', 'webshop'], - }, - - // GT-21: Keine Daten, keine MA → L1 - { - id: 'GT-21', - name: 'Minimale Datenverarbeitung', - description: 'Absolute Baseline ohne Risiken', - answers: [ - { questionId: 'org_employee_count', value: '1' }, - { questionId: 
'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'consulting' }, - { questionId: 'data_volume', value: '<1000' }, - { questionId: 'org_customer_count', value: '<50' }, - { questionId: 'tech_has_website', value: false }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L1', - expectedHardTriggerIds: [], - tags: ['baseline', 'minimal'], - }, - - // GT-22: Alle Art.9 Kategorien → L3 (HT-A09) - { - id: 'GT-22', - name: 'Alle Art. 9 Kategorien', - description: 'Multiple sensible Datenkategorien', - answers: [ - { questionId: 'org_employee_count', value: '50' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'research' }, - { questionId: 'data_health', value: true }, - { questionId: 'data_genetic', value: true }, - { questionId: 'data_biometric', value: true }, - { questionId: 'data_racial_ethnic', value: true }, - { questionId: 'data_political_opinion', value: true }, - { questionId: 'data_religious', value: true }, - { questionId: 'data_union_membership', value: true }, - { questionId: 'data_sexual_orientation', value: true }, - { questionId: 'data_criminal', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-A09'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'art9', 'multiple-categories'], - }, - - // GT-23: Drittland + Art.9 → L3 (HT-E04) - { - id: 'GT-23', - name: 'Drittlandtransfer mit Art. 
9 Daten', - description: 'Kombination aus Drittland und sensiblen Daten', - answers: [ - { questionId: 'org_employee_count', value: '45' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'us' }, - { questionId: 'org_industry', value: 'healthcare' }, - { questionId: 'data_health', value: true }, - { questionId: 'tech_has_third_country_transfer', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-E04'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'third-country', 'art9'], - }, - - // GT-24: Minderjaehrige + Art.9 → L4 (HT-B02) - { - id: 'GT-24', - name: 'Minderjährige mit Gesundheitsdaten', - description: 'Kombination aus vulnerabler Gruppe und Art. 9', - answers: [ - { questionId: 'org_employee_count', value: '30' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'healthcare' }, - { questionId: 'data_subjects_minors', value: true }, - { questionId: 'data_subjects_minors_age', value: '<16' }, - { questionId: 'data_health', value: true }, - { questionId: 'data_volume', value: '10000-100000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L4', - expectedHardTriggerIds: ['HT-B02'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'minors', 'health', 'combined-risk'], - }, - - // GT-25: KI autonome Entscheidungen → L3 (HT-C02) - { - id: 'GT-25', - name: 'KI mit autonomen Entscheidungen', - description: 'AI Act relevante autonome Systeme', - answers: [ - { questionId: 'org_employee_count', value: '70' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'ai_services' }, - { questionId: 'tech_has_adm', value: true }, - { questionId: 'tech_adm_type', 
value: 'autonomous_decision' }, - { questionId: 'tech_has_ai', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-C02'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'ai', 'adm'], - }, - - // GT-26: Multiple Zertifizierungen → L4 (HT-F01-05) - { - id: 'GT-26', - name: 'Multiple Zertifizierungen', - description: 'Mehrere Zertifizierungen kombiniert', - answers: [ - { questionId: 'org_employee_count', value: '250' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'cloud_services' }, - { questionId: 'cert_has_iso27001', value: true }, - { questionId: 'cert_has_iso27701', value: true }, - { questionId: 'cert_has_soc2', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - { questionId: 'process_has_dsfa', value: true }, - ], - expectedLevel: 'L4', - expectedHardTriggerIds: ['HT-F01', 'HT-F02', 'HT-F03'], - tags: ['hard-trigger', 'certification', 'multiple'], - }, - - // GT-27: Oeffentlicher Sektor + Gesundheit → L3 (HT-H07 + A01) - { - id: 'GT-27', - name: 'Öffentlicher Sektor mit Gesundheitsdaten', - description: 'Behörde mit Art. 
9 Datenverarbeitung', - answers: [ - { questionId: 'org_employee_count', value: '120' }, - { questionId: 'org_business_model', value: 'b2g' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'public_sector' }, - { questionId: 'org_is_public_sector', value: true }, - { questionId: 'data_health', value: true }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-H07', 'HT-A01'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'public-sector', 'health'], - }, - - // GT-28: Bildung + KI + Minderjaehrige → L4 (HT-B03) - { - id: 'GT-28', - name: 'EdTech mit KI für Minderjährige', - description: 'Triple-Risiko: Bildung, KI, vulnerable Gruppe', - answers: [ - { questionId: 'org_employee_count', value: '55' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'education' }, - { questionId: 'data_subjects_minors', value: true }, - { questionId: 'data_subjects_minors_age', value: '<16' }, - { questionId: 'tech_has_ai', value: true }, - { questionId: 'tech_has_adm', value: true }, - { questionId: 'data_volume', value: '100000-1000000' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L4', - expectedHardTriggerIds: ['HT-B03'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'education', 'ai', 'minors', 'triple-risk'], - }, - - // GT-29: Freelancer mit 1 Art.9 → L3 (hard trigger override despite low score) - { - id: 'GT-29', - name: 'Freelancer mit Gesundheitsdaten', - description: 'Hard Trigger überschreibt niedrige Score-Bewertung', - answers: [ - { questionId: 'org_employee_count', value: '1' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'de' }, - { questionId: 'org_industry', value: 'healthcare' }, - { questionId: 
'data_health', value: true }, - { questionId: 'data_volume', value: '<1000' }, - { questionId: 'org_customer_count', value: '<50' }, - { questionId: 'process_has_vvt', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-A01'], - expectedDsfaRequired: true, - tags: ['hard-trigger', 'override', 'art9', 'freelancer'], - }, - - // GT-30: Enterprise, alle Prozesse vorhanden → L3 (good process maturity) - { - id: 'GT-30', - name: 'Enterprise mit reifer Prozesslandschaft', - description: 'Große Organisation mit allen Compliance-Prozessen', - answers: [ - { questionId: 'org_employee_count', value: '450' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - { questionId: 'org_industry', value: 'manufacturing' }, - { questionId: 'data_volume', value: '>1000000' }, - { questionId: 'org_customer_count', value: '10000-100000' }, - { questionId: 'process_has_vvt', value: true }, - { questionId: 'process_has_tom', value: true }, - { questionId: 'process_has_dsfa', value: true }, - { questionId: 'process_has_incident_plan', value: true }, - { questionId: 'process_has_dsb', value: true }, - { questionId: 'process_has_training', value: true }, - ], - expectedLevel: 'L3', - expectedHardTriggerIds: ['HT-G04'], - tags: ['enterprise', 'mature', 'all-processes'], - }, - - // GT-31: SMB, nur 1 Block beantwortet → L1 (graceful degradation) - { - id: 'GT-31', - name: 'Unvollständige Profilerstellung', - description: 'Test für graceful degradation bei unvollständigen Antworten', - answers: [ - { questionId: 'org_employee_count', value: '8' }, - { questionId: 'org_business_model', value: 'b2b' }, - { questionId: 'org_industry', value: 'consulting' }, - // Nur Block 1 (Organization) beantwortet, Rest fehlt - ], - expectedLevel: 'L1', - expectedHardTriggerIds: [], - tags: ['incomplete', 'degradation', 'edge-case'], - }, - - // GT-32: CompanyProfile Prefill Konsistenz → null (prefill test, no expected level) - { - 
id: 'GT-32', - name: 'CompanyProfile Prefill Test', - description: 'Prüft ob CompanyProfile-Daten korrekt in ScopeProfile übernommen werden', - answers: [ - { questionId: 'org_employee_count', value: '25' }, - { questionId: 'org_business_model', value: 'b2c' }, - { questionId: 'org_industry', value: 'retail' }, - { questionId: 'tech_hosting_location', value: 'eu' }, - // Diese Werte sollten mit CompanyProfile-Prefill übereinstimmen - ], - expectedLevel: null, - tags: ['prefill', 'integration', 'consistency'], - }, -] diff --git a/admin-compliance/lib/sdk/drafting-engine/__tests__/intent-classifier.test.ts b/admin-compliance/lib/sdk/drafting-engine/__tests__/intent-classifier.test.ts deleted file mode 100644 index 521ea1c..0000000 --- a/admin-compliance/lib/sdk/drafting-engine/__tests__/intent-classifier.test.ts +++ /dev/null @@ -1,153 +0,0 @@ -import { IntentClassifier } from '../intent-classifier' - -describe('IntentClassifier', () => { - const classifier = new IntentClassifier() - - describe('classify - Draft mode', () => { - it.each([ - ['Erstelle ein VVT fuer unseren Hauptprozess', 'draft'], - ['Generiere eine TOM-Dokumentation', 'draft'], - ['Schreibe eine Datenschutzerklaerung', 'draft'], - ['Verfasse einen Entwurf fuer das Loeschkonzept', 'draft'], - ['Create a DSFA document', 'draft'], - ['Draft a privacy policy for us', 'draft'], - ['Neues VVT anlegen', 'draft'], - ])('"%s" should classify as %s', (input, expectedMode) => { - const result = classifier.classify(input) - expect(result.mode).toBe(expectedMode) - expect(result.confidence).toBeGreaterThan(0.7) - }) - }) - - describe('classify - Validate mode', () => { - it.each([ - ['Pruefe die Konsistenz meiner Dokumente', 'validate'], - ['Ist mein VVT korrekt?', 'validate'], - ['Validiere die TOM gegen das VVT', 'validate'], - ['Check die Vollstaendigkeit', 'validate'], - ['Stimmt das mit der DSFA ueberein?', 'validate'], - ['Cross-Check VVT und TOM', 'validate'], - ])('"%s" should classify as %s', (input, 
expectedMode) => { - const result = classifier.classify(input) - expect(result.mode).toBe(expectedMode) - expect(result.confidence).toBeGreaterThan(0.7) - }) - }) - - describe('classify - Ask mode', () => { - it.each([ - ['Was fehlt noch in meinem Profil?', 'ask'], - ['Zeige mir die Luecken', 'ask'], - ['Welche Dokumente fehlen noch?', 'ask'], - ['Was ist der naechste Schritt?', 'ask'], - ['Welche Informationen brauche ich noch?', 'ask'], - ])('"%s" should classify as %s', (input, expectedMode) => { - const result = classifier.classify(input) - expect(result.mode).toBe(expectedMode) - expect(result.confidence).toBeGreaterThan(0.6) - }) - }) - - describe('classify - Explain mode (fallback)', () => { - it.each([ - ['Was ist DSGVO?', 'explain'], - ['Erklaere mir Art. 30', 'explain'], - ['Hallo', 'explain'], - ['Danke fuer die Hilfe', 'explain'], - ])('"%s" should classify as %s (fallback)', (input, expectedMode) => { - const result = classifier.classify(input) - expect(result.mode).toBe(expectedMode) - }) - }) - - describe('classify - confidence thresholds', () => { - it('should have high confidence for clear draft intents', () => { - const result = classifier.classify('Erstelle ein neues VVT') - expect(result.confidence).toBeGreaterThanOrEqual(0.85) - }) - - it('should have lower confidence for ambiguous inputs', () => { - const result = classifier.classify('Hallo') - expect(result.confidence).toBeLessThan(0.6) - }) - - it('should boost confidence with document type detection', () => { - const withDoc = classifier.classify('Erstelle VVT') - const withoutDoc = classifier.classify('Erstelle etwas') - expect(withDoc.confidence).toBeGreaterThanOrEqual(withoutDoc.confidence) - }) - - it('should boost confidence with multiple pattern matches', () => { - const single = classifier.classify('Erstelle Dokument') - const multi = classifier.classify('Erstelle und generiere ein neues Dokument') - expect(multi.confidence).toBeGreaterThanOrEqual(single.confidence) - }) - }) - - 
describe('detectDocumentType', () => { - it.each([ - ['VVT erstellen', 'vvt'], - ['Verarbeitungsverzeichnis', 'vvt'], - ['Art. 30 Dokumentation', 'vvt'], - ['TOM definieren', 'tom'], - ['technisch organisatorische Massnahmen', 'tom'], - ['Art. 32 Massnahmen', 'tom'], - ['DSFA durchfuehren', 'dsfa'], - ['Datenschutz-Folgenabschaetzung', 'dsfa'], - ['Art. 35 Pruefung', 'dsfa'], - ['DPIA erstellen', 'dsfa'], - ['Datenschutzerklaerung', 'dsi'], - ['Privacy Policy', 'dsi'], - ['Art. 13 Information', 'dsi'], - ['Loeschfristen definieren', 'lf'], - ['Loeschkonzept erstellen', 'lf'], - ['Retention Policy', 'lf'], - ['Auftragsverarbeitung', 'av_vertrag'], - ['AVV erstellen', 'av_vertrag'], - ['Art. 28 Vertrag', 'av_vertrag'], - ['Einwilligung einholen', 'einwilligung'], - ['Consent Management', 'einwilligung'], - ['Cookie Banner', 'einwilligung'], - ])('"%s" should detect document type %s', (input, expectedType) => { - const result = classifier.detectDocumentType(input) - expect(result).toBe(expectedType) - }) - - it('should return undefined for unrecognized types', () => { - expect(classifier.detectDocumentType('Hallo Welt')).toBeUndefined() - expect(classifier.detectDocumentType('Was kostet das?')).toBeUndefined() - }) - }) - - describe('classify - Umlaut handling', () => { - it('should handle German umlauts correctly', () => { - // With actual umlauts (ä, ö, ü) - const result1 = classifier.classify('Prüfe die Vollständigkeit') - expect(result1.mode).toBe('validate') - - // With ae/oe/ue substitution - const result2 = classifier.classify('Pruefe die Vollstaendigkeit') - expect(result2.mode).toBe('validate') - }) - - it('should handle ß correctly', () => { - const result = classifier.classify('Schließe Lücken') - // Should still detect via normalized patterns - expect(result).toBeDefined() - }) - }) - - describe('classify - combined mode + document type', () => { - it('should detect both mode and document type', () => { - const result = classifier.classify('Erstelle ein 
VVT fuer unsere Firma') - expect(result.mode).toBe('draft') - expect(result.detectedDocumentType).toBe('vvt') - }) - - it('should detect validate + document type', () => { - const result = classifier.classify('Pruefe mein TOM auf Konsistenz') - expect(result.mode).toBe('validate') - expect(result.detectedDocumentType).toBe('tom') - }) - }) -}) diff --git a/admin-compliance/lib/sdk/drafting-engine/__tests__/state-projector.test.ts b/admin-compliance/lib/sdk/drafting-engine/__tests__/state-projector.test.ts deleted file mode 100644 index 6f3d233..0000000 --- a/admin-compliance/lib/sdk/drafting-engine/__tests__/state-projector.test.ts +++ /dev/null @@ -1,312 +0,0 @@ -import { StateProjector } from '../state-projector' -import type { SDKState } from '../../types' - -describe('StateProjector', () => { - const projector = new StateProjector() - - // Helper: minimal SDKState - function makeState(overrides: Partial = {}): SDKState { - return { - version: '1.0.0', - lastModified: new Date(), - tenantId: 'test', - userId: 'user1', - subscription: 'PROFESSIONAL', - customerType: null, - companyProfile: null, - complianceScope: null, - sourcePolicy: null, - currentPhase: 1, - currentStep: 'company-profile', - completedSteps: [], - checkpoints: {}, - importedDocuments: [], - gapAnalysis: null, - useCases: [], - activeUseCase: null, - screening: null, - modules: [], - requirements: [], - controls: [], - evidence: [], - checklist: [], - risks: [], - aiActClassification: null, - obligations: [], - dsfa: null, - toms: [], - retentionPolicies: [], - vvt: [], - documents: [], - cookieBanner: null, - consents: [], - dsrConfig: null, - escalationWorkflows: [], - preferences: { - language: 'de', - theme: 'light', - compactMode: false, - showHints: true, - autoSave: true, - autoValidate: true, - allowParallelWork: true, - }, - ...overrides, - } as SDKState - } - - function makeDecisionState(level: string = 'L2'): SDKState { - return makeState({ - companyProfile: { - companyName: 'Test 
GmbH', - industry: 'IT-Dienstleistung', - employeeCount: 50, - businessModel: 'SaaS', - isPublicSector: false, - } as any, - complianceScope: { - decision: { - id: 'dec-1', - determinedLevel: level, - scores: { risk_score: 60, complexity_score: 50, assurance_need: 55, composite_score: 55 }, - triggeredHardTriggers: [], - requiredDocuments: [ - { documentType: 'vvt', label: 'VVT', required: true, depth: 'Standard', detailItems: ['Bezeichnung', 'Zweck'], estimatedEffort: '2h', triggeredBy: [] }, - { documentType: 'tom', label: 'TOM', required: true, depth: 'Standard', detailItems: ['Verschluesselung'], estimatedEffort: '3h', triggeredBy: [] }, - { documentType: 'lf', label: 'LF', required: true, depth: 'Basis', detailItems: [], estimatedEffort: '1h', triggeredBy: [] }, - ], - riskFlags: [ - { id: 'rf-1', severity: 'MEDIUM', title: 'Cloud-Nutzung', description: '', recommendation: 'AVV pruefen' }, - ], - gaps: [ - { id: 'gap-1', severity: 'high', title: 'TOM fehlt', description: 'Keine TOM definiert', relatedDocuments: ['tom'] }, - ], - nextActions: [], - reasoning: [], - createdAt: new Date().toISOString(), - updatedAt: new Date().toISOString(), - }, - answers: [], - } as any, - vvt: [{ id: 'vvt-1', name: 'Kundenverwaltung' }] as any[], - toms: [], - retentionPolicies: [], - }) - } - - describe('projectForDraft', () => { - it('should return a DraftContext with correct structure', () => { - const state = makeDecisionState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result).toHaveProperty('decisions') - expect(result).toHaveProperty('companyProfile') - expect(result).toHaveProperty('constraints') - expect(result.decisions.level).toBe('L2') - }) - - it('should project company profile', () => { - const state = makeDecisionState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result.companyProfile.name).toBe('Test GmbH') - expect(result.companyProfile.industry).toBe('IT-Dienstleistung') - 
expect(result.companyProfile.employeeCount).toBe(50) - }) - - it('should provide defaults when no company profile', () => { - const state = makeState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result.companyProfile.name).toBe('Unbekannt') - expect(result.companyProfile.industry).toBe('Unbekannt') - expect(result.companyProfile.employeeCount).toBe(0) - }) - - it('should extract constraints and depth requirements', () => { - const state = makeDecisionState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result.constraints.depthRequirements).toBeDefined() - expect(result.constraints.boundaries.length).toBeGreaterThan(0) - }) - - it('should extract risk flags', () => { - const state = makeDecisionState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result.constraints.riskFlags.length).toBe(1) - expect(result.constraints.riskFlags[0].title).toBe('Cloud-Nutzung') - }) - - it('should include existing document data when available', () => { - const state = makeDecisionState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result.existingDocumentData).toBeDefined() - expect((result.existingDocumentData as any).totalCount).toBe(1) - }) - - it('should return undefined existingDocumentData when none exists', () => { - const state = makeDecisionState() - const result = projector.projectForDraft(state, 'tom') - - expect(result.existingDocumentData).toBeUndefined() - }) - - it('should filter required documents', () => { - const state = makeDecisionState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result.decisions.requiredDocuments.length).toBe(3) - expect(result.decisions.requiredDocuments.every(d => d.documentType)).toBe(true) - }) - - it('should handle empty state gracefully', () => { - const state = makeState() - const result = projector.projectForDraft(state, 'vvt') - - expect(result.decisions.level).toBe('L1') - 
expect(result.decisions.hardTriggers).toEqual([]) - expect(result.decisions.requiredDocuments).toEqual([]) - }) - }) - - describe('projectForAsk', () => { - it('should return a GapContext with correct structure', () => { - const state = makeDecisionState() - const result = projector.projectForAsk(state) - - expect(result).toHaveProperty('unansweredQuestions') - expect(result).toHaveProperty('gaps') - expect(result).toHaveProperty('missingDocuments') - }) - - it('should identify missing documents', () => { - const state = makeDecisionState() - // vvt exists, tom and lf are missing - const result = projector.projectForAsk(state) - - expect(result.missingDocuments.some(d => d.documentType === 'tom')).toBe(true) - expect(result.missingDocuments.some(d => d.documentType === 'lf')).toBe(true) - }) - - it('should not list existing documents as missing', () => { - const state = makeDecisionState() - const result = projector.projectForAsk(state) - - // vvt exists in state - expect(result.missingDocuments.some(d => d.documentType === 'vvt')).toBe(false) - }) - - it('should include gaps from scope decision', () => { - const state = makeDecisionState() - const result = projector.projectForAsk(state) - - expect(result.gaps.length).toBe(1) - expect(result.gaps[0].title).toBe('TOM fehlt') - }) - - it('should handle empty state', () => { - const state = makeState() - const result = projector.projectForAsk(state) - - expect(result.gaps).toEqual([]) - expect(result.missingDocuments).toEqual([]) - }) - }) - - describe('projectForValidate', () => { - it('should return a ValidationContext with correct structure', () => { - const state = makeDecisionState() - const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf']) - - expect(result).toHaveProperty('documents') - expect(result).toHaveProperty('crossReferences') - expect(result).toHaveProperty('scopeLevel') - expect(result).toHaveProperty('depthRequirements') - }) - - it('should include all requested document types', () 
=> { - const state = makeDecisionState() - const result = projector.projectForValidate(state, ['vvt', 'tom']) - - expect(result.documents.length).toBe(2) - expect(result.documents.map(d => d.type)).toContain('vvt') - expect(result.documents.map(d => d.type)).toContain('tom') - }) - - it('should include cross-references', () => { - const state = makeDecisionState() - const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf']) - - expect(result.crossReferences).toHaveProperty('vvtCategories') - expect(result.crossReferences).toHaveProperty('tomControls') - expect(result.crossReferences).toHaveProperty('retentionCategories') - expect(result.crossReferences.vvtCategories.length).toBe(1) - expect(result.crossReferences.vvtCategories[0]).toBe('Kundenverwaltung') - }) - - it('should include scope level', () => { - const state = makeDecisionState('L3') - const result = projector.projectForValidate(state, ['vvt']) - - expect(result.scopeLevel).toBe('L3') - }) - - it('should include depth requirements per document type', () => { - const state = makeDecisionState() - const result = projector.projectForValidate(state, ['vvt', 'tom']) - - expect(result.depthRequirements).toHaveProperty('vvt') - expect(result.depthRequirements).toHaveProperty('tom') - }) - - it('should summarize documents', () => { - const state = makeDecisionState() - const result = projector.projectForValidate(state, ['vvt', 'tom']) - - expect(result.documents[0].contentSummary).toContain('1') - expect(result.documents[1].contentSummary).toContain('Keine TOM') - }) - - it('should handle empty state', () => { - const state = makeState() - const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf']) - - expect(result.scopeLevel).toBe('L1') - expect(result.crossReferences.vvtCategories).toEqual([]) - expect(result.crossReferences.tomControls).toEqual([]) - }) - }) - - describe('token budget estimation', () => { - it('projectForDraft should produce compact output', () => { - const state = 
makeDecisionState() - const result = projector.projectForDraft(state, 'vvt') - const json = JSON.stringify(result) - - // Rough token estimation: ~4 chars per token - const estimatedTokens = json.length / 4 - expect(estimatedTokens).toBeLessThan(2000) // Budget is ~1500 - }) - - it('projectForAsk should produce very compact output', () => { - const state = makeDecisionState() - const result = projector.projectForAsk(state) - const json = JSON.stringify(result) - - const estimatedTokens = json.length / 4 - expect(estimatedTokens).toBeLessThan(1000) // Budget is ~600 - }) - - it('projectForValidate should stay within budget', () => { - const state = makeDecisionState() - const result = projector.projectForValidate(state, ['vvt', 'tom', 'lf']) - const json = JSON.stringify(result) - - const estimatedTokens = json.length / 4 - expect(estimatedTokens).toBeLessThan(3000) // Budget is ~2000 - }) - }) -}) diff --git a/admin-compliance/lib/sdk/drafting-engine/allowed-facts.ts b/admin-compliance/lib/sdk/drafting-engine/allowed-facts.ts index 1ef2e71..f84ee90 100644 --- a/admin-compliance/lib/sdk/drafting-engine/allowed-facts.ts +++ b/admin-compliance/lib/sdk/drafting-engine/allowed-facts.ts @@ -95,11 +95,11 @@ export function buildAllowedFacts( const scope = state.complianceScope return { - companyName: profile?.name ?? 'Unbekannt', + companyName: profile?.companyName ?? 'Unbekannt', legalForm: profile?.legalForm ?? '', industry: profile?.industry ?? '', - location: profile?.location ?? '', - employeeCount: profile?.employeeCount ?? 0, + location: profile?.headquartersCity ?? '', + employeeCount: parseEmployeeCount(profile?.employeeCount), teamStructure: deriveTeamStructure(profile), itLandscape: deriveItLandscape(profile), @@ -213,11 +213,33 @@ export function checkForDisallowedContent( // Private Helpers // ============================================================================ +/** + * Parst den employeeCount-String (z.B. "1-9", "50-249", "1000+") in eine Zahl. 
+ * Verwendet den Mittelwert des Bereichs oder den unteren Wert bei "+". + */ +function parseEmployeeCount(value: string | undefined | null): number { + if (!value) return 0 + // Handle "1000+" style + const plusMatch = value.match(/^(\d+)\+$/) + if (plusMatch) return parseInt(plusMatch[1], 10) + // Handle "50-249" style ranges + const rangeMatch = value.match(/^(\d+)-(\d+)$/) + if (rangeMatch) { + const low = parseInt(rangeMatch[1], 10) + const high = parseInt(rangeMatch[2], 10) + return Math.round((low + high) / 2) + } + // Try plain number + const num = parseInt(value, 10) + return isNaN(num) ? 0 : num +} + function deriveTeamStructure(profile: CompanyProfile | null): string { if (!profile) return '' // Ableitung aus verfuegbaren Profildaten - if (profile.employeeCount > 500) return 'Konzernstruktur' - if (profile.employeeCount > 50) return 'mittelstaendisch' + const count = parseEmployeeCount(profile.employeeCount) + if (count > 500) return 'Konzernstruktur' + if (count > 50) return 'mittelstaendisch' return 'Kleinunternehmen' } @@ -225,15 +247,15 @@ function deriveItLandscape(profile: CompanyProfile | null): string { if (!profile) return '' return profile.businessModel?.includes('SaaS') ? 'Cloud-First' : profile.businessModel?.includes('Cloud') ? 'Cloud-First' : - profile.isPublicSector ? 
'On-Premise' : 'Hybrid' + 'Hybrid' } function deriveSpecialFeatures(profile: CompanyProfile | null): string[] { if (!profile) return [] const features: string[] = [] - if (profile.isPublicSector) features.push('Oeffentlicher Sektor') - if (profile.employeeCount > 250) features.push('Grossunternehmen') - if (profile.dataProtectionOfficer) features.push('Interner DSB benannt') + const count = parseEmployeeCount(profile.employeeCount) + if (count > 250) features.push('Grossunternehmen') + if (profile.dpoName) features.push('Interner DSB benannt') return features } @@ -253,5 +275,5 @@ function deriveTriggeredRegulations( function derivePrimaryUseCases(state: SDKState): string[] { if (!state.useCases || state.useCases.length === 0) return [] - return state.useCases.slice(0, 3).map(uc => uc.name || uc.title || 'Unbenannt') + return state.useCases.slice(0, 3).map(uc => uc.name || 'Unbenannt') } diff --git a/admin-compliance/lib/sdk/drafting-engine/intent-classifier.ts b/admin-compliance/lib/sdk/drafting-engine/intent-classifier.ts deleted file mode 100644 index 071e881..0000000 --- a/admin-compliance/lib/sdk/drafting-engine/intent-classifier.ts +++ /dev/null @@ -1,373 +0,0 @@ -/** - * Intent Classifier - Leichtgewichtiger Pattern-Matcher - * - * Erkennt den Agent-Modus anhand des Nutzer-Inputs ohne LLM-Call. - * Deutsche und englische Muster werden unterstuetzt. 
- * - * Confidence-Schwellen: - * - >0.8: Hohe Sicherheit, automatisch anwenden - * - 0.6-0.8: Mittel, Nutzer kann bestaetigen - * - <0.6: Fallback zu 'explain' - */ - -import type { AgentMode, IntentClassification } from './types' -import type { ScopeDocumentType } from '../compliance-scope-types' - -// ============================================================================ -// Pattern Definitions -// ============================================================================ - -interface ModePattern { - mode: AgentMode - patterns: RegExp[] - /** Base-Confidence wenn ein Pattern matched */ - baseConfidence: number -} - -const MODE_PATTERNS: ModePattern[] = [ - { - mode: 'draft', - baseConfidence: 0.85, - patterns: [ - /\b(erstell|generier|entw[iu]rf|entwer[ft]|schreib|verfass|formulier|anlege)/i, - /\b(draft|create|generate|write|compose)\b/i, - /\b(neues?\s+(?:vvt|tom|dsfa|dokument|loeschkonzept|datenschutzerklaerung))\b/i, - /\b(vorlage|template)\s+(erstell|generier)/i, - /\bfuer\s+(?:uns|mich|unser)\b.*\b(erstell|schreib)/i, - ], - }, - { - mode: 'validate', - baseConfidence: 0.80, - patterns: [ - /\b(pruef|validier|check|kontrollier|ueberpruef)\b/i, - /\b(korrekt|richtig|vollstaendig|konsistent|komplett)\b.*\?/i, - /\b(stimmt|passt)\b.*\b(das|mein|unser)\b/i, - /\b(validate|verify|check|review)\b/i, - /\b(fehler|luecken?|maengel)\b.*\b(find|such|zeig)\b/i, - /\bcross[\s-]?check\b/i, - /\b(vvt|tom|dsfa)\b.*\b(konsisten[tz]|widerspruch|uebereinstimm)/i, - ], - }, - { - mode: 'ask', - baseConfidence: 0.75, - patterns: [ - /\bwas\s+fehlt\b/i, - /\b(luecken?|gaps?)\b.*\b(zeig|find|identifizier|analysier)/i, - /\b(unvollstaendig|unfertig|offen)\b/i, - /\bwelche\s+(dokumente?|informationen?|daten)\b.*\b(fehlen?|brauch|benoetig)/i, - /\b(naechste[rn]?\s+schritt|next\s+step|todo)\b/i, - /\bworan\s+(muss|soll)\b/i, - ], - }, -] - -/** Dokumenttyp-Erkennung */ -const DOCUMENT_TYPE_PATTERNS: Array<{ - type: ScopeDocumentType - patterns: RegExp[] -}> = [ - { - type: 
'vvt', - patterns: [ - /\bv{1,2}t\b/i, - /\bverarbeitungsverzeichnis\b/i, - /\bverarbeitungstaetigkeit/i, - /\bprocessing\s+activit/i, - /\bart\.?\s*30\b/i, - ], - }, - { - type: 'tom', - patterns: [ - /\btom\b/i, - /\btechnisch.*organisatorisch.*massnahm/i, - /\bart\.?\s*32\b/i, - /\bsicherheitsmassnahm/i, - ], - }, - { - type: 'dsfa', - patterns: [ - /\bdsfa\b/i, - /\bdatenschutz[\s-]?folgenabschaetzung\b/i, - /\bdpia\b/i, - /\bart\.?\s*35\b/i, - /\bimpact\s+assessment\b/i, - ], - }, - { - type: 'dsi', - patterns: [ - /\bdatenschutzerklaerung\b/i, - /\bprivacy\s+policy\b/i, - /\bdsi\b/i, - /\bart\.?\s*13\b/i, - /\bart\.?\s*14\b/i, - ], - }, - { - type: 'lf', - patterns: [ - /\bloeschfrist/i, - /\bloeschkonzept/i, - /\bretention/i, - /\baufbewahr/i, - ], - }, - { - type: 'av_vertrag', - patterns: [ - /\bavv?\b/i, - /\bauftragsverarbeit/i, - /\bdata\s+processing\s+agreement/i, - /\bart\.?\s*28\b/i, - ], - }, - { - type: 'betroffenenrechte', - patterns: [ - /\bbetroffenenrecht/i, - /\bdata\s+subject\s+right/i, - /\bart\.?\s*15\b/i, - /\bauskunft/i, - ], - }, - { - type: 'einwilligung', - patterns: [ - /\beinwillig/i, - /\bconsent/i, - /\bcookie/i, - ], - }, - { - type: 'datenpannen', - patterns: [ - /\bdatenpanne/i, - /\bdata\s*breach/i, - /\bart\.?\s*33\b/i, - /\bsicherheitsvorfall/i, - /\bincident/i, - /\bmelde.*vorfall/i, - ], - }, - { - type: 'daten_transfer', - patterns: [ - /\bdrittland/i, - /\btransfer/i, - /\bscc\b/i, - /\bstandardvertragsklausel/i, - /\bart\.?\s*44\b/i, - ], - }, - { - type: 'vertragsmanagement', - patterns: [ - /\bvertragsmanagement/i, - /\bcontract\s*management/i, - ], - }, - { - type: 'schulung', - patterns: [ - /\bschulung/i, - /\btraining/i, - /\bawareness/i, - /\bmitarbeiterschulung/i, - ], - }, - { - type: 'audit_log', - patterns: [ - /\baudit/i, - /\blogging\b/i, - /\bprotokollierung/i, - /\bart\.?\s*5\s*abs\.?\s*2\b/i, - ], - }, - { - type: 'risikoanalyse', - patterns: [ - /\brisikoanalyse/i, - /\brisk\s*assessment/i, - 
/\brisikobewertung/i, - ], - }, - { - type: 'notfallplan', - patterns: [ - /\bnotfallplan/i, - /\bkrisenmanagement/i, - /\bbusiness\s*continuity/i, - /\bnotfall/i, - ], - }, - { - type: 'zertifizierung', - patterns: [ - /\bzertifizierung/i, - /\biso\s*27001\b/i, - /\biso\s*27701\b/i, - /\bart\.?\s*42\b/i, - ], - }, - { - type: 'datenschutzmanagement', - patterns: [ - /\bdsms\b/i, - /\bdatenschutzmanagement/i, - /\bpdca/i, - ], - }, - { - type: 'iace_ce_assessment', - patterns: [ - /\biace\b/i, - /\bce[\s-]?kennzeichnung/i, - /\bai\s*act\b/i, - /\bki[\s-]?verordnung/i, - ], - }, -] - -// ============================================================================ -// Redirect Patterns (nicht-draftbare Dokumente → Document Generator) -// ============================================================================ - -const REDIRECT_PATTERNS: Array<{ - pattern: RegExp - response: string -}> = [ - { - pattern: /\bimpressum\b/i, - response: 'Impressum-Templates finden Sie unter /sdk/document-generator → Kategorie "Impressum". Der Drafting Agent erstellt keine Impressen, da diese nach DDG §5 unternehmensspezifisch sind.', - }, - { - pattern: /\b(agb|allgemeine.?geschaefts)/i, - response: 'AGB-Vorlagen erstellen Sie im Document Generator unter /sdk/document-generator → Kategorie "AGB". 
Der Drafting Agent erstellt keine AGB, da diese nach BGB §305ff individuell gestaltet werden muessen.', - }, - { - pattern: /\bwiderruf/i, - response: 'Widerrufs-Templates finden Sie unter /sdk/document-generator → Kategorie "Widerruf".', - }, - { - pattern: /\bnda\b/i, - response: 'NDA-Vorlagen finden Sie unter /sdk/document-generator.', - }, - { - pattern: /\bsla\b/i, - response: 'SLA-Vorlagen finden Sie unter /sdk/document-generator.', - }, -] - -// ============================================================================ -// Classifier -// ============================================================================ - -export class IntentClassifier { - - /** - * Klassifiziert die Nutzerabsicht anhand des Inputs. - * - * @param input - Die Nutzer-Nachricht - * @returns IntentClassification mit Mode, Confidence, Patterns - */ - classify(input: string): IntentClassification { - const normalized = this.normalize(input) - - // Redirect-Check: Nicht-draftbare Dokumente → Document Generator - for (const redirect of REDIRECT_PATTERNS) { - if (redirect.pattern.test(normalized)) { - return { - mode: 'explain', - confidence: 0.90, - matchedPatterns: [redirect.pattern.source], - suggestedResponse: redirect.response, - } - } - } - - let bestMatch: IntentClassification = { - mode: 'explain', - confidence: 0.3, - matchedPatterns: [], - } - - for (const modePattern of MODE_PATTERNS) { - const matched: string[] = [] - - for (const pattern of modePattern.patterns) { - if (pattern.test(normalized)) { - matched.push(pattern.source) - } - } - - if (matched.length > 0) { - // Mehr Matches = hoehere Confidence (bis zum Maximum) - const matchBonus = Math.min(matched.length - 1, 2) * 0.05 - const confidence = Math.min(modePattern.baseConfidence + matchBonus, 0.99) - - if (confidence > bestMatch.confidence) { - bestMatch = { - mode: modePattern.mode, - confidence, - matchedPatterns: matched, - } - } - } - } - - // Dokumenttyp erkennen - const detectedDocType = 
this.detectDocumentType(normalized) - if (detectedDocType) { - bestMatch.detectedDocumentType = detectedDocType - // Dokumenttyp-Erkennung erhoeht Confidence leicht - bestMatch.confidence = Math.min(bestMatch.confidence + 0.05, 0.99) - } - - // Fallback: Bei Confidence <0.6 immer 'explain' - if (bestMatch.confidence < 0.6) { - bestMatch.mode = 'explain' - } - - return bestMatch - } - - /** - * Erkennt den Dokumenttyp aus dem Input. - */ - detectDocumentType(input: string): ScopeDocumentType | undefined { - const normalized = this.normalize(input) - - for (const docPattern of DOCUMENT_TYPE_PATTERNS) { - for (const pattern of docPattern.patterns) { - if (pattern.test(normalized)) { - return docPattern.type - } - } - } - - return undefined - } - - /** - * Normalisiert den Input fuer Pattern-Matching. - * Ersetzt Umlaute, entfernt Sonderzeichen. - */ - private normalize(input: string): string { - return input - .replace(/ä/g, 'ae') - .replace(/ö/g, 'oe') - .replace(/ü/g, 'ue') - .replace(/ß/g, 'ss') - .replace(/Ä/g, 'Ae') - .replace(/Ö/g, 'Oe') - .replace(/Ü/g, 'Ue') - } -} - -/** Singleton-Instanz */ -export const intentClassifier = new IntentClassifier() diff --git a/admin-compliance/lib/sdk/drafting-engine/sanitizer.ts b/admin-compliance/lib/sdk/drafting-engine/sanitizer.ts index ccdad14..161a627 100644 --- a/admin-compliance/lib/sdk/drafting-engine/sanitizer.ts +++ b/admin-compliance/lib/sdk/drafting-engine/sanitizer.ts @@ -243,7 +243,7 @@ function sanitizeAddress( */ export function validateNoRemainingPII(facts: SanitizedFacts): string[] { const warnings: string[] = [] - const allValues = extractAllStringValues(facts) + const allValues = extractAllStringValues(facts as unknown as Record) for (const { path, value } of allValues) { if (path === '__sanitized') continue diff --git a/admin-compliance/lib/sdk/drafting-engine/state-projector.ts b/admin-compliance/lib/sdk/drafting-engine/state-projector.ts deleted file mode 100644 index 9eba9c9..0000000 --- 
a/admin-compliance/lib/sdk/drafting-engine/state-projector.ts +++ /dev/null @@ -1,342 +0,0 @@ -/** - * State Projector - Token-budgetierte Projektion des SDK-State - * - * Extrahiert aus dem vollen SDKState (der ~50k Tokens betragen kann) nur die - * relevanten Slices fuer den jeweiligen Agent-Modus. - * - * Token-Budgets: - * - Draft: ~1500 Tokens - * - Ask: ~600 Tokens - * - Validate: ~2000 Tokens - */ - -import type { SDKState, CompanyProfile } from '../types' -import type { - ComplianceScopeState, - ScopeDecision, - ScopeDocumentType, - ScopeGap, - RequiredDocument, - RiskFlag, - DOCUMENT_SCOPE_MATRIX, - DocumentDepthRequirement, -} from '../compliance-scope-types' -import { DOCUMENT_SCOPE_MATRIX as DOC_MATRIX, DOCUMENT_TYPE_LABELS } from '../compliance-scope-types' -import type { - DraftContext, - GapContext, - ValidationContext, -} from './types' - -// ============================================================================ -// State Projector -// ============================================================================ - -export class StateProjector { - - /** - * Projiziert den SDKState fuer Draft-Operationen. - * Fokus: Scope-Decision, Company-Profile, Dokument-spezifische Constraints. - * - * ~1500 Tokens - */ - projectForDraft( - state: SDKState, - documentType: ScopeDocumentType - ): DraftContext { - const decision = state.complianceScope?.decision ?? null - const level = decision?.determinedLevel ?? 'L1' - const depthReq = DOC_MATRIX[documentType]?.[level] ?? { - required: false, - depth: 'Basis', - detailItems: [], - estimatedEffort: 'N/A', - } - - return { - decisions: { - level, - scores: decision?.scores ?? { - risk_score: 0, - complexity_score: 0, - assurance_need: 0, - composite_score: 0, - }, - hardTriggers: (decision?.triggeredHardTriggers ?? []).map(t => ({ - id: t.rule.id, - label: t.rule.label, - legalReference: t.rule.legalReference, - })), - requiredDocuments: (decision?.requiredDocuments ?? 
[]) - .filter(d => d.required) - .map(d => ({ - documentType: d.documentType, - depth: d.depth, - detailItems: d.detailItems, - })), - }, - companyProfile: this.projectCompanyProfile(state.companyProfile), - constraints: { - depthRequirements: depthReq, - riskFlags: (decision?.riskFlags ?? []).map(f => ({ - severity: f.severity, - title: f.title, - recommendation: f.recommendation, - })), - boundaries: this.deriveBoundaries(decision, documentType), - }, - existingDocumentData: this.extractExistingDocumentData(state, documentType), - } - } - - /** - * Projiziert den SDKState fuer Ask-Operationen. - * Fokus: Luecken, unbeantwortete Fragen, fehlende Dokumente. - * - * ~600 Tokens - */ - projectForAsk(state: SDKState): GapContext { - const decision = state.complianceScope?.decision ?? null - - // Fehlende Pflichtdokumente ermitteln - const requiredDocs = (decision?.requiredDocuments ?? []).filter(d => d.required) - const existingDocTypes = this.getExistingDocumentTypes(state) - const missingDocuments = requiredDocs - .filter(d => !existingDocTypes.includes(d.documentType)) - .map(d => ({ - documentType: d.documentType, - label: DOCUMENT_TYPE_LABELS[d.documentType] ?? d.documentType, - depth: d.depth, - estimatedEffort: d.estimatedEffort, - })) - - // Gaps aus der Scope-Decision - const gaps = (decision?.gaps ?? []).map(g => ({ - id: g.id, - severity: g.severity, - title: g.title, - description: g.description, - relatedDocuments: g.relatedDocuments, - })) - - // Unbeantwortete Fragen (aus dem Scope-Profiling) - const answers = state.complianceScope?.answers ?? [] - const answeredIds = new Set(answers.map(a => a.questionId)) - - return { - unansweredQuestions: [], // Populated dynamically from question catalog - gaps, - missingDocuments, - } - } - - /** - * Projiziert den SDKState fuer Validate-Operationen. - * Fokus: Cross-Dokument-Konsistenz, Scope-Compliance. 
- * - * ~2000 Tokens - */ - projectForValidate( - state: SDKState, - documentTypes: ScopeDocumentType[] - ): ValidationContext { - const decision = state.complianceScope?.decision ?? null - const level = decision?.determinedLevel ?? 'L1' - - // Dokument-Zusammenfassungen sammeln - const documents = documentTypes.map(type => ({ - type, - contentSummary: this.summarizeDocument(state, type), - structuredData: this.extractExistingDocumentData(state, type), - })) - - // Cross-Referenzen extrahieren - const crossReferences = { - vvtCategories: (state.vvt ?? []).map(v => - typeof v === 'object' && v !== null && 'name' in v ? String((v as Record).name) : '' - ).filter(Boolean), - dsfaRisks: state.dsfa - ? ['DSFA vorhanden'] - : [], - tomControls: (state.toms ?? []).map(t => - typeof t === 'object' && t !== null && 'name' in t ? String((t as Record).name) : '' - ).filter(Boolean), - retentionCategories: (state.retentionPolicies ?? []).map(p => - typeof p === 'object' && p !== null && 'name' in p ? String((p as Record).name) : '' - ).filter(Boolean), - } - - // Depth-Requirements fuer alle angefragten Typen - const depthRequirements: Record = {} - for (const type of documentTypes) { - depthRequirements[type] = DOC_MATRIX[type]?.[level] ?? { - required: false, - depth: 'Basis', - detailItems: [], - estimatedEffort: 'N/A', - } - } - - return { - documents, - crossReferences, - scopeLevel: level, - depthRequirements: depthRequirements as Record, - } - } - - // ========================================================================== - // Private Helpers - // ========================================================================== - - private projectCompanyProfile( - profile: CompanyProfile | null - ): DraftContext['companyProfile'] { - if (!profile) { - return { - name: 'Unbekannt', - industry: 'Unbekannt', - employeeCount: 0, - businessModel: 'Unbekannt', - isPublicSector: false, - } - } - - return { - name: profile.companyName ?? profile.name ?? 
'Unbekannt', - industry: profile.industry ?? 'Unbekannt', - employeeCount: typeof profile.employeeCount === 'number' - ? profile.employeeCount - : parseInt(String(profile.employeeCount ?? '0'), 10) || 0, - businessModel: profile.businessModel ?? 'Unbekannt', - isPublicSector: profile.isPublicSector ?? false, - ...(profile.dataProtectionOfficer ? { - dataProtectionOfficer: { - name: profile.dataProtectionOfficer.name ?? '', - email: profile.dataProtectionOfficer.email ?? '', - }, - } : {}), - } - } - - /** - * Leitet Grenzen (Boundaries) ab, die der Agent nicht ueberschreiten darf. - */ - private deriveBoundaries( - decision: ScopeDecision | null, - documentType: ScopeDocumentType - ): string[] { - const boundaries: string[] = [] - const level = decision?.determinedLevel ?? 'L1' - - // Grundregel: Scope-Engine ist autoritativ - boundaries.push( - `Maximale Dokumenttiefe: ${level} (${DOC_MATRIX[documentType]?.[level]?.depth ?? 'Basis'})` - ) - - // DSFA-Boundary - if (documentType === 'dsfa') { - const dsfaRequired = decision?.triggeredHardTriggers?.some( - t => t.rule.dsfaRequired - ) ?? false - if (!dsfaRequired && level !== 'L4') { - boundaries.push('DSFA ist laut Scope-Engine NICHT erforderlich. Nur auf expliziten Wunsch erstellen.') - } - } - - // Dokument nicht in requiredDocuments? - const isRequired = decision?.requiredDocuments?.some( - d => d.documentType === documentType && d.required - ) ?? false - if (!isRequired) { - boundaries.push( - `Dokument "${DOCUMENT_TYPE_LABELS[documentType] ?? documentType}" ist auf Level ${level} nicht als Pflicht eingestuft.` - ) - } - - return boundaries - } - - /** - * Extrahiert bereits vorhandene Dokumentdaten aus dem SDK-State. - */ - private extractExistingDocumentData( - state: SDKState, - documentType: ScopeDocumentType - ): Record | undefined { - switch (documentType) { - case 'vvt': - return state.vvt?.length ? 
{ entries: state.vvt.slice(0, 5), totalCount: state.vvt.length } : undefined - case 'tom': - return state.toms?.length ? { entries: state.toms.slice(0, 5), totalCount: state.toms.length } : undefined - case 'lf': - return state.retentionPolicies?.length - ? { entries: state.retentionPolicies.slice(0, 5), totalCount: state.retentionPolicies.length } - : undefined - case 'dsfa': - return state.dsfa ? { assessment: state.dsfa } : undefined - case 'dsi': - return state.documents?.length - ? { entries: state.documents.slice(0, 3), totalCount: state.documents.length } - : undefined - case 'einwilligung': - return state.consents?.length - ? { entries: state.consents.slice(0, 5), totalCount: state.consents.length } - : undefined - default: - return undefined - } - } - - /** - * Ermittelt welche Dokumenttypen bereits im State vorhanden sind. - */ - private getExistingDocumentTypes(state: SDKState): ScopeDocumentType[] { - const types: ScopeDocumentType[] = [] - if (state.vvt?.length) types.push('vvt') - if (state.toms?.length) types.push('tom') - if (state.retentionPolicies?.length) types.push('lf') - if (state.dsfa) types.push('dsfa') - if (state.documents?.length) types.push('dsi') - if (state.consents?.length) types.push('einwilligung') - if (state.cookieBanner) types.push('einwilligung') - if (state.risks?.length) types.push('risikoanalyse') - if (state.escalationWorkflows?.length) types.push('datenpannen') - if (state.iaceProjects?.length) types.push('iace_ce_assessment') - if (state.obligations?.length) types.push('zertifizierung') - if (state.dsrConfig) types.push('betroffenenrechte') - return types - } - - /** - * Erstellt eine kurze Zusammenfassung eines Dokuments fuer Validierung. - */ - private summarizeDocument( - state: SDKState, - documentType: ScopeDocumentType - ): string { - switch (documentType) { - case 'vvt': - return state.vvt?.length - ? 
`${state.vvt.length} Verarbeitungstaetigkeiten erfasst` - : 'Keine VVT-Eintraege vorhanden' - case 'tom': - return state.toms?.length - ? `${state.toms.length} TOM-Massnahmen definiert` - : 'Keine TOM-Massnahmen vorhanden' - case 'lf': - return state.retentionPolicies?.length - ? `${state.retentionPolicies.length} Loeschfristen definiert` - : 'Keine Loeschfristen vorhanden' - case 'dsfa': - return state.dsfa - ? 'DSFA vorhanden' - : 'Keine DSFA vorhanden' - default: - return `Dokument ${DOCUMENT_TYPE_LABELS[documentType] ?? documentType}` - } - } -} - -/** Singleton-Instanz */ -export const stateProjector = new StateProjector() diff --git a/admin-compliance/lib/sdk/drafting-engine/use-drafting-engine.ts b/admin-compliance/lib/sdk/drafting-engine/use-drafting-engine.ts deleted file mode 100644 index d8de534..0000000 --- a/admin-compliance/lib/sdk/drafting-engine/use-drafting-engine.ts +++ /dev/null @@ -1,343 +0,0 @@ -'use client' - -/** - * useDraftingEngine - React Hook fuer die Drafting Engine - * - * Managed: currentMode, activeDocumentType, draftSessions, validationState - * Handled: State-Projection, API-Calls, Streaming - * Provides: sendMessage(), requestDraft(), validateDraft(), acceptDraft() - */ - -import { useState, useCallback, useRef } from 'react' -import { useSDK } from '../context' -import { stateProjector } from './state-projector' -import { intentClassifier } from './intent-classifier' -import { constraintEnforcer } from './constraint-enforcer' -import type { - AgentMode, - DraftSession, - DraftRevision, - DraftingChatMessage, - ValidationResult, - ConstraintCheckResult, - DraftContext, - GapContext, - ValidationContext, -} from './types' -import type { ScopeDocumentType } from '../compliance-scope-types' - -export interface DraftingEngineState { - currentMode: AgentMode - activeDocumentType: ScopeDocumentType | null - messages: DraftingChatMessage[] - isTyping: boolean - currentDraft: DraftRevision | null - validationResult: ValidationResult | 
null - constraintCheck: ConstraintCheckResult | null - error: string | null -} - -export interface DraftingEngineActions { - setMode: (mode: AgentMode) => void - setDocumentType: (type: ScopeDocumentType) => void - sendMessage: (content: string) => Promise - requestDraft: (instructions?: string) => Promise - validateDraft: () => Promise - acceptDraft: () => void - stopGeneration: () => void - clearMessages: () => void -} - -export function useDraftingEngine(): DraftingEngineState & DraftingEngineActions { - const { state, dispatch } = useSDK() - const abortControllerRef = useRef(null) - - const [currentMode, setCurrentMode] = useState('explain') - const [activeDocumentType, setActiveDocumentType] = useState(null) - const [messages, setMessages] = useState([]) - const [isTyping, setIsTyping] = useState(false) - const [currentDraft, setCurrentDraft] = useState(null) - const [validationResult, setValidationResult] = useState(null) - const [constraintCheck, setConstraintCheck] = useState(null) - const [error, setError] = useState(null) - - // Get state projection based on mode - const getProjection = useCallback(() => { - switch (currentMode) { - case 'draft': - return activeDocumentType - ? stateProjector.projectForDraft(state, activeDocumentType) - : null - case 'ask': - return stateProjector.projectForAsk(state) - case 'validate': - return activeDocumentType - ? stateProjector.projectForValidate(state, [activeDocumentType]) - : stateProjector.projectForValidate(state, ['vvt', 'tom', 'lf']) - default: - return activeDocumentType - ? 
stateProjector.projectForDraft(state, activeDocumentType) - : null - } - }, [state, currentMode, activeDocumentType]) - - const setMode = useCallback((mode: AgentMode) => { - setCurrentMode(mode) - }, []) - - const setDocumentType = useCallback((type: ScopeDocumentType) => { - setActiveDocumentType(type) - }, []) - - const sendMessage = useCallback(async (content: string) => { - if (!content.trim() || isTyping) return - setError(null) - - // Auto-detect mode if needed - const classification = intentClassifier.classify(content) - if (classification.confidence > 0.7 && classification.mode !== currentMode) { - setCurrentMode(classification.mode) - } - if (classification.detectedDocumentType && !activeDocumentType) { - setActiveDocumentType(classification.detectedDocumentType) - } - - const userMessage: DraftingChatMessage = { - role: 'user', - content: content.trim(), - } - setMessages(prev => [...prev, userMessage]) - setIsTyping(true) - - abortControllerRef.current = new AbortController() - - try { - const projection = getProjection() - const response = await fetch('/api/sdk/drafting-engine/chat', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - message: content.trim(), - history: messages.map(m => ({ role: m.role, content: m.content })), - sdkStateProjection: projection, - mode: currentMode, - documentType: activeDocumentType, - }), - signal: abortControllerRef.current.signal, - }) - - if (!response.ok) { - const errorData = await response.json().catch(() => ({ error: 'Unbekannter Fehler' })) - throw new Error(errorData.error || `Server-Fehler (${response.status})`) - } - - const agentMessageId = `msg-${Date.now()}-agent` - setMessages(prev => [...prev, { - role: 'assistant', - content: '', - metadata: { mode: currentMode, documentType: activeDocumentType ?? 
undefined }, - }]) - - // Stream response - const reader = response.body!.getReader() - const decoder = new TextDecoder() - let accumulated = '' - - while (true) { - const { done, value } = await reader.read() - if (done) break - accumulated += decoder.decode(value, { stream: true }) - const text = accumulated - setMessages(prev => - prev.map((m, i) => i === prev.length - 1 ? { ...m, content: text } : m) - ) - } - - setIsTyping(false) - } catch (err) { - if ((err as Error).name === 'AbortError') { - setIsTyping(false) - return - } - setError((err as Error).message) - setMessages(prev => [...prev, { - role: 'assistant', - content: `Fehler: ${(err as Error).message}`, - }]) - setIsTyping(false) - } - }, [isTyping, messages, currentMode, activeDocumentType, getProjection]) - - const requestDraft = useCallback(async (instructions?: string) => { - if (!activeDocumentType) { - setError('Bitte waehlen Sie zuerst einen Dokumenttyp.') - return - } - setError(null) - setIsTyping(true) - - try { - const draftContext = stateProjector.projectForDraft(state, activeDocumentType) - - const response = await fetch('/api/sdk/drafting-engine/draft', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - documentType: activeDocumentType, - draftContext, - instructions, - existingDraft: currentDraft, - }), - }) - - const result = await response.json() - - if (!response.ok) { - throw new Error(result.error || 'Draft-Generierung fehlgeschlagen') - } - - setCurrentDraft(result.draft) - setConstraintCheck(result.constraintCheck) - - setMessages(prev => [...prev, { - role: 'assistant', - content: `Draft fuer ${activeDocumentType} erstellt (${result.draft.sections.length} Sections). 
Oeffnen Sie den Editor zur Bearbeitung.`, - metadata: { mode: 'draft', documentType: activeDocumentType, hasDraft: true }, - }]) - - setIsTyping(false) - } catch (err) { - setError((err as Error).message) - setIsTyping(false) - } - }, [activeDocumentType, state, currentDraft]) - - const validateDraft = useCallback(async () => { - setError(null) - setIsTyping(true) - - try { - const docTypes: ScopeDocumentType[] = activeDocumentType - ? [activeDocumentType] - : ['vvt', 'tom', 'lf'] - const validationContext = stateProjector.projectForValidate(state, docTypes) - - const response = await fetch('/api/sdk/drafting-engine/validate', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - documentType: activeDocumentType || 'vvt', - draftContent: currentDraft?.content || '', - validationContext, - }), - }) - - const result = await response.json() - - if (!response.ok) { - throw new Error(result.error || 'Validierung fehlgeschlagen') - } - - setValidationResult(result) - - const summary = result.passed - ? `Validierung bestanden. ${result.warnings.length} Warnungen, ${result.suggestions.length} Vorschlaege.` - : `Validierung fehlgeschlagen. 
${result.errors.length} Fehler, ${result.warnings.length} Warnungen.` - - setMessages(prev => [...prev, { - role: 'assistant', - content: summary, - metadata: { mode: 'validate', hasValidation: true }, - }]) - - setIsTyping(false) - } catch (err) { - setError((err as Error).message) - setIsTyping(false) - } - }, [activeDocumentType, state, currentDraft]) - - const acceptDraft = useCallback(() => { - if (!currentDraft || !activeDocumentType) return - - // Dispatch the draft data into SDK state - switch (activeDocumentType) { - case 'vvt': - dispatch({ - type: 'ADD_PROCESSING_ACTIVITY', - payload: { - id: `draft-vvt-${Date.now()}`, - name: currentDraft.sections.find(s => s.schemaField === 'name')?.content || 'Neuer VVT-Eintrag', - ...Object.fromEntries( - currentDraft.sections - .filter(s => s.schemaField) - .map(s => [s.schemaField!, s.content]) - ), - }, - }) - break - case 'tom': - dispatch({ - type: 'ADD_TOM', - payload: { - id: `draft-tom-${Date.now()}`, - name: 'TOM-Entwurf', - ...Object.fromEntries( - currentDraft.sections - .filter(s => s.schemaField) - .map(s => [s.schemaField!, s.content]) - ), - }, - }) - break - default: - dispatch({ - type: 'ADD_DOCUMENT', - payload: { - id: `draft-${activeDocumentType}-${Date.now()}`, - type: activeDocumentType, - content: currentDraft.content, - sections: currentDraft.sections, - }, - }) - } - - setMessages(prev => [...prev, { - role: 'assistant', - content: `Draft wurde in den SDK-State uebernommen.`, - }]) - setCurrentDraft(null) - }, [currentDraft, activeDocumentType, dispatch]) - - const stopGeneration = useCallback(() => { - abortControllerRef.current?.abort() - setIsTyping(false) - }, []) - - const clearMessages = useCallback(() => { - setMessages([]) - setCurrentDraft(null) - setValidationResult(null) - setConstraintCheck(null) - setError(null) - }, []) - - return { - currentMode, - activeDocumentType, - messages, - isTyping, - currentDraft, - validationResult, - constraintCheck, - error, - setMode, - 
setDocumentType, - sendMessage, - requestDraft, - validateDraft, - acceptDraft, - stopGeneration, - clearMessages, - } -} diff --git a/admin-compliance/lib/sdk/dsfa/__tests__/types.test.ts b/admin-compliance/lib/sdk/dsfa/__tests__/types.test.ts index 8b66249..388be3b 100644 --- a/admin-compliance/lib/sdk/dsfa/__tests__/types.test.ts +++ b/admin-compliance/lib/sdk/dsfa/__tests__/types.test.ts @@ -199,11 +199,14 @@ describe('DSFAMitigation type', () => { describe('DSFASectionProgress type', () => { it('should track completion for all 5 sections', () => { const progress: DSFASectionProgress = { + section_0_complete: false, section_1_complete: true, section_2_complete: true, section_3_complete: false, section_4_complete: false, section_5_complete: false, + section_6_complete: false, + section_7_complete: false, } expect(progress.section_1_complete).toBe(true) diff --git a/admin-compliance/lib/sdk/tom-generator/context.tsx b/admin-compliance/lib/sdk/tom-generator/context.tsx index fbb4920..e411c39 100644 --- a/admin-compliance/lib/sdk/tom-generator/context.tsx +++ b/admin-compliance/lib/sdk/tom-generator/context.tsx @@ -554,6 +554,15 @@ export function TOMGeneratorProvider({ [] ) + const bulkUpdateTOMs = useCallback( + (updates: Array<{ id: string; data: Partial }>) => { + for (const { id, data } of updates) { + dispatch({ type: 'UPDATE_DERIVED_TOM', payload: { id, data } }) + } + }, + [] + ) + // Gap analysis const runGapAnalysis = useCallback(() => { if (!rulesEngineRef.current) return @@ -666,6 +675,7 @@ export function TOMGeneratorProvider({ deriveTOMs, updateDerivedTOM, + bulkUpdateTOMs, runGapAnalysis, diff --git a/admin-compliance/package-lock.json b/admin-compliance/package-lock.json index 55af9e9..426fdc7 100644 --- a/admin-compliance/package-lock.json +++ b/admin-compliance/package-lock.json @@ -1,11 +1,11 @@ { - "name": "breakpilot-admin-v2", + "name": "breakpilot-compliance-sdk-admin", "version": "1.0.0", "lockfileVersion": 3, "requires": true, "packages": { 
"": { - "name": "breakpilot-admin-v2", + "name": "breakpilot-compliance-sdk-admin", "version": "1.0.0", "dependencies": { "bpmn-js": "^18.0.1", @@ -1560,15 +1560,15 @@ } }, "node_modules/@next/env": { - "version": "15.5.9", - "resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.9.tgz", - "integrity": "sha512-4GlTZ+EJM7WaW2HEZcyU317tIQDjkQIyENDLxYJfSWlfqguN+dHkZgyQTV/7ykvobU7yEH5gKvreNrH4B6QgIg==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.12.tgz", + "integrity": "sha512-pUvdJN1on574wQHjaBfNGDt9Mz5utDSZFsIIQkMzPgNS8ZvT4H2mwOrOIClwsQOb6EGx5M76/CZr6G8i6pSpLg==", "license": "MIT" }, "node_modules/@next/swc-darwin-arm64": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.7.tgz", - "integrity": "sha512-IZwtxCEpI91HVU/rAUOOobWSZv4P2DeTtNaCdHqLcTJU4wdNXgAySvKa/qJCgR5m6KI8UsKDXtO2B31jcaw1Yw==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.12.tgz", + "integrity": "sha512-RnRjBtH8S8eXCpUNkQ+543DUc7ys8y15VxmFU9HRqlo9BG3CcBUiwNtF8SNoi2xvGCVJq1vl2yYq+3oISBS0Zg==", "cpu": [ "arm64" ], @@ -1582,9 +1582,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.7.tgz", - "integrity": "sha512-UP6CaDBcqaCBuiq/gfCEJw7sPEoX1aIjZHnBWN9v9qYHQdMKvCKcAVs4OX1vIjeE+tC5EIuwDTVIoXpUes29lg==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.12.tgz", + "integrity": "sha512-nqa9/7iQlboF1EFtNhWxQA0rQstmYRSBGxSM6g3GxvxHxcoeqVXfGNr9stJOme674m2V7r4E3+jEhhGvSQhJRA==", "cpu": [ "x64" ], @@ -1598,9 +1598,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.7.tgz", - "integrity": 
"sha512-NCslw3GrNIw7OgmRBxHtdWFQYhexoUCq+0oS2ccjyYLtcn1SzGzeM54jpTFonIMUjNbHmpKpziXnpxhSWLcmBA==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.12.tgz", + "integrity": "sha512-dCzAjqhDHwmoB2M4eYfVKqXs99QdQxNQVpftvP1eGVppamXh/OkDAwV737Zr0KPXEqRUMN4uCjh6mjO+XtF3Mw==", "cpu": [ "arm64" ], @@ -1614,9 +1614,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.7.tgz", - "integrity": "sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.12.tgz", + "integrity": "sha512-+fpGWvQiITgf7PUtbWY1H7qUSnBZsPPLyyq03QuAKpVoTy/QUx1JptEDTQMVvQhvizCEuNLEeghrQUyXQOekuw==", "cpu": [ "arm64" ], @@ -1630,9 +1630,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.7.tgz", - "integrity": "sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.12.tgz", + "integrity": "sha512-jSLvgdRRL/hrFAPqEjJf1fFguC719kmcptjNVDJl26BnJIpjL3KH5h6mzR4mAweociLQaqvt4UyzfbFjgAdDcw==", "cpu": [ "x64" ], @@ -1646,9 +1646,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.7.tgz", - "integrity": "sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.12.tgz", + "integrity": 
"sha512-/uaF0WfmYqQgLfPmN6BvULwxY0dufI2mlN2JbOKqqceZh1G4hjREyi7pg03zjfyS6eqNemHAZPSoP84x17vo6w==", "cpu": [ "x64" ], @@ -1662,9 +1662,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.7.tgz", - "integrity": "sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.12.tgz", + "integrity": "sha512-xhsL1OvQSfGmlL5RbOmU+FV120urrgFpYLq+6U8C6KIym32gZT6XF/SDE92jKzzlPWskkbjOKCpqk5m4i8PEfg==", "cpu": [ "arm64" ], @@ -1678,9 +1678,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "15.5.7", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.7.tgz", - "integrity": "sha512-gMzgBX164I6DN+9/PGA+9dQiwmTkE4TloBNx8Kv9UiGARsr9Nba7IpcBRA1iTV9vwlYnrE3Uy6I7Aj6qLjQuqw==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.12.tgz", + "integrity": "sha512-Z1Dh6lhFkxvBDH1FoW6OU/L6prYwPSlwjLiZkExIAh8fbP6iI/M7iGTQAJPYJ9YFlWobCZ1PHbchFhFYb2ADkw==", "cpu": [ "x64" ], @@ -1857,9 +1857,9 @@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", - "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", + "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", "cpu": [ "arm" ], @@ -1871,9 +1871,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.57.1", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", - "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", + "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", "cpu": [ "arm64" ], @@ -1885,9 +1885,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", - "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", + "integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", "cpu": [ "arm64" ], @@ -1899,9 +1899,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", - "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", + "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", "cpu": [ "x64" ], @@ -1913,9 +1913,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", - "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "version": "4.59.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", + "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", "cpu": [ "arm64" ], @@ -1927,9 +1927,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", - "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", + "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", "cpu": [ "x64" ], @@ -1941,9 +1941,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", - "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", + "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", "cpu": [ "arm" ], @@ -1955,9 +1955,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", - "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", + "integrity": 
"sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", "cpu": [ "arm" ], @@ -1969,9 +1969,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", - "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", + "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", "cpu": [ "arm64" ], @@ -1983,9 +1983,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", - "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", + "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", "cpu": [ "arm64" ], @@ -1997,9 +1997,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", - "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", + "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", "cpu": [ "loong64" ], @@ -2011,9 +2011,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-musl": { - 
"version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", - "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", + "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", "cpu": [ "loong64" ], @@ -2025,9 +2025,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", - "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", + "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", "cpu": [ "ppc64" ], @@ -2039,9 +2039,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", - "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", + "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", "cpu": [ "ppc64" ], @@ -2053,9 +2053,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", - "integrity": 
"sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", + "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", "cpu": [ "riscv64" ], @@ -2067,9 +2067,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", - "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", + "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", "cpu": [ "riscv64" ], @@ -2081,9 +2081,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", - "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", + "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", "cpu": [ "s390x" ], @@ -2095,9 +2095,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", - "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "version": "4.59.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", + "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", "cpu": [ "x64" ], @@ -2109,9 +2109,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", - "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", + "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", "cpu": [ "x64" ], @@ -2123,9 +2123,9 @@ ] }, "node_modules/@rollup/rollup-openbsd-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", - "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", + "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", "cpu": [ "x64" ], @@ -2137,9 +2137,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", - "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", + "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", "cpu": [ "arm64" ], @@ -2151,9 
+2151,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", - "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", + "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", "cpu": [ "arm64" ], @@ -2165,9 +2165,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", - "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", + "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", "cpu": [ "ia32" ], @@ -2179,9 +2179,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", - "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", + "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", "cpu": [ "x64" ], @@ -2193,9 +2193,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", - "integrity": 
"sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz", + "integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==", "cpu": [ "x64" ], @@ -3661,11 +3661,14 @@ } }, "node_modules/dompurify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.1.tgz", - "integrity": "sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.2.tgz", + "integrity": "sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==", "license": "(MPL-2.0 OR Apache-2.0)", "optional": true, + "engines": { + "node": ">=20" + }, "optionalDependencies": { "@types/trusted-types": "^2.0.7" } @@ -4200,12 +4203,12 @@ } }, "node_modules/jspdf": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/jspdf/-/jspdf-4.1.0.tgz", - "integrity": "sha512-xd1d/XRkwqnsq6FP3zH1Q+Ejqn2ULIJeDZ+FTKpaabVpZREjsJKRJwuokTNgdqOU+fl55KgbvgZ1pRTSWCP2kQ==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/jspdf/-/jspdf-4.2.0.tgz", + "integrity": "sha512-hR/hnRevAXXlrjeqU5oahOE+Ln9ORJUB5brLHHqH67A+RBQZuFr5GkbI9XQI8OUFSEezKegsi45QRpc4bGj75Q==", "license": "MIT", "dependencies": { - "@babel/runtime": "^7.28.4", + "@babel/runtime": "^7.28.6", "fast-png": "^6.2.0", "fflate": "^0.8.1" }, @@ -4441,12 +4444,12 @@ } }, "node_modules/next": { - "version": "15.5.9", - "resolved": "https://registry.npmjs.org/next/-/next-15.5.9.tgz", - "integrity": "sha512-agNLK89seZEtC5zUHwtut0+tNrc0Xw4FT/Dg+B/VLEo9pAcS9rtTKpek3V6kVcVwsB2YlqMaHdfZL4eLEVYuCg==", + "version": "15.5.12", + "resolved": "https://registry.npmjs.org/next/-/next-15.5.12.tgz", + "integrity": 
"sha512-Fi/wQ4Etlrn60rz78bebG1i1SR20QxvV8tVp6iJspjLUSHcZoeUXCt+vmWoEcza85ElZzExK/jJ/F6SvtGktjA==", "license": "MIT", "dependencies": { - "@next/env": "15.5.9", + "@next/env": "15.5.12", "@swc/helpers": "0.5.15", "caniuse-lite": "^1.0.30001579", "postcss": "8.4.31", @@ -4459,14 +4462,14 @@ "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "15.5.7", - "@next/swc-darwin-x64": "15.5.7", - "@next/swc-linux-arm64-gnu": "15.5.7", - "@next/swc-linux-arm64-musl": "15.5.7", - "@next/swc-linux-x64-gnu": "15.5.7", - "@next/swc-linux-x64-musl": "15.5.7", - "@next/swc-win32-arm64-msvc": "15.5.7", - "@next/swc-win32-x64-msvc": "15.5.7", + "@next/swc-darwin-arm64": "15.5.12", + "@next/swc-darwin-x64": "15.5.12", + "@next/swc-linux-arm64-gnu": "15.5.12", + "@next/swc-linux-arm64-musl": "15.5.12", + "@next/swc-linux-x64-gnu": "15.5.12", + "@next/swc-linux-x64-musl": "15.5.12", + "@next/swc-win32-arm64-msvc": "15.5.12", + "@next/swc-win32-x64-msvc": "15.5.12", "sharp": "^0.34.3" }, "peerDependencies": { @@ -5333,9 +5336,9 @@ } }, "node_modules/rollup": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", - "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", + "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==", "dev": true, "license": "MIT", "dependencies": { @@ -5349,31 +5352,31 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.57.1", - "@rollup/rollup-android-arm64": "4.57.1", - "@rollup/rollup-darwin-arm64": "4.57.1", - "@rollup/rollup-darwin-x64": "4.57.1", - "@rollup/rollup-freebsd-arm64": "4.57.1", - "@rollup/rollup-freebsd-x64": "4.57.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", - "@rollup/rollup-linux-arm-musleabihf": "4.57.1", 
- "@rollup/rollup-linux-arm64-gnu": "4.57.1", - "@rollup/rollup-linux-arm64-musl": "4.57.1", - "@rollup/rollup-linux-loong64-gnu": "4.57.1", - "@rollup/rollup-linux-loong64-musl": "4.57.1", - "@rollup/rollup-linux-ppc64-gnu": "4.57.1", - "@rollup/rollup-linux-ppc64-musl": "4.57.1", - "@rollup/rollup-linux-riscv64-gnu": "4.57.1", - "@rollup/rollup-linux-riscv64-musl": "4.57.1", - "@rollup/rollup-linux-s390x-gnu": "4.57.1", - "@rollup/rollup-linux-x64-gnu": "4.57.1", - "@rollup/rollup-linux-x64-musl": "4.57.1", - "@rollup/rollup-openbsd-x64": "4.57.1", - "@rollup/rollup-openharmony-arm64": "4.57.1", - "@rollup/rollup-win32-arm64-msvc": "4.57.1", - "@rollup/rollup-win32-ia32-msvc": "4.57.1", - "@rollup/rollup-win32-x64-gnu": "4.57.1", - "@rollup/rollup-win32-x64-msvc": "4.57.1", + "@rollup/rollup-android-arm-eabi": "4.59.0", + "@rollup/rollup-android-arm64": "4.59.0", + "@rollup/rollup-darwin-arm64": "4.59.0", + "@rollup/rollup-darwin-x64": "4.59.0", + "@rollup/rollup-freebsd-arm64": "4.59.0", + "@rollup/rollup-freebsd-x64": "4.59.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.59.0", + "@rollup/rollup-linux-arm-musleabihf": "4.59.0", + "@rollup/rollup-linux-arm64-gnu": "4.59.0", + "@rollup/rollup-linux-arm64-musl": "4.59.0", + "@rollup/rollup-linux-loong64-gnu": "4.59.0", + "@rollup/rollup-linux-loong64-musl": "4.59.0", + "@rollup/rollup-linux-ppc64-gnu": "4.59.0", + "@rollup/rollup-linux-ppc64-musl": "4.59.0", + "@rollup/rollup-linux-riscv64-gnu": "4.59.0", + "@rollup/rollup-linux-riscv64-musl": "4.59.0", + "@rollup/rollup-linux-s390x-gnu": "4.59.0", + "@rollup/rollup-linux-x64-gnu": "4.59.0", + "@rollup/rollup-linux-x64-musl": "4.59.0", + "@rollup/rollup-openbsd-x64": "4.59.0", + "@rollup/rollup-openharmony-arm64": "4.59.0", + "@rollup/rollup-win32-arm64-msvc": "4.59.0", + "@rollup/rollup-win32-ia32-msvc": "4.59.0", + "@rollup/rollup-win32-x64-gnu": "4.59.0", + "@rollup/rollup-win32-x64-msvc": "4.59.0", "fsevents": "~2.3.2" } }, diff --git 
a/ai-compliance-sdk/internal/api/handlers/dsb_handlers.go b/ai-compliance-sdk/internal/api/handlers/dsb_handlers.go deleted file mode 100644 index 17053f1..0000000 --- a/ai-compliance-sdk/internal/api/handlers/dsb_handlers.go +++ /dev/null @@ -1,451 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/breakpilot/ai-compliance-sdk/internal/dsb" - "github.com/gin-gonic/gin" - "github.com/google/uuid" -) - -// DSBHandlers handles DSB-as-a-Service portal HTTP requests. -type DSBHandlers struct { - store *dsb.Store -} - -// NewDSBHandlers creates new DSB handlers. -func NewDSBHandlers(store *dsb.Store) *DSBHandlers { - return &DSBHandlers{store: store} -} - -// getDSBUserID extracts and parses the X-User-ID header as UUID. -func getDSBUserID(c *gin.Context) (uuid.UUID, bool) { - userIDStr := c.GetHeader("X-User-ID") - if userIDStr == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "X-User-ID header is required"}) - return uuid.Nil, false - } - userID, err := uuid.Parse(userIDStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid X-User-ID header: must be a valid UUID"}) - return uuid.Nil, false - } - return userID, true -} - -// ============================================================================ -// Dashboard -// ============================================================================ - -// GetDashboard returns the aggregated DSB dashboard. 
-// GET /sdk/v1/dsb/dashboard -func (h *DSBHandlers) GetDashboard(c *gin.Context) { - dsbUserID, ok := getDSBUserID(c) - if !ok { - return - } - - dashboard, err := h.store.GetDashboard(c.Request.Context(), dsbUserID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, dashboard) -} - -// ============================================================================ -// Assignments -// ============================================================================ - -// CreateAssignment creates a new DSB-to-tenant assignment. -// POST /sdk/v1/dsb/assignments -func (h *DSBHandlers) CreateAssignment(c *gin.Context) { - var req dsb.CreateAssignmentRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - assignment := &dsb.Assignment{ - DSBUserID: req.DSBUserID, - TenantID: req.TenantID, - Status: req.Status, - ContractStart: req.ContractStart, - ContractEnd: req.ContractEnd, - MonthlyHoursBudget: req.MonthlyHoursBudget, - Notes: req.Notes, - } - - if err := h.store.CreateAssignment(c.Request.Context(), assignment); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{"assignment": assignment}) -} - -// ListAssignments returns all assignments for the authenticated DSB user. -// GET /sdk/v1/dsb/assignments -func (h *DSBHandlers) ListAssignments(c *gin.Context) { - dsbUserID, ok := getDSBUserID(c) - if !ok { - return - } - - assignments, err := h.store.ListAssignments(c.Request.Context(), dsbUserID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "assignments": assignments, - "total": len(assignments), - }) -} - -// GetAssignment retrieves a single assignment by ID. 
-// GET /sdk/v1/dsb/assignments/:id -func (h *DSBHandlers) GetAssignment(c *gin.Context) { - id, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - assignment, err := h.store.GetAssignment(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "assignment not found"}) - return - } - - c.JSON(http.StatusOK, gin.H{"assignment": assignment}) -} - -// UpdateAssignment updates an existing assignment. -// PUT /sdk/v1/dsb/assignments/:id -func (h *DSBHandlers) UpdateAssignment(c *gin.Context) { - id, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - assignment, err := h.store.GetAssignment(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "assignment not found"}) - return - } - - var req dsb.UpdateAssignmentRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // Apply non-nil fields - if req.Status != nil { - assignment.Status = *req.Status - } - if req.ContractEnd != nil { - assignment.ContractEnd = req.ContractEnd - } - if req.MonthlyHoursBudget != nil { - assignment.MonthlyHoursBudget = *req.MonthlyHoursBudget - } - if req.Notes != nil { - assignment.Notes = *req.Notes - } - - if err := h.store.UpdateAssignment(c.Request.Context(), assignment); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"assignment": assignment}) -} - -// ============================================================================ -// Hours -// ============================================================================ - -// CreateHourEntry creates a new time tracking entry for an assignment. 
-// POST /sdk/v1/dsb/assignments/:id/hours -func (h *DSBHandlers) CreateHourEntry(c *gin.Context) { - assignmentID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - var req dsb.CreateHourEntryRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - billable := true - if req.Billable != nil { - billable = *req.Billable - } - - entry := &dsb.HourEntry{ - AssignmentID: assignmentID, - Date: req.Date, - Hours: req.Hours, - Category: req.Category, - Description: req.Description, - Billable: billable, - } - - if err := h.store.CreateHourEntry(c.Request.Context(), entry); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{"hour_entry": entry}) -} - -// ListHours returns time entries for an assignment. -// GET /sdk/v1/dsb/assignments/:id/hours?month=YYYY-MM -func (h *DSBHandlers) ListHours(c *gin.Context) { - assignmentID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - month := c.Query("month") - - entries, err := h.store.ListHours(c.Request.Context(), assignmentID, month) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "hours": entries, - "total": len(entries), - }) -} - -// GetHoursSummary returns aggregated hour statistics for an assignment. 
-// GET /sdk/v1/dsb/assignments/:id/hours/summary?month=YYYY-MM -func (h *DSBHandlers) GetHoursSummary(c *gin.Context) { - assignmentID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - month := c.Query("month") - - summary, err := h.store.GetHoursSummary(c.Request.Context(), assignmentID, month) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, summary) -} - -// ============================================================================ -// Tasks -// ============================================================================ - -// CreateTask creates a new task for an assignment. -// POST /sdk/v1/dsb/assignments/:id/tasks -func (h *DSBHandlers) CreateTask(c *gin.Context) { - assignmentID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - var req dsb.CreateTaskRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - task := &dsb.Task{ - AssignmentID: assignmentID, - Title: req.Title, - Description: req.Description, - Category: req.Category, - Priority: req.Priority, - DueDate: req.DueDate, - } - - if err := h.store.CreateTask(c.Request.Context(), task); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{"task": task}) -} - -// ListTasks returns tasks for an assignment. 
-// GET /sdk/v1/dsb/assignments/:id/tasks?status=open -func (h *DSBHandlers) ListTasks(c *gin.Context) { - assignmentID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - status := c.Query("status") - - tasks, err := h.store.ListTasks(c.Request.Context(), assignmentID, status) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "tasks": tasks, - "total": len(tasks), - }) -} - -// UpdateTask updates an existing task. -// PUT /sdk/v1/dsb/tasks/:taskId -func (h *DSBHandlers) UpdateTask(c *gin.Context) { - taskID, err := uuid.Parse(c.Param("taskId")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid task ID"}) - return - } - - // We need to fetch the existing task first. Since tasks belong to assignments, - // we query by task ID directly. For now, we do a lightweight approach: bind the - // update request and apply changes via store. - var req dsb.UpdateTaskRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // Fetch current task by querying all tasks and filtering. Since we don't have - // a GetTask(taskID) method, we build the task from partial data and update. - // The store UpdateTask uses the task ID to locate the row. - task := &dsb.Task{ID: taskID} - - // We need to get the current values to apply partial updates correctly. - // Query the task directly. 
- row := h.store.Pool().QueryRow(c.Request.Context(), ` - SELECT id, assignment_id, title, description, category, priority, status, due_date, completed_at, created_at, updated_at - FROM dsb_tasks WHERE id = $1 - `, taskID) - - if err := row.Scan( - &task.ID, &task.AssignmentID, &task.Title, &task.Description, - &task.Category, &task.Priority, &task.Status, &task.DueDate, - &task.CompletedAt, &task.CreatedAt, &task.UpdatedAt, - ); err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "task not found"}) - return - } - - // Apply non-nil fields - if req.Title != nil { - task.Title = *req.Title - } - if req.Description != nil { - task.Description = *req.Description - } - if req.Category != nil { - task.Category = *req.Category - } - if req.Priority != nil { - task.Priority = *req.Priority - } - if req.Status != nil { - task.Status = *req.Status - } - if req.DueDate != nil { - task.DueDate = req.DueDate - } - - if err := h.store.UpdateTask(c.Request.Context(), task); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"task": task}) -} - -// CompleteTask marks a task as completed. -// POST /sdk/v1/dsb/tasks/:taskId/complete -func (h *DSBHandlers) CompleteTask(c *gin.Context) { - taskID, err := uuid.Parse(c.Param("taskId")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid task ID"}) - return - } - - if err := h.store.CompleteTask(c.Request.Context(), taskID); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"message": "task completed"}) -} - -// ============================================================================ -// Communications -// ============================================================================ - -// CreateCommunication creates a new communication log entry. 
-// POST /sdk/v1/dsb/assignments/:id/communications -func (h *DSBHandlers) CreateCommunication(c *gin.Context) { - assignmentID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - var req dsb.CreateCommunicationRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - comm := &dsb.Communication{ - AssignmentID: assignmentID, - Direction: req.Direction, - Channel: req.Channel, - Subject: req.Subject, - Content: req.Content, - Participants: req.Participants, - } - - if err := h.store.CreateCommunication(c.Request.Context(), comm); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{"communication": comm}) -} - -// ListCommunications returns all communications for an assignment. -// GET /sdk/v1/dsb/assignments/:id/communications -func (h *DSBHandlers) ListCommunications(c *gin.Context) { - assignmentID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid assignment ID"}) - return - } - - comms, err := h.store.ListCommunications(c.Request.Context(), assignmentID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "communications": comms, - "total": len(comms), - }) -} diff --git a/ai-compliance-sdk/internal/api/handlers/funding_handlers.go b/ai-compliance-sdk/internal/api/handlers/funding_handlers.go deleted file mode 100644 index 6695303..0000000 --- a/ai-compliance-sdk/internal/api/handlers/funding_handlers.go +++ /dev/null @@ -1,638 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "os" - "time" - - "github.com/breakpilot/ai-compliance-sdk/internal/funding" - "github.com/breakpilot/ai-compliance-sdk/internal/llm" - "github.com/breakpilot/ai-compliance-sdk/internal/rbac" - 
"github.com/gin-gonic/gin" - "github.com/google/uuid" - "gopkg.in/yaml.v3" -) - -// FundingHandlers handles funding application API endpoints -type FundingHandlers struct { - store funding.Store - providerRegistry *llm.ProviderRegistry - wizardSchema *WizardSchema - bundeslandProfiles map[string]*BundeslandProfile -} - -// WizardSchema represents the loaded wizard schema -type WizardSchema struct { - Metadata struct { - Version string `yaml:"version"` - Name string `yaml:"name"` - Description string `yaml:"description"` - TotalSteps int `yaml:"total_steps"` - } `yaml:"metadata"` - Steps []WizardStep `yaml:"steps"` - FundingAssistant struct { - Enabled bool `yaml:"enabled"` - Model string `yaml:"model"` - SystemPrompt string `yaml:"system_prompt"` - StepContexts map[int]string `yaml:"step_contexts"` - QuickPrompts []QuickPrompt `yaml:"quick_prompts"` - } `yaml:"funding_assistant"` - Presets map[string]Preset `yaml:"presets"` -} - -// WizardStep represents a step in the wizard -type WizardStep struct { - Number int `yaml:"number" json:"number"` - ID string `yaml:"id" json:"id"` - Title string `yaml:"title" json:"title"` - Subtitle string `yaml:"subtitle" json:"subtitle"` - Description string `yaml:"description" json:"description"` - Icon string `yaml:"icon" json:"icon"` - IsRequired bool `yaml:"is_required" json:"is_required"` - Fields []WizardField `yaml:"fields" json:"fields"` - AssistantContext string `yaml:"assistant_context" json:"assistant_context"` -} - -// WizardField represents a field in the wizard -type WizardField struct { - ID string `yaml:"id" json:"id"` - Type string `yaml:"type" json:"type"` - Label string `yaml:"label" json:"label"` - Placeholder string `yaml:"placeholder,omitempty" json:"placeholder,omitempty"` - Required bool `yaml:"required,omitempty" json:"required,omitempty"` - Options []FieldOption `yaml:"options,omitempty" json:"options,omitempty"` - HelpText string `yaml:"help_text,omitempty" json:"help_text,omitempty"` - MaxLength int 
`yaml:"max_length,omitempty" json:"max_length,omitempty"` - Min *int `yaml:"min,omitempty" json:"min,omitempty"` - Max *int `yaml:"max,omitempty" json:"max,omitempty"` - Default interface{} `yaml:"default,omitempty" json:"default,omitempty"` - Conditional string `yaml:"conditional,omitempty" json:"conditional,omitempty"` -} - -// FieldOption represents an option for select fields -type FieldOption struct { - Value string `yaml:"value" json:"value"` - Label string `yaml:"label" json:"label"` - Description string `yaml:"description,omitempty" json:"description,omitempty"` -} - -// QuickPrompt represents a quick prompt for the assistant -type QuickPrompt struct { - Label string `yaml:"label" json:"label"` - Prompt string `yaml:"prompt" json:"prompt"` -} - -// Preset represents a BreakPilot preset -type Preset struct { - ID string `yaml:"id" json:"id"` - Name string `yaml:"name" json:"name"` - Description string `yaml:"description" json:"description"` - BudgetItems []funding.BudgetItem `yaml:"budget_items" json:"budget_items"` - AutoFill map[string]interface{} `yaml:"auto_fill" json:"auto_fill"` -} - -// BundeslandProfile represents a federal state profile -type BundeslandProfile struct { - Name string `yaml:"name" json:"name"` - Short string `yaml:"short" json:"short"` - FundingPrograms []string `yaml:"funding_programs" json:"funding_programs"` - DefaultFundingRate float64 `yaml:"default_funding_rate" json:"default_funding_rate"` - RequiresMEP bool `yaml:"requires_mep" json:"requires_mep"` - ContactAuthority ContactAuthority `yaml:"contact_authority" json:"contact_authority"` - SpecialRequirements []string `yaml:"special_requirements" json:"special_requirements"` -} - -// ContactAuthority represents a contact authority -type ContactAuthority struct { - Name string `yaml:"name" json:"name"` - Department string `yaml:"department,omitempty" json:"department,omitempty"` - Website string `yaml:"website" json:"website"` - Email string `yaml:"email,omitempty" 
json:"email,omitempty"` -} - -// NewFundingHandlers creates new funding handlers -func NewFundingHandlers(store funding.Store, providerRegistry *llm.ProviderRegistry) *FundingHandlers { - h := &FundingHandlers{ - store: store, - providerRegistry: providerRegistry, - } - - // Load wizard schema - if err := h.loadWizardSchema(); err != nil { - fmt.Printf("Warning: Could not load wizard schema: %v\n", err) - } - - // Load bundesland profiles - if err := h.loadBundeslandProfiles(); err != nil { - fmt.Printf("Warning: Could not load bundesland profiles: %v\n", err) - } - - return h -} - -func (h *FundingHandlers) loadWizardSchema() error { - data, err := os.ReadFile("policies/funding/foerderantrag_wizard_v1.yaml") - if err != nil { - return err - } - - h.wizardSchema = &WizardSchema{} - return yaml.Unmarshal(data, h.wizardSchema) -} - -func (h *FundingHandlers) loadBundeslandProfiles() error { - data, err := os.ReadFile("policies/funding/bundesland_profiles.yaml") - if err != nil { - return err - } - - var profiles struct { - Bundeslaender map[string]*BundeslandProfile `yaml:"bundeslaender"` - } - if err := yaml.Unmarshal(data, &profiles); err != nil { - return err - } - - h.bundeslandProfiles = profiles.Bundeslaender - return nil -} - -// ============================================================================ -// Application CRUD -// ============================================================================ - -// CreateApplication creates a new funding application -// POST /sdk/v1/funding/applications -func (h *FundingHandlers) CreateApplication(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - userID := rbac.GetUserID(c) - if tenantID == uuid.Nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"}) - return - } - - var req funding.CreateApplicationRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - app := &funding.FundingApplication{ - TenantID: tenantID, - 
Title: req.Title, - FundingProgram: req.FundingProgram, - Status: funding.ApplicationStatusDraft, - CurrentStep: 1, - TotalSteps: 8, - WizardData: make(map[string]interface{}), - CreatedBy: userID, - UpdatedBy: userID, - } - - // Initialize school profile with federal state - app.SchoolProfile = &funding.SchoolProfile{ - FederalState: req.FederalState, - } - - // Apply preset if specified - if req.PresetID != "" && h.wizardSchema != nil { - if preset, ok := h.wizardSchema.Presets[req.PresetID]; ok { - app.Budget = &funding.Budget{ - BudgetItems: preset.BudgetItems, - } - app.WizardData["preset_id"] = req.PresetID - app.WizardData["preset_applied"] = true - for k, v := range preset.AutoFill { - app.WizardData[k] = v - } - } - } - - if err := h.store.CreateApplication(c.Request.Context(), app); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Add history entry - h.store.AddHistoryEntry(c.Request.Context(), &funding.ApplicationHistoryEntry{ - ApplicationID: app.ID, - Action: "created", - PerformedBy: userID, - Notes: "Antrag erstellt", - }) - - c.JSON(http.StatusCreated, app) -} - -// GetApplication retrieves a funding application -// GET /sdk/v1/funding/applications/:id -func (h *FundingHandlers) GetApplication(c *gin.Context) { - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - app, err := h.store.GetApplication(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, app) -} - -// ListApplications returns a list of funding applications -// GET /sdk/v1/funding/applications -func (h *FundingHandlers) ListApplications(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - if tenantID == uuid.Nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"}) - return - } - - filter := funding.ApplicationFilter{ - 
Page: 1, - PageSize: 20, - } - - // Parse query parameters - if status := c.Query("status"); status != "" { - s := funding.ApplicationStatus(status) - filter.Status = &s - } - if program := c.Query("program"); program != "" { - p := funding.FundingProgram(program) - filter.FundingProgram = &p - } - - result, err := h.store.ListApplications(c.Request.Context(), tenantID, filter) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, result) -} - -// UpdateApplication updates a funding application -// PUT /sdk/v1/funding/applications/:id -func (h *FundingHandlers) UpdateApplication(c *gin.Context) { - userID := rbac.GetUserID(c) - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - app, err := h.store.GetApplication(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) - return - } - - var req funding.UpdateApplicationRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - if req.Title != nil { - app.Title = *req.Title - } - if req.WizardData != nil { - for k, v := range req.WizardData { - app.WizardData[k] = v - } - } - if req.CurrentStep != nil { - app.CurrentStep = *req.CurrentStep - } - app.UpdatedBy = userID - - if err := h.store.UpdateApplication(c.Request.Context(), app); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, app) -} - -// DeleteApplication deletes a funding application -// DELETE /sdk/v1/funding/applications/:id -func (h *FundingHandlers) DeleteApplication(c *gin.Context) { - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - if err := 
h.store.DeleteApplication(c.Request.Context(), id); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"message": "application archived"}) -} - -// ============================================================================ -// Wizard Endpoints -// ============================================================================ - -// GetWizardSchema returns the wizard schema -// GET /sdk/v1/funding/wizard/schema -func (h *FundingHandlers) GetWizardSchema(c *gin.Context) { - if h.wizardSchema == nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "wizard schema not loaded"}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "metadata": h.wizardSchema.Metadata, - "steps": h.wizardSchema.Steps, - "presets": h.wizardSchema.Presets, - "assistant": gin.H{ - "enabled": h.wizardSchema.FundingAssistant.Enabled, - "quick_prompts": h.wizardSchema.FundingAssistant.QuickPrompts, - }, - }) -} - -// SaveWizardStep saves wizard step data -// POST /sdk/v1/funding/applications/:id/wizard -func (h *FundingHandlers) SaveWizardStep(c *gin.Context) { - userID := rbac.GetUserID(c) - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - var req funding.SaveWizardStepRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // Save step data - if err := h.store.SaveWizardStep(c.Request.Context(), id, req.Step, req.Data); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Get updated progress - progress, err := h.store.GetWizardProgress(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Add history entry - h.store.AddHistoryEntry(c.Request.Context(), &funding.ApplicationHistoryEntry{ - 
ApplicationID: id, - Action: "wizard_step_saved", - PerformedBy: userID, - Notes: fmt.Sprintf("Schritt %d gespeichert", req.Step), - }) - - c.JSON(http.StatusOK, progress) -} - -// AskAssistant handles LLM assistant queries -// POST /sdk/v1/funding/wizard/ask -func (h *FundingHandlers) AskAssistant(c *gin.Context) { - var req funding.AssistantRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - if h.wizardSchema == nil || !h.wizardSchema.FundingAssistant.Enabled { - c.JSON(http.StatusServiceUnavailable, gin.H{"error": "assistant not available"}) - return - } - - // Build system prompt with step context - systemPrompt := h.wizardSchema.FundingAssistant.SystemPrompt - if stepContext, ok := h.wizardSchema.FundingAssistant.StepContexts[req.CurrentStep]; ok { - systemPrompt += "\n\nKontext fuer diesen Schritt:\n" + stepContext - } - - // Build messages - messages := []llm.Message{ - {Role: "system", Content: systemPrompt}, - } - for _, msg := range req.History { - messages = append(messages, llm.Message{ - Role: msg.Role, - Content: msg.Content, - }) - } - messages = append(messages, llm.Message{ - Role: "user", - Content: req.Question, - }) - - // Generate response using registry - chatReq := &llm.ChatRequest{ - Messages: messages, - Temperature: 0.3, - MaxTokens: 1000, - } - - response, err := h.providerRegistry.Chat(c.Request.Context(), chatReq) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, funding.AssistantResponse{ - Answer: response.Message.Content, - }) -} - -// ============================================================================ -// Status Endpoints -// ============================================================================ - -// SubmitApplication submits an application for review -// POST /sdk/v1/funding/applications/:id/submit -func (h *FundingHandlers) SubmitApplication(c *gin.Context) 
{ - userID := rbac.GetUserID(c) - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - app, err := h.store.GetApplication(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) - return - } - - // Validate that all required steps are completed - progress, _ := h.store.GetWizardProgress(c.Request.Context(), id) - if progress == nil || len(progress.CompletedSteps) < app.TotalSteps { - c.JSON(http.StatusBadRequest, gin.H{"error": "not all required steps completed"}) - return - } - - // Update status - app.Status = funding.ApplicationStatusSubmitted - now := time.Now() - app.SubmittedAt = &now - app.UpdatedBy = userID - - if err := h.store.UpdateApplication(c.Request.Context(), app); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Add history entry - h.store.AddHistoryEntry(c.Request.Context(), &funding.ApplicationHistoryEntry{ - ApplicationID: id, - Action: "submitted", - PerformedBy: userID, - Notes: "Antrag eingereicht", - }) - - c.JSON(http.StatusOK, app) -} - -// ============================================================================ -// Export Endpoints -// ============================================================================ - -// ExportApplication exports all documents as ZIP -// GET /sdk/v1/funding/applications/:id/export -func (h *FundingHandlers) ExportApplication(c *gin.Context) { - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - app, err := h.store.GetApplication(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) - return - } - - // Generate export (this will be implemented in export.go) - // For now, return a placeholder response - c.JSON(http.StatusOK, gin.H{ - "message": 
"Export generation initiated", - "application_id": app.ID, - "status": "processing", - }) -} - -// PreviewApplication generates a PDF preview -// GET /sdk/v1/funding/applications/:id/preview -func (h *FundingHandlers) PreviewApplication(c *gin.Context) { - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - app, err := h.store.GetApplication(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) - return - } - - // Generate PDF preview (placeholder) - c.JSON(http.StatusOK, gin.H{ - "message": "Preview generation initiated", - "application_id": app.ID, - }) -} - -// ============================================================================ -// Bundesland Profile Endpoints -// ============================================================================ - -// GetBundeslandProfiles returns all bundesland profiles -// GET /sdk/v1/funding/bundeslaender -func (h *FundingHandlers) GetBundeslandProfiles(c *gin.Context) { - if h.bundeslandProfiles == nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "bundesland profiles not loaded"}) - return - } - - c.JSON(http.StatusOK, h.bundeslandProfiles) -} - -// GetBundeslandProfile returns a specific bundesland profile -// GET /sdk/v1/funding/bundeslaender/:state -func (h *FundingHandlers) GetBundeslandProfile(c *gin.Context) { - state := c.Param("state") - - if h.bundeslandProfiles == nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "bundesland profiles not loaded"}) - return - } - - profile, ok := h.bundeslandProfiles[state] - if !ok { - c.JSON(http.StatusNotFound, gin.H{"error": "bundesland not found"}) - return - } - - c.JSON(http.StatusOK, profile) -} - -// ============================================================================ -// Statistics Endpoint -// ============================================================================ - -// 
GetStatistics returns funding statistics -// GET /sdk/v1/funding/statistics -func (h *FundingHandlers) GetStatistics(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - if tenantID == uuid.Nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"}) - return - } - - stats, err := h.store.GetStatistics(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, stats) -} - -// ============================================================================ -// History Endpoint -// ============================================================================ - -// GetApplicationHistory returns the audit trail -// GET /sdk/v1/funding/applications/:id/history -func (h *FundingHandlers) GetApplicationHistory(c *gin.Context) { - idStr := c.Param("id") - id, err := uuid.Parse(idStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid application ID"}) - return - } - - history, err := h.store.GetHistory(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, history) -} diff --git a/ai-compliance-sdk/internal/api/handlers/gci_handlers.go b/ai-compliance-sdk/internal/api/handlers/gci_handlers.go deleted file mode 100644 index 538b8d3..0000000 --- a/ai-compliance-sdk/internal/api/handlers/gci_handlers.go +++ /dev/null @@ -1,188 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/breakpilot/ai-compliance-sdk/internal/gci" - "github.com/breakpilot/ai-compliance-sdk/internal/rbac" - "github.com/gin-gonic/gin" -) - -type GCIHandlers struct { - engine *gci.Engine -} - -func NewGCIHandlers(engine *gci.Engine) *GCIHandlers { - return &GCIHandlers{engine: engine} -} - -// GetScore returns the GCI score for the current tenant -// GET /sdk/v1/gci/score -func (h *GCIHandlers) GetScore(c *gin.Context) { - tenantID := rbac.GetTenantID(c).String() - 
profile := c.DefaultQuery("profile", "default") - - result := h.engine.Calculate(tenantID, profile) - c.JSON(http.StatusOK, result) -} - -// GetScoreBreakdown returns the detailed 4-level GCI breakdown -// GET /sdk/v1/gci/score/breakdown -func (h *GCIHandlers) GetScoreBreakdown(c *gin.Context) { - tenantID := rbac.GetTenantID(c).String() - profile := c.DefaultQuery("profile", "default") - - breakdown := h.engine.CalculateBreakdown(tenantID, profile) - c.JSON(http.StatusOK, breakdown) -} - -// GetHistory returns historical GCI snapshots for trend analysis -// GET /sdk/v1/gci/score/history -func (h *GCIHandlers) GetHistory(c *gin.Context) { - tenantID := rbac.GetTenantID(c).String() - - history := h.engine.GetHistory(tenantID) - c.JSON(http.StatusOK, gin.H{ - "tenant_id": tenantID, - "snapshots": history, - "total": len(history), - }) -} - -// GetMatrix returns the compliance matrix (roles x regulations) -// GET /sdk/v1/gci/matrix -func (h *GCIHandlers) GetMatrix(c *gin.Context) { - tenantID := rbac.GetTenantID(c).String() - - matrix := h.engine.GetMatrix(tenantID) - c.JSON(http.StatusOK, gin.H{ - "tenant_id": tenantID, - "matrix": matrix, - }) -} - -// GetAuditTrail returns the audit trail for the latest GCI calculation -// GET /sdk/v1/gci/audit-trail -func (h *GCIHandlers) GetAuditTrail(c *gin.Context) { - tenantID := rbac.GetTenantID(c).String() - profile := c.DefaultQuery("profile", "default") - - result := h.engine.Calculate(tenantID, profile) - c.JSON(http.StatusOK, gin.H{ - "tenant_id": tenantID, - "gci_score": result.GCIScore, - "audit_trail": result.AuditTrail, - }) -} - -// GetNIS2Score returns the NIS2-specific compliance score -// GET /sdk/v1/gci/nis2/score -func (h *GCIHandlers) GetNIS2Score(c *gin.Context) { - tenantID := rbac.GetTenantID(c).String() - - score := gci.CalculateNIS2Score(tenantID) - c.JSON(http.StatusOK, score) -} - -// ListNIS2Roles returns available NIS2 responsibility roles -// GET /sdk/v1/gci/nis2/roles -func (h *GCIHandlers) 
ListNIS2Roles(c *gin.Context) { - roles := gci.ListNIS2Roles() - c.JSON(http.StatusOK, gin.H{ - "roles": roles, - "total": len(roles), - }) -} - -// AssignNIS2Role assigns a NIS2 role to a user (stub - returns mock) -// POST /sdk/v1/gci/nis2/roles/assign -func (h *GCIHandlers) AssignNIS2Role(c *gin.Context) { - var req struct { - RoleID string `json:"role_id" binding:"required"` - UserID string `json:"user_id" binding:"required"` - } - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - role, found := gci.GetNIS2Role(req.RoleID) - if !found { - c.JSON(http.StatusNotFound, gin.H{"error": "NIS2 role not found"}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "status": "assigned", - "role": role, - "user_id": req.UserID, - }) -} - -// GetISOGapAnalysis returns the ISO 27001 gap analysis -// GET /sdk/v1/gci/iso/gap-analysis -func (h *GCIHandlers) GetISOGapAnalysis(c *gin.Context) { - tenantID := rbac.GetTenantID(c).String() - - analysis := gci.CalculateISOGapAnalysis(tenantID) - c.JSON(http.StatusOK, analysis) -} - -// ListISOMappings returns all ISO 27001 control mappings -// GET /sdk/v1/gci/iso/mappings -func (h *GCIHandlers) ListISOMappings(c *gin.Context) { - category := c.Query("category") - - if category != "" { - controls := gci.GetISOControlsByCategory(category) - c.JSON(http.StatusOK, gin.H{ - "controls": controls, - "total": len(controls), - "category": category, - }) - return - } - - categories := []string{"A.5", "A.6", "A.7", "A.8"} - result := make(map[string][]gci.ISOControl) - total := 0 - for _, cat := range categories { - controls := gci.GetISOControlsByCategory(cat) - if len(controls) > 0 { - result[cat] = controls - total += len(controls) - } - } - - c.JSON(http.StatusOK, gin.H{ - "categories": result, - "total": total, - }) -} - -// GetISOMapping returns a single ISO control by ID -// GET /sdk/v1/gci/iso/mappings/:controlId -func (h *GCIHandlers) GetISOMapping(c *gin.Context) 
{ - controlID := c.Param("controlId") - - control, found := gci.GetISOControlByID(controlID) - if !found { - c.JSON(http.StatusNotFound, gin.H{"error": "ISO control not found"}) - return - } - - c.JSON(http.StatusOK, control) -} - -// GetWeightProfiles returns available weighting profiles -// GET /sdk/v1/gci/profiles -func (h *GCIHandlers) GetWeightProfiles(c *gin.Context) { - profiles := []string{"default", "nis2_relevant", "ki_nutzer"} - result := make([]gci.WeightProfile, 0, len(profiles)) - for _, id := range profiles { - result = append(result, gci.GetProfile(id)) - } - c.JSON(http.StatusOK, gin.H{ - "profiles": result, - }) -} diff --git a/ai-compliance-sdk/internal/api/handlers/industry_handlers.go b/ai-compliance-sdk/internal/api/handlers/industry_handlers.go deleted file mode 100644 index 3d8e61e..0000000 --- a/ai-compliance-sdk/internal/api/handlers/industry_handlers.go +++ /dev/null @@ -1,115 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/breakpilot/ai-compliance-sdk/internal/industry" - "github.com/gin-gonic/gin" -) - -// IndustryHandlers handles industry-specific compliance template requests. -// All data is static (embedded Go structs), so no store/database is needed. -type IndustryHandlers struct{} - -// NewIndustryHandlers creates new industry handlers -func NewIndustryHandlers() *IndustryHandlers { - return &IndustryHandlers{} -} - -// ============================================================================ -// Industry Template Endpoints -// ============================================================================ - -// ListIndustries returns a summary list of all available industry templates. 
-// GET /sdk/v1/industries -func (h *IndustryHandlers) ListIndustries(c *gin.Context) { - templates := industry.GetAllTemplates() - - summaries := make([]industry.IndustrySummary, 0, len(templates)) - for _, t := range templates { - summaries = append(summaries, industry.IndustrySummary{ - Slug: t.Slug, - Name: t.Name, - Description: t.Description, - Icon: t.Icon, - RegulationCount: len(t.Regulations), - TemplateCount: len(t.VVTTemplates), - }) - } - - c.JSON(http.StatusOK, industry.IndustryListResponse{ - Industries: summaries, - Total: len(summaries), - }) -} - -// GetIndustry returns the full industry template for a given slug. -// GET /sdk/v1/industries/:slug -func (h *IndustryHandlers) GetIndustry(c *gin.Context) { - slug := c.Param("slug") - - tmpl := industry.GetTemplateBySlug(slug) - if tmpl == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": slug}) - return - } - - c.JSON(http.StatusOK, tmpl) -} - -// GetVVTTemplates returns only the VVT templates for a given industry. -// GET /sdk/v1/industries/:slug/vvt-templates -func (h *IndustryHandlers) GetVVTTemplates(c *gin.Context) { - slug := c.Param("slug") - - tmpl := industry.GetTemplateBySlug(slug) - if tmpl == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": slug}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "slug": tmpl.Slug, - "industry": tmpl.Name, - "vvt_templates": tmpl.VVTTemplates, - "total": len(tmpl.VVTTemplates), - }) -} - -// GetTOMRecommendations returns only the TOM recommendations for a given industry. 
-// GET /sdk/v1/industries/:slug/tom-recommendations -func (h *IndustryHandlers) GetTOMRecommendations(c *gin.Context) { - slug := c.Param("slug") - - tmpl := industry.GetTemplateBySlug(slug) - if tmpl == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": slug}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "slug": tmpl.Slug, - "industry": tmpl.Name, - "tom_recommendations": tmpl.TOMRecommendations, - "total": len(tmpl.TOMRecommendations), - }) -} - -// GetRiskScenarios returns only the risk scenarios for a given industry. -// GET /sdk/v1/industries/:slug/risk-scenarios -func (h *IndustryHandlers) GetRiskScenarios(c *gin.Context) { - slug := c.Param("slug") - - tmpl := industry.GetTemplateBySlug(slug) - if tmpl == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "industry template not found", "slug": slug}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "slug": tmpl.Slug, - "industry": tmpl.Name, - "risk_scenarios": tmpl.RiskScenarios, - "total": len(tmpl.RiskScenarios), - }) -} diff --git a/ai-compliance-sdk/internal/api/handlers/multitenant_handlers.go b/ai-compliance-sdk/internal/api/handlers/multitenant_handlers.go deleted file mode 100644 index 377cd11..0000000 --- a/ai-compliance-sdk/internal/api/handlers/multitenant_handlers.go +++ /dev/null @@ -1,268 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/breakpilot/ai-compliance-sdk/internal/multitenant" - "github.com/breakpilot/ai-compliance-sdk/internal/rbac" - "github.com/gin-gonic/gin" - "github.com/google/uuid" -) - -// MultiTenantHandlers handles multi-tenant administration endpoints. -type MultiTenantHandlers struct { - store *multitenant.Store - rbacStore *rbac.Store -} - -// NewMultiTenantHandlers creates new multi-tenant handlers. 
-func NewMultiTenantHandlers(store *multitenant.Store, rbacStore *rbac.Store) *MultiTenantHandlers { - return &MultiTenantHandlers{ - store: store, - rbacStore: rbacStore, - } -} - -// GetOverview returns all tenants with compliance scores and module highlights. -// GET /sdk/v1/multi-tenant/overview -func (h *MultiTenantHandlers) GetOverview(c *gin.Context) { - overview, err := h.store.GetOverview(c.Request.Context()) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, overview) -} - -// GetTenantDetail returns detailed compliance info for one tenant. -// GET /sdk/v1/multi-tenant/tenants/:id -func (h *MultiTenantHandlers) GetTenantDetail(c *gin.Context) { - id, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"}) - return - } - - detail, err := h.store.GetTenantDetail(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"}) - return - } - - c.JSON(http.StatusOK, detail) -} - -// CreateTenant creates a new tenant with default setup. -// It creates the tenant via the RBAC store and then creates a default "main" namespace. 
-// POST /sdk/v1/multi-tenant/tenants -func (h *MultiTenantHandlers) CreateTenant(c *gin.Context) { - var req multitenant.CreateTenantRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // Build the tenant from the request - tenant := &rbac.Tenant{ - Name: req.Name, - Slug: req.Slug, - MaxUsers: req.MaxUsers, - LLMQuotaMonthly: req.LLMQuotaMonthly, - } - - // Create tenant via RBAC store (assigns ID, timestamps, defaults) - if err := h.rbacStore.CreateTenant(c.Request.Context(), tenant); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Create default "main" namespace for the new tenant - defaultNamespace := &rbac.Namespace{ - TenantID: tenant.ID, - Name: "Main", - Slug: "main", - } - if err := h.rbacStore.CreateNamespace(c.Request.Context(), defaultNamespace); err != nil { - // Tenant was created successfully but namespace creation failed. - // Log and continue -- the tenant is still usable. - c.JSON(http.StatusCreated, gin.H{ - "tenant": tenant, - "warning": "tenant created but default namespace creation failed: " + err.Error(), - }) - return - } - - c.JSON(http.StatusCreated, gin.H{ - "tenant": tenant, - "namespace": defaultNamespace, - }) -} - -// UpdateTenant performs a partial update of tenant settings. -// Only non-nil fields in the request body are applied. 
-// PUT /sdk/v1/multi-tenant/tenants/:id -func (h *MultiTenantHandlers) UpdateTenant(c *gin.Context) { - id, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"}) - return - } - - var req multitenant.UpdateTenantRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // Fetch the existing tenant so we can apply partial updates - tenant, err := h.rbacStore.GetTenant(c.Request.Context(), id) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"}) - return - } - - // Apply only the fields that were provided - if req.Name != nil { - tenant.Name = *req.Name - } - if req.MaxUsers != nil { - tenant.MaxUsers = *req.MaxUsers - } - if req.LLMQuotaMonthly != nil { - tenant.LLMQuotaMonthly = *req.LLMQuotaMonthly - } - if req.Status != nil { - tenant.Status = rbac.TenantStatus(*req.Status) - } - - if err := h.rbacStore.UpdateTenant(c.Request.Context(), tenant); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, tenant) -} - -// ListNamespaces returns all namespaces for a specific tenant. -// GET /sdk/v1/multi-tenant/tenants/:id/namespaces -func (h *MultiTenantHandlers) ListNamespaces(c *gin.Context) { - tenantID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"}) - return - } - - namespaces, err := h.rbacStore.ListNamespaces(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "namespaces": namespaces, - "total": len(namespaces), - }) -} - -// CreateNamespace creates a new namespace within a tenant. 
-// POST /sdk/v1/multi-tenant/tenants/:id/namespaces -func (h *MultiTenantHandlers) CreateNamespace(c *gin.Context) { - tenantID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"}) - return - } - - // Verify the tenant exists - _, err = h.rbacStore.GetTenant(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"}) - return - } - - var req multitenant.CreateNamespaceRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - namespace := &rbac.Namespace{ - TenantID: tenantID, - Name: req.Name, - Slug: req.Slug, - } - - // Apply optional fields if provided - if req.IsolationLevel != "" { - namespace.IsolationLevel = rbac.IsolationLevel(req.IsolationLevel) - } - if req.DataClassification != "" { - namespace.DataClassification = rbac.DataClassification(req.DataClassification) - } - - if err := h.rbacStore.CreateNamespace(c.Request.Context(), namespace); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusCreated, namespace) -} - -// SwitchTenant returns the tenant info needed for the frontend to switch context. -// The caller provides a tenant_id and receives back the tenant details needed -// to update the frontend's active tenant state. 
-// POST /sdk/v1/multi-tenant/switch -func (h *MultiTenantHandlers) SwitchTenant(c *gin.Context) { - var req multitenant.SwitchTenantRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - tenantID, err := uuid.Parse(req.TenantID) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant ID"}) - return - } - - tenant, err := h.rbacStore.GetTenant(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "tenant not found"}) - return - } - - // Verify the tenant is active - if tenant.Status != rbac.TenantStatusActive { - c.JSON(http.StatusForbidden, gin.H{ - "error": "tenant not active", - "status": string(tenant.Status), - }) - return - } - - // Get namespaces for the tenant so the frontend can populate namespace selectors - namespaces, err := h.rbacStore.ListNamespaces(c.Request.Context(), tenantID) - if err != nil { - // Non-fatal: return tenant info without namespaces - c.JSON(http.StatusOK, gin.H{ - "tenant": multitenant.SwitchTenantResponse{ - TenantID: tenant.ID, - TenantName: tenant.Name, - TenantSlug: tenant.Slug, - Status: string(tenant.Status), - }, - }) - return - } - - c.JSON(http.StatusOK, gin.H{ - "tenant": multitenant.SwitchTenantResponse{ - TenantID: tenant.ID, - TenantName: tenant.Name, - TenantSlug: tenant.Slug, - Status: string(tenant.Status), - }, - "namespaces": namespaces, - }) -} diff --git a/ai-compliance-sdk/internal/api/handlers/reporting_handlers.go b/ai-compliance-sdk/internal/api/handlers/reporting_handlers.go deleted file mode 100644 index 723ad5a..0000000 --- a/ai-compliance-sdk/internal/api/handlers/reporting_handlers.go +++ /dev/null @@ -1,97 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/breakpilot/ai-compliance-sdk/internal/rbac" - "github.com/breakpilot/ai-compliance-sdk/internal/reporting" - "github.com/gin-gonic/gin" - "github.com/google/uuid" -) - -type ReportingHandlers 
struct { - store *reporting.Store -} - -func NewReportingHandlers(store *reporting.Store) *ReportingHandlers { - return &ReportingHandlers{store: store} -} - -// GetExecutiveReport generates a comprehensive compliance report -// GET /sdk/v1/reporting/executive -func (h *ReportingHandlers) GetExecutiveReport(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - if tenantID == uuid.Nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"}) - return - } - - report, err := h.store.GenerateReport(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, report) -} - -// GetComplianceScore returns just the overall compliance score -// GET /sdk/v1/reporting/score -func (h *ReportingHandlers) GetComplianceScore(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - if tenantID == uuid.Nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"}) - return - } - - report, err := h.store.GenerateReport(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "compliance_score": report.ComplianceScore, - "risk_level": report.RiskOverview.OverallLevel, - "generated_at": report.GeneratedAt, - }) -} - -// GetUpcomingDeadlines returns deadlines across all modules -// GET /sdk/v1/reporting/deadlines -func (h *ReportingHandlers) GetUpcomingDeadlines(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - if tenantID == uuid.Nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"}) - return - } - - report, err := h.store.GenerateReport(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "deadlines": report.UpcomingDeadlines, - "total": len(report.UpcomingDeadlines), - }) -} - -// GetRiskOverview returns the aggregated 
risk assessment -// GET /sdk/v1/reporting/risks -func (h *ReportingHandlers) GetRiskOverview(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - if tenantID == uuid.Nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant ID required"}) - return - } - - report, err := h.store.GenerateReport(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, report.RiskOverview) -} diff --git a/ai-compliance-sdk/internal/api/handlers/sso_handlers.go b/ai-compliance-sdk/internal/api/handlers/sso_handlers.go deleted file mode 100644 index fc39483..0000000 --- a/ai-compliance-sdk/internal/api/handlers/sso_handlers.go +++ /dev/null @@ -1,631 +0,0 @@ -package handlers - -import ( - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "github.com/breakpilot/ai-compliance-sdk/internal/rbac" - "github.com/breakpilot/ai-compliance-sdk/internal/sso" - "github.com/gin-gonic/gin" - "github.com/golang-jwt/jwt/v5" - "github.com/google/uuid" -) - -// SSOHandlers handles SSO-related HTTP requests. -type SSOHandlers struct { - store *sso.Store - jwtSecret string -} - -// NewSSOHandlers creates new SSO handlers. -func NewSSOHandlers(store *sso.Store, jwtSecret string) *SSOHandlers { - return &SSOHandlers{store: store, jwtSecret: jwtSecret} -} - -// ============================================================================ -// SSO Configuration CRUD -// ============================================================================ - -// CreateConfig creates a new SSO configuration for the tenant. 
-// POST /sdk/v1/sso/configs -func (h *SSOHandlers) CreateConfig(c *gin.Context) { - var req sso.CreateSSOConfigRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - tenantID := rbac.GetTenantID(c) - - cfg, err := h.store.CreateConfig(c.Request.Context(), tenantID, &req) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{"config": cfg}) -} - -// ListConfigs lists all SSO configurations for the tenant. -// GET /sdk/v1/sso/configs -func (h *SSOHandlers) ListConfigs(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - - configs, err := h.store.ListConfigs(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "configs": configs, - "total": len(configs), - }) -} - -// GetConfig retrieves an SSO configuration by ID. -// GET /sdk/v1/sso/configs/:id -func (h *SSOHandlers) GetConfig(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - - configID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid config ID"}) - return - } - - cfg, err := h.store.GetConfig(c.Request.Context(), tenantID, configID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - if cfg == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "sso configuration not found"}) - return - } - - c.JSON(http.StatusOK, gin.H{"config": cfg}) -} - -// UpdateConfig updates an SSO configuration. 
-// PUT /sdk/v1/sso/configs/:id -func (h *SSOHandlers) UpdateConfig(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - - configID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid config ID"}) - return - } - - var req sso.UpdateSSOConfigRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - cfg, err := h.store.UpdateConfig(c.Request.Context(), tenantID, configID, &req) - if err != nil { - if err.Error() == "sso configuration not found" { - c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) - return - } - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"config": cfg}) -} - -// DeleteConfig deletes an SSO configuration. -// DELETE /sdk/v1/sso/configs/:id -func (h *SSOHandlers) DeleteConfig(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - - configID, err := uuid.Parse(c.Param("id")) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid config ID"}) - return - } - - if err := h.store.DeleteConfig(c.Request.Context(), tenantID, configID); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"message": "sso configuration deleted"}) -} - -// ============================================================================ -// SSO Users -// ============================================================================ - -// ListUsers lists all SSO-provisioned users for the tenant. 
-// GET /sdk/v1/sso/users -func (h *SSOHandlers) ListUsers(c *gin.Context) { - tenantID := rbac.GetTenantID(c) - - users, err := h.store.ListUsers(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "users": users, - "total": len(users), - }) -} - -// ============================================================================ -// OIDC Flow -// ============================================================================ - -// InitiateOIDCLogin initiates the OIDC authorization code flow. -// It looks up the enabled SSO config for the tenant, builds the authorization -// URL, sets a state cookie, and redirects the user to the IdP. -// GET /sdk/v1/sso/oidc/login -func (h *SSOHandlers) InitiateOIDCLogin(c *gin.Context) { - // Resolve tenant ID from query param - tenantIDStr := c.Query("tenant_id") - if tenantIDStr == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "tenant_id query parameter is required"}) - return - } - - tenantID, err := uuid.Parse(tenantIDStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant_id"}) - return - } - - // Look up the enabled SSO config - cfg, err := h.store.GetEnabledConfig(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - if cfg == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "no enabled SSO configuration found for this tenant"}) - return - } - if cfg.ProviderType != sso.ProviderTypeOIDC { - c.JSON(http.StatusBadRequest, gin.H{"error": "SSO configuration is not OIDC"}) - return - } - - // Discover the authorization endpoint - discoveryURL := strings.TrimSuffix(cfg.OIDCIssuerURL, "/") + "/.well-known/openid-configuration" - authEndpoint, _, _, err := discoverOIDCEndpoints(discoveryURL) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("OIDC discovery failed: %v", 
err)}) - return - } - - // Generate state parameter (random bytes + tenant_id for correlation) - stateBytes := make([]byte, 32) - if _, err := rand.Read(stateBytes); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate state"}) - return - } - state := base64.URLEncoding.EncodeToString(stateBytes) + "." + tenantID.String() - - // Generate nonce - nonceBytes := make([]byte, 16) - if _, err := rand.Read(nonceBytes); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate nonce"}) - return - } - nonce := base64.URLEncoding.EncodeToString(nonceBytes) - - // Build authorization URL - scopes := cfg.OIDCScopes - if len(scopes) == 0 { - scopes = []string{"openid", "profile", "email"} - } - - params := url.Values{ - "client_id": {cfg.OIDCClientID}, - "redirect_uri": {cfg.OIDCRedirectURI}, - "response_type": {"code"}, - "scope": {strings.Join(scopes, " ")}, - "state": {state}, - "nonce": {nonce}, - } - - authURL := authEndpoint + "?" + params.Encode() - - // Set state cookie for CSRF protection (HttpOnly, 10 min expiry) - c.SetCookie("sso_state", state, 600, "/", "", true, true) - c.SetCookie("sso_nonce", nonce, 600, "/", "", true, true) - - c.Redirect(http.StatusFound, authURL) -} - -// HandleOIDCCallback handles the OIDC authorization code callback from the IdP. -// It validates the state, exchanges the code for tokens, extracts user info, -// performs JIT user provisioning, and issues a JWT. 
-// GET /sdk/v1/sso/oidc/callback -func (h *SSOHandlers) HandleOIDCCallback(c *gin.Context) { - // Check for errors from the IdP - if errParam := c.Query("error"); errParam != "" { - errDesc := c.Query("error_description") - c.JSON(http.StatusBadRequest, gin.H{ - "error": errParam, - "description": errDesc, - }) - return - } - - code := c.Query("code") - stateParam := c.Query("state") - if code == "" || stateParam == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "missing code or state parameter"}) - return - } - - // Validate state cookie - stateCookie, err := c.Cookie("sso_state") - if err != nil || stateCookie != stateParam { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid state parameter (CSRF check failed)"}) - return - } - - // Extract tenant ID from state - parts := strings.SplitN(stateParam, ".", 2) - if len(parts) != 2 { - c.JSON(http.StatusBadRequest, gin.H{"error": "malformed state parameter"}) - return - } - tenantID, err := uuid.Parse(parts[1]) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid tenant_id in state"}) - return - } - - // Look up the enabled SSO config - cfg, err := h.store.GetEnabledConfig(c.Request.Context(), tenantID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - if cfg == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "no enabled SSO configuration found"}) - return - } - - // Discover OIDC endpoints - discoveryURL := strings.TrimSuffix(cfg.OIDCIssuerURL, "/") + "/.well-known/openid-configuration" - _, tokenEndpoint, userInfoEndpoint, err := discoverOIDCEndpoints(discoveryURL) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("OIDC discovery failed: %v", err)}) - return - } - - // Exchange authorization code for tokens - tokenResp, err := exchangeCodeForTokens(tokenEndpoint, code, cfg.OIDCClientID, cfg.OIDCClientSecret, cfg.OIDCRedirectURI) - if err != nil { - c.JSON(http.StatusInternalServerError, 
gin.H{"error": fmt.Sprintf("token exchange failed: %v", err)}) - return - } - - // Extract user claims from ID token or UserInfo endpoint - claims, err := extractUserClaims(tokenResp, userInfoEndpoint) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to extract user claims: %v", err)}) - return - } - - sub := getStringClaim(claims, "sub") - email := getStringClaim(claims, "email") - name := getStringClaim(claims, "name") - groups := getStringSliceClaim(claims, "groups") - - if sub == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "ID token missing 'sub' claim"}) - return - } - if email == "" { - email = sub - } - if name == "" { - name = email - } - - // JIT provision the user - user, err := h.store.UpsertUser(c.Request.Context(), tenantID, cfg.ID, sub, email, name, groups) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("user provisioning failed: %v", err)}) - return - } - - // Determine roles from role mapping - roles := resolveRoles(cfg, groups) - - // Generate JWT - ssoClaims := sso.SSOClaims{ - UserID: user.ID, - TenantID: tenantID, - Email: user.Email, - DisplayName: user.DisplayName, - Roles: roles, - SSOConfigID: cfg.ID, - } - - jwtToken, err := h.generateJWT(ssoClaims) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("JWT generation failed: %v", err)}) - return - } - - // Clear state cookies - c.SetCookie("sso_state", "", -1, "/", "", true, true) - c.SetCookie("sso_nonce", "", -1, "/", "", true, true) - - // Return JWT as JSON (the frontend can also handle redirect) - c.JSON(http.StatusOK, gin.H{ - "token": jwtToken, - "user": user, - "roles": roles, - }) -} - -// ============================================================================ -// JWT Generation -// ============================================================================ - -// generateJWT creates a signed JWT token containing the SSO claims. 
-func (h *SSOHandlers) generateJWT(claims sso.SSOClaims) (string, error) { - now := time.Now().UTC() - expiry := now.Add(24 * time.Hour) - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "user_id": claims.UserID.String(), - "tenant_id": claims.TenantID.String(), - "email": claims.Email, - "display_name": claims.DisplayName, - "roles": claims.Roles, - "sso_config_id": claims.SSOConfigID.String(), - "iss": "ai-compliance-sdk", - "iat": now.Unix(), - "exp": expiry.Unix(), - }) - - tokenString, err := token.SignedString([]byte(h.jwtSecret)) - if err != nil { - return "", fmt.Errorf("failed to sign JWT: %w", err) - } - - return tokenString, nil -} - -// ============================================================================ -// OIDC Discovery & Token Exchange (manual HTTP, no external OIDC library) -// ============================================================================ - -// oidcDiscoveryResponse holds the relevant fields from the OIDC discovery document. -type oidcDiscoveryResponse struct { - AuthorizationEndpoint string `json:"authorization_endpoint"` - TokenEndpoint string `json:"token_endpoint"` - UserinfoEndpoint string `json:"userinfo_endpoint"` - JwksURI string `json:"jwks_uri"` - Issuer string `json:"issuer"` -} - -// discoverOIDCEndpoints fetches the OIDC discovery document and returns -// the authorization, token, and userinfo endpoints. 
-func discoverOIDCEndpoints(discoveryURL string) (authEndpoint, tokenEndpoint, userInfoEndpoint string, err error) { - client := &http.Client{Timeout: 10 * time.Second} - - resp, err := client.Get(discoveryURL) - if err != nil { - return "", "", "", fmt.Errorf("failed to fetch discovery document: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return "", "", "", fmt.Errorf("discovery endpoint returned %d: %s", resp.StatusCode, string(body)) - } - - var discovery oidcDiscoveryResponse - if err := json.NewDecoder(resp.Body).Decode(&discovery); err != nil { - return "", "", "", fmt.Errorf("failed to decode discovery document: %w", err) - } - - if discovery.AuthorizationEndpoint == "" { - return "", "", "", fmt.Errorf("discovery document missing authorization_endpoint") - } - if discovery.TokenEndpoint == "" { - return "", "", "", fmt.Errorf("discovery document missing token_endpoint") - } - - return discovery.AuthorizationEndpoint, discovery.TokenEndpoint, discovery.UserinfoEndpoint, nil -} - -// oidcTokenResponse holds the response from the OIDC token endpoint. -type oidcTokenResponse struct { - AccessToken string `json:"access_token"` - IDToken string `json:"id_token"` - TokenType string `json:"token_type"` - ExpiresIn int `json:"expires_in"` - RefreshToken string `json:"refresh_token,omitempty"` -} - -// exchangeCodeForTokens exchanges an authorization code for tokens at the token endpoint. 
-func exchangeCodeForTokens(tokenEndpoint, code, clientID, clientSecret, redirectURI string) (*oidcTokenResponse, error) { - client := &http.Client{Timeout: 10 * time.Second} - - data := url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "client_id": {clientID}, - "redirect_uri": {redirectURI}, - } - - req, err := http.NewRequest("POST", tokenEndpoint, strings.NewReader(data.Encode())) - if err != nil { - return nil, fmt.Errorf("failed to create token request: %w", err) - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - // Use client_secret_basic if provided - if clientSecret != "" { - req.SetBasicAuth(clientID, clientSecret) - } - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("token request failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("token endpoint returned %d: %s", resp.StatusCode, string(body)) - } - - var tokenResp oidcTokenResponse - if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil { - return nil, fmt.Errorf("failed to decode token response: %w", err) - } - - return &tokenResp, nil -} - -// extractUserClaims extracts user claims from the ID token payload. -// If the ID token is unavailable or incomplete, it falls back to the UserInfo endpoint. 
-func extractUserClaims(tokenResp *oidcTokenResponse, userInfoEndpoint string) (map[string]interface{}, error) { - claims := make(map[string]interface{}) - - // Try to decode ID token payload (without signature verification for claims extraction; - // in production, you should verify the signature using the JWKS endpoint) - if tokenResp.IDToken != "" { - parts := strings.Split(tokenResp.IDToken, ".") - if len(parts) == 3 { - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err == nil { - if err := json.Unmarshal(payload, &claims); err == nil && claims["sub"] != nil { - return claims, nil - } - } - } - } - - // Fallback to UserInfo endpoint - if userInfoEndpoint != "" && tokenResp.AccessToken != "" { - userClaims, err := fetchUserInfo(userInfoEndpoint, tokenResp.AccessToken) - if err == nil && userClaims["sub"] != nil { - return userClaims, nil - } - } - - if claims["sub"] != nil { - return claims, nil - } - - return nil, fmt.Errorf("could not extract user claims from ID token or UserInfo endpoint") -} - -// fetchUserInfo calls the OIDC UserInfo endpoint with the access token. 
-func fetchUserInfo(userInfoEndpoint, accessToken string) (map[string]interface{}, error) { - client := &http.Client{Timeout: 10 * time.Second} - - req, err := http.NewRequest("GET", userInfoEndpoint, nil) - if err != nil { - return nil, err - } - req.Header.Set("Authorization", "Bearer "+accessToken) - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("userinfo endpoint returned %d", resp.StatusCode) - } - - var claims map[string]interface{} - if err := json.NewDecoder(resp.Body).Decode(&claims); err != nil { - return nil, err - } - - return claims, nil -} - -// ============================================================================ -// Claim Extraction Helpers -// ============================================================================ - -// getStringClaim extracts a string claim from a claims map. -func getStringClaim(claims map[string]interface{}, key string) string { - if v, ok := claims[key]; ok { - if s, ok := v.(string); ok { - return s - } - } - return "" -} - -// getStringSliceClaim extracts a string slice claim from a claims map. -func getStringSliceClaim(claims map[string]interface{}, key string) []string { - v, ok := claims[key] - if !ok { - return nil - } - - switch val := v.(type) { - case []interface{}: - result := make([]string, 0, len(val)) - for _, item := range val { - if s, ok := item.(string); ok { - result = append(result, s) - } - } - return result - case []string: - return val - default: - return nil - } -} - -// resolveRoles maps SSO groups to internal roles using the config's role mapping. -// If no groups match, the default role is returned. 
-func resolveRoles(cfg *sso.SSOConfig, groups []string) []string { - if cfg.RoleMapping == nil || len(cfg.RoleMapping) == 0 { - if cfg.DefaultRoleID != nil { - return []string{cfg.DefaultRoleID.String()} - } - return []string{"compliance_user"} - } - - roleSet := make(map[string]bool) - for _, group := range groups { - if role, ok := cfg.RoleMapping[group]; ok { - roleSet[role] = true - } - } - - if len(roleSet) == 0 { - if cfg.DefaultRoleID != nil { - return []string{cfg.DefaultRoleID.String()} - } - return []string{"compliance_user"} - } - - roles := make([]string, 0, len(roleSet)) - for role := range roleSet { - roles = append(roles, role) - } - return roles -} diff --git a/ai-compliance-sdk/internal/dsb/models.go b/ai-compliance-sdk/internal/dsb/models.go deleted file mode 100644 index 21a8c3d..0000000 --- a/ai-compliance-sdk/internal/dsb/models.go +++ /dev/null @@ -1,164 +0,0 @@ -package dsb - -import ( - "time" - - "github.com/google/uuid" -) - -// ============================================================================ -// Core Models -// ============================================================================ - -// Assignment represents a DSB-to-tenant assignment. -type Assignment struct { - ID uuid.UUID `json:"id"` - DSBUserID uuid.UUID `json:"dsb_user_id"` - TenantID uuid.UUID `json:"tenant_id"` - TenantName string `json:"tenant_name"` // populated via JOIN - TenantSlug string `json:"tenant_slug"` // populated via JOIN - Status string `json:"status"` // active, paused, terminated - ContractStart time.Time `json:"contract_start"` - ContractEnd *time.Time `json:"contract_end,omitempty"` - MonthlyHoursBudget float64 `json:"monthly_hours_budget"` - Notes string `json:"notes"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -// HourEntry represents a DSB time tracking entry. 
-type HourEntry struct { - ID uuid.UUID `json:"id"` - AssignmentID uuid.UUID `json:"assignment_id"` - Date time.Time `json:"date"` - Hours float64 `json:"hours"` - Category string `json:"category"` // dsfa_review, consultation, audit, training, incident_response, documentation, meeting, other - Description string `json:"description"` - Billable bool `json:"billable"` - CreatedAt time.Time `json:"created_at"` -} - -// Task represents a DSB task/work item. -type Task struct { - ID uuid.UUID `json:"id"` - AssignmentID uuid.UUID `json:"assignment_id"` - Title string `json:"title"` - Description string `json:"description"` - Category string `json:"category"` // dsfa_review, dsr_response, incident_review, audit_preparation, policy_review, training, consultation, other - Priority string `json:"priority"` // low, medium, high, urgent - Status string `json:"status"` // open, in_progress, waiting, completed, cancelled - DueDate *time.Time `json:"due_date,omitempty"` - CompletedAt *time.Time `json:"completed_at,omitempty"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -// Communication represents a DSB communication log entry. -type Communication struct { - ID uuid.UUID `json:"id"` - AssignmentID uuid.UUID `json:"assignment_id"` - Direction string `json:"direction"` // inbound, outbound - Channel string `json:"channel"` // email, phone, meeting, portal, letter - Subject string `json:"subject"` - Content string `json:"content"` - Participants string `json:"participants"` - CreatedAt time.Time `json:"created_at"` -} - -// ============================================================================ -// Dashboard Models -// ============================================================================ - -// DSBDashboard provides the aggregated overview for a DSB user. 
-type DSBDashboard struct { - Assignments []AssignmentOverview `json:"assignments"` - TotalAssignments int `json:"total_assignments"` - ActiveAssignments int `json:"active_assignments"` - TotalHoursThisMonth float64 `json:"total_hours_this_month"` - OpenTasks int `json:"open_tasks"` - UrgentTasks int `json:"urgent_tasks"` - GeneratedAt time.Time `json:"generated_at"` -} - -// AssignmentOverview enriches an Assignment with aggregated metrics. -type AssignmentOverview struct { - Assignment - ComplianceScore int `json:"compliance_score"` - HoursThisMonth float64 `json:"hours_this_month"` - HoursBudget float64 `json:"hours_budget"` - OpenTaskCount int `json:"open_task_count"` - UrgentTaskCount int `json:"urgent_task_count"` - NextDeadline *time.Time `json:"next_deadline,omitempty"` -} - -// ============================================================================ -// Request Models -// ============================================================================ - -// CreateAssignmentRequest is the request body for creating an assignment. -type CreateAssignmentRequest struct { - DSBUserID uuid.UUID `json:"dsb_user_id" binding:"required"` - TenantID uuid.UUID `json:"tenant_id" binding:"required"` - Status string `json:"status"` - ContractStart time.Time `json:"contract_start" binding:"required"` - ContractEnd *time.Time `json:"contract_end,omitempty"` - MonthlyHoursBudget float64 `json:"monthly_hours_budget"` - Notes string `json:"notes"` -} - -// UpdateAssignmentRequest is the request body for updating an assignment. -type UpdateAssignmentRequest struct { - Status *string `json:"status,omitempty"` - ContractEnd *time.Time `json:"contract_end,omitempty"` - MonthlyHoursBudget *float64 `json:"monthly_hours_budget,omitempty"` - Notes *string `json:"notes,omitempty"` -} - -// CreateHourEntryRequest is the request body for creating a time entry. 
-type CreateHourEntryRequest struct { - Date time.Time `json:"date" binding:"required"` - Hours float64 `json:"hours" binding:"required"` - Category string `json:"category" binding:"required"` - Description string `json:"description" binding:"required"` - Billable *bool `json:"billable,omitempty"` -} - -// CreateTaskRequest is the request body for creating a task. -type CreateTaskRequest struct { - Title string `json:"title" binding:"required"` - Description string `json:"description"` - Category string `json:"category" binding:"required"` - Priority string `json:"priority"` - DueDate *time.Time `json:"due_date,omitempty"` -} - -// UpdateTaskRequest is the request body for updating a task. -type UpdateTaskRequest struct { - Title *string `json:"title,omitempty"` - Description *string `json:"description,omitempty"` - Category *string `json:"category,omitempty"` - Priority *string `json:"priority,omitempty"` - Status *string `json:"status,omitempty"` - DueDate *time.Time `json:"due_date,omitempty"` -} - -// CreateCommunicationRequest is the request body for creating a communication entry. -type CreateCommunicationRequest struct { - Direction string `json:"direction" binding:"required"` - Channel string `json:"channel" binding:"required"` - Subject string `json:"subject" binding:"required"` - Content string `json:"content"` - Participants string `json:"participants"` -} - -// ============================================================================ -// Summary Models -// ============================================================================ - -// HoursSummary provides aggregated hour statistics for an assignment. 
-type HoursSummary struct { - TotalHours float64 `json:"total_hours"` - BillableHours float64 `json:"billable_hours"` - ByCategory map[string]float64 `json:"by_category"` - Period string `json:"period"` // YYYY-MM or "all" -} diff --git a/ai-compliance-sdk/internal/dsb/store.go b/ai-compliance-sdk/internal/dsb/store.go deleted file mode 100644 index 5a2c032..0000000 --- a/ai-compliance-sdk/internal/dsb/store.go +++ /dev/null @@ -1,510 +0,0 @@ -package dsb - -import ( - "context" - "fmt" - "time" - - "github.com/breakpilot/ai-compliance-sdk/internal/reporting" - "github.com/google/uuid" - "github.com/jackc/pgx/v5/pgxpool" -) - -// Store provides database operations for the DSB portal. -type Store struct { - pool *pgxpool.Pool - reportingStore *reporting.Store -} - -// NewStore creates a new DSB store. -func NewStore(pool *pgxpool.Pool, reportingStore *reporting.Store) *Store { - return &Store{ - pool: pool, - reportingStore: reportingStore, - } -} - -// Pool returns the underlying connection pool for direct queries when needed. -func (s *Store) Pool() *pgxpool.Pool { - return s.pool -} - -// ============================================================================ -// Dashboard -// ============================================================================ - -// GetDashboard generates the aggregated DSB dashboard for a given DSB user. 
-func (s *Store) GetDashboard(ctx context.Context, dsbUserID uuid.UUID) (*DSBDashboard, error) { - assignments, err := s.ListAssignments(ctx, dsbUserID) - if err != nil { - return nil, fmt.Errorf("list assignments: %w", err) - } - - now := time.Now().UTC() - currentMonth := now.Format("2006-01") - - dashboard := &DSBDashboard{ - Assignments: make([]AssignmentOverview, 0, len(assignments)), - GeneratedAt: now, - } - - for _, a := range assignments { - overview := AssignmentOverview{ - Assignment: a, - HoursBudget: a.MonthlyHoursBudget, - } - - // Enrich with compliance score (error-tolerant) - if s.reportingStore != nil { - report, err := s.reportingStore.GenerateReport(ctx, a.TenantID) - if err == nil && report != nil { - overview.ComplianceScore = report.ComplianceScore - } - } - - // Hours this month - summary, err := s.GetHoursSummary(ctx, a.ID, currentMonth) - if err == nil && summary != nil { - overview.HoursThisMonth = summary.TotalHours - } - - // Open and urgent tasks - openTasks, err := s.ListTasks(ctx, a.ID, "open") - if err == nil { - overview.OpenTaskCount = len(openTasks) - for _, t := range openTasks { - if t.Priority == "urgent" { - overview.UrgentTaskCount++ - } - if t.DueDate != nil && (overview.NextDeadline == nil || t.DueDate.Before(*overview.NextDeadline)) { - overview.NextDeadline = t.DueDate - } - } - } - - // Also count in_progress tasks - inProgressTasks, err := s.ListTasks(ctx, a.ID, "in_progress") - if err == nil { - overview.OpenTaskCount += len(inProgressTasks) - for _, t := range inProgressTasks { - if t.Priority == "urgent" { - overview.UrgentTaskCount++ - } - if t.DueDate != nil && (overview.NextDeadline == nil || t.DueDate.Before(*overview.NextDeadline)) { - overview.NextDeadline = t.DueDate - } - } - } - - dashboard.Assignments = append(dashboard.Assignments, overview) - dashboard.TotalAssignments++ - if a.Status == "active" { - dashboard.ActiveAssignments++ - } - dashboard.TotalHoursThisMonth += overview.HoursThisMonth - 
dashboard.OpenTasks += overview.OpenTaskCount - dashboard.UrgentTasks += overview.UrgentTaskCount - } - - return dashboard, nil -} - -// ============================================================================ -// Assignments -// ============================================================================ - -// CreateAssignment inserts a new DSB assignment. -func (s *Store) CreateAssignment(ctx context.Context, a *Assignment) error { - a.ID = uuid.New() - now := time.Now().UTC() - a.CreatedAt = now - a.UpdatedAt = now - - if a.Status == "" { - a.Status = "active" - } - - _, err := s.pool.Exec(ctx, ` - INSERT INTO dsb_assignments (id, dsb_user_id, tenant_id, status, contract_start, contract_end, monthly_hours_budget, notes, created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) - `, a.ID, a.DSBUserID, a.TenantID, a.Status, a.ContractStart, a.ContractEnd, a.MonthlyHoursBudget, a.Notes, a.CreatedAt, a.UpdatedAt) - if err != nil { - return fmt.Errorf("insert assignment: %w", err) - } - return nil -} - -// ListAssignments returns all assignments for a given DSB user, joined with tenant info. 
-func (s *Store) ListAssignments(ctx context.Context, dsbUserID uuid.UUID) ([]Assignment, error) { - rows, err := s.pool.Query(ctx, ` - SELECT a.id, a.dsb_user_id, a.tenant_id, ct.name, ct.slug, - a.status, a.contract_start, a.contract_end, - a.monthly_hours_budget, a.notes, a.created_at, a.updated_at - FROM dsb_assignments a - JOIN compliance_tenants ct ON ct.id = a.tenant_id - WHERE a.dsb_user_id = $1 - ORDER BY a.created_at DESC - `, dsbUserID) - if err != nil { - return nil, fmt.Errorf("query assignments: %w", err) - } - defer rows.Close() - - var assignments []Assignment - for rows.Next() { - var a Assignment - if err := rows.Scan( - &a.ID, &a.DSBUserID, &a.TenantID, &a.TenantName, &a.TenantSlug, - &a.Status, &a.ContractStart, &a.ContractEnd, - &a.MonthlyHoursBudget, &a.Notes, &a.CreatedAt, &a.UpdatedAt, - ); err != nil { - return nil, fmt.Errorf("scan assignment: %w", err) - } - assignments = append(assignments, a) - } - - if assignments == nil { - assignments = []Assignment{} - } - return assignments, nil -} - -// GetAssignment retrieves a single assignment by ID. -func (s *Store) GetAssignment(ctx context.Context, id uuid.UUID) (*Assignment, error) { - var a Assignment - err := s.pool.QueryRow(ctx, ` - SELECT a.id, a.dsb_user_id, a.tenant_id, ct.name, ct.slug, - a.status, a.contract_start, a.contract_end, - a.monthly_hours_budget, a.notes, a.created_at, a.updated_at - FROM dsb_assignments a - JOIN compliance_tenants ct ON ct.id = a.tenant_id - WHERE a.id = $1 - `, id).Scan( - &a.ID, &a.DSBUserID, &a.TenantID, &a.TenantName, &a.TenantSlug, - &a.Status, &a.ContractStart, &a.ContractEnd, - &a.MonthlyHoursBudget, &a.Notes, &a.CreatedAt, &a.UpdatedAt, - ) - if err != nil { - return nil, fmt.Errorf("get assignment: %w", err) - } - return &a, nil -} - -// UpdateAssignment updates an existing assignment. 
-func (s *Store) UpdateAssignment(ctx context.Context, a *Assignment) error { - _, err := s.pool.Exec(ctx, ` - UPDATE dsb_assignments - SET status = $2, contract_end = $3, monthly_hours_budget = $4, notes = $5, updated_at = NOW() - WHERE id = $1 - `, a.ID, a.Status, a.ContractEnd, a.MonthlyHoursBudget, a.Notes) - if err != nil { - return fmt.Errorf("update assignment: %w", err) - } - return nil -} - -// ============================================================================ -// Hours -// ============================================================================ - -// CreateHourEntry inserts a new time tracking entry. -func (s *Store) CreateHourEntry(ctx context.Context, h *HourEntry) error { - h.ID = uuid.New() - h.CreatedAt = time.Now().UTC() - - _, err := s.pool.Exec(ctx, ` - INSERT INTO dsb_hours (id, assignment_id, date, hours, category, description, billable, created_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8) - `, h.ID, h.AssignmentID, h.Date, h.Hours, h.Category, h.Description, h.Billable, h.CreatedAt) - if err != nil { - return fmt.Errorf("insert hour entry: %w", err) - } - return nil -} - -// ListHours returns time entries for an assignment, optionally filtered by month (YYYY-MM). -func (s *Store) ListHours(ctx context.Context, assignmentID uuid.UUID, month string) ([]HourEntry, error) { - var query string - var args []interface{} - - if month != "" { - query = ` - SELECT id, assignment_id, date, hours, category, description, billable, created_at - FROM dsb_hours - WHERE assignment_id = $1 AND to_char(date, 'YYYY-MM') = $2 - ORDER BY date DESC, created_at DESC - ` - args = []interface{}{assignmentID, month} - } else { - query = ` - SELECT id, assignment_id, date, hours, category, description, billable, created_at - FROM dsb_hours - WHERE assignment_id = $1 - ORDER BY date DESC, created_at DESC - ` - args = []interface{}{assignmentID} - } - - rows, err := s.pool.Query(ctx, query, args...) 
- if err != nil { - return nil, fmt.Errorf("query hours: %w", err) - } - defer rows.Close() - - var entries []HourEntry - for rows.Next() { - var h HourEntry - if err := rows.Scan( - &h.ID, &h.AssignmentID, &h.Date, &h.Hours, &h.Category, - &h.Description, &h.Billable, &h.CreatedAt, - ); err != nil { - return nil, fmt.Errorf("scan hour entry: %w", err) - } - entries = append(entries, h) - } - - if entries == nil { - entries = []HourEntry{} - } - return entries, nil -} - -// GetHoursSummary returns aggregated hour statistics for an assignment, optionally filtered by month. -func (s *Store) GetHoursSummary(ctx context.Context, assignmentID uuid.UUID, month string) (*HoursSummary, error) { - summary := &HoursSummary{ - ByCategory: make(map[string]float64), - Period: "all", - } - - if month != "" { - summary.Period = month - } - - // Total and billable hours - var totalQuery string - var totalArgs []interface{} - - if month != "" { - totalQuery = ` - SELECT COALESCE(SUM(hours), 0), COALESCE(SUM(CASE WHEN billable THEN hours ELSE 0 END), 0) - FROM dsb_hours - WHERE assignment_id = $1 AND to_char(date, 'YYYY-MM') = $2 - ` - totalArgs = []interface{}{assignmentID, month} - } else { - totalQuery = ` - SELECT COALESCE(SUM(hours), 0), COALESCE(SUM(CASE WHEN billable THEN hours ELSE 0 END), 0) - FROM dsb_hours - WHERE assignment_id = $1 - ` - totalArgs = []interface{}{assignmentID} - } - - err := s.pool.QueryRow(ctx, totalQuery, totalArgs...).Scan(&summary.TotalHours, &summary.BillableHours) - if err != nil { - return nil, fmt.Errorf("query hours summary totals: %w", err) - } - - // Hours by category - var catQuery string - var catArgs []interface{} - - if month != "" { - catQuery = ` - SELECT category, COALESCE(SUM(hours), 0) - FROM dsb_hours - WHERE assignment_id = $1 AND to_char(date, 'YYYY-MM') = $2 - GROUP BY category - ` - catArgs = []interface{}{assignmentID, month} - } else { - catQuery = ` - SELECT category, COALESCE(SUM(hours), 0) - FROM dsb_hours - WHERE 
assignment_id = $1 - GROUP BY category - ` - catArgs = []interface{}{assignmentID} - } - - rows, err := s.pool.Query(ctx, catQuery, catArgs...) - if err != nil { - return nil, fmt.Errorf("query hours by category: %w", err) - } - defer rows.Close() - - for rows.Next() { - var cat string - var hours float64 - if err := rows.Scan(&cat, &hours); err != nil { - return nil, fmt.Errorf("scan category hours: %w", err) - } - summary.ByCategory[cat] = hours - } - - return summary, nil -} - -// ============================================================================ -// Tasks -// ============================================================================ - -// CreateTask inserts a new DSB task. -func (s *Store) CreateTask(ctx context.Context, t *Task) error { - t.ID = uuid.New() - now := time.Now().UTC() - t.CreatedAt = now - t.UpdatedAt = now - - if t.Status == "" { - t.Status = "open" - } - if t.Priority == "" { - t.Priority = "medium" - } - - _, err := s.pool.Exec(ctx, ` - INSERT INTO dsb_tasks (id, assignment_id, title, description, category, priority, status, due_date, created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) - `, t.ID, t.AssignmentID, t.Title, t.Description, t.Category, t.Priority, t.Status, t.DueDate, t.CreatedAt, t.UpdatedAt) - if err != nil { - return fmt.Errorf("insert task: %w", err) - } - return nil -} - -// ListTasks returns tasks for an assignment, optionally filtered by status. 
-func (s *Store) ListTasks(ctx context.Context, assignmentID uuid.UUID, status string) ([]Task, error) { - var query string - var args []interface{} - - if status != "" { - query = ` - SELECT id, assignment_id, title, description, category, priority, status, due_date, completed_at, created_at, updated_at - FROM dsb_tasks - WHERE assignment_id = $1 AND status = $2 - ORDER BY CASE priority - WHEN 'urgent' THEN 1 - WHEN 'high' THEN 2 - WHEN 'medium' THEN 3 - WHEN 'low' THEN 4 - ELSE 5 - END, due_date ASC NULLS LAST, created_at DESC - ` - args = []interface{}{assignmentID, status} - } else { - query = ` - SELECT id, assignment_id, title, description, category, priority, status, due_date, completed_at, created_at, updated_at - FROM dsb_tasks - WHERE assignment_id = $1 - ORDER BY CASE priority - WHEN 'urgent' THEN 1 - WHEN 'high' THEN 2 - WHEN 'medium' THEN 3 - WHEN 'low' THEN 4 - ELSE 5 - END, due_date ASC NULLS LAST, created_at DESC - ` - args = []interface{}{assignmentID} - } - - rows, err := s.pool.Query(ctx, query, args...) - if err != nil { - return nil, fmt.Errorf("query tasks: %w", err) - } - defer rows.Close() - - var tasks []Task - for rows.Next() { - var t Task - if err := rows.Scan( - &t.ID, &t.AssignmentID, &t.Title, &t.Description, &t.Category, - &t.Priority, &t.Status, &t.DueDate, &t.CompletedAt, - &t.CreatedAt, &t.UpdatedAt, - ); err != nil { - return nil, fmt.Errorf("scan task: %w", err) - } - tasks = append(tasks, t) - } - - if tasks == nil { - tasks = []Task{} - } - return tasks, nil -} - -// UpdateTask updates an existing task. 
-func (s *Store) UpdateTask(ctx context.Context, t *Task) error { - _, err := s.pool.Exec(ctx, ` - UPDATE dsb_tasks - SET title = $2, description = $3, category = $4, priority = $5, status = $6, due_date = $7, updated_at = NOW() - WHERE id = $1 - `, t.ID, t.Title, t.Description, t.Category, t.Priority, t.Status, t.DueDate) - if err != nil { - return fmt.Errorf("update task: %w", err) - } - return nil -} - -// CompleteTask marks a task as completed with the current timestamp. -func (s *Store) CompleteTask(ctx context.Context, taskID uuid.UUID) error { - _, err := s.pool.Exec(ctx, ` - UPDATE dsb_tasks - SET status = 'completed', completed_at = NOW(), updated_at = NOW() - WHERE id = $1 - `, taskID) - if err != nil { - return fmt.Errorf("complete task: %w", err) - } - return nil -} - -// ============================================================================ -// Communications -// ============================================================================ - -// CreateCommunication inserts a new communication log entry. -func (s *Store) CreateCommunication(ctx context.Context, c *Communication) error { - c.ID = uuid.New() - c.CreatedAt = time.Now().UTC() - - _, err := s.pool.Exec(ctx, ` - INSERT INTO dsb_communications (id, assignment_id, direction, channel, subject, content, participants, created_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8) - `, c.ID, c.AssignmentID, c.Direction, c.Channel, c.Subject, c.Content, c.Participants, c.CreatedAt) - if err != nil { - return fmt.Errorf("insert communication: %w", err) - } - return nil -} - -// ListCommunications returns all communication entries for an assignment. 
-func (s *Store) ListCommunications(ctx context.Context, assignmentID uuid.UUID) ([]Communication, error) { - rows, err := s.pool.Query(ctx, ` - SELECT id, assignment_id, direction, channel, subject, content, participants, created_at - FROM dsb_communications - WHERE assignment_id = $1 - ORDER BY created_at DESC - `, assignmentID) - if err != nil { - return nil, fmt.Errorf("query communications: %w", err) - } - defer rows.Close() - - var comms []Communication - for rows.Next() { - var c Communication - if err := rows.Scan( - &c.ID, &c.AssignmentID, &c.Direction, &c.Channel, - &c.Subject, &c.Content, &c.Participants, &c.CreatedAt, - ); err != nil { - return nil, fmt.Errorf("scan communication: %w", err) - } - comms = append(comms, c) - } - - if comms == nil { - comms = []Communication{} - } - return comms, nil -} diff --git a/ai-compliance-sdk/internal/funding/export.go b/ai-compliance-sdk/internal/funding/export.go deleted file mode 100644 index e780227..0000000 --- a/ai-compliance-sdk/internal/funding/export.go +++ /dev/null @@ -1,395 +0,0 @@ -package funding - -import ( - "archive/zip" - "bytes" - "fmt" - "io" - "time" - - "github.com/jung-kurt/gofpdf" - "github.com/xuri/excelize/v2" -) - -// ExportService handles document generation -type ExportService struct{} - -// NewExportService creates a new export service -func NewExportService() *ExportService { - return &ExportService{} -} - -// GenerateApplicationLetter generates the main application letter as PDF -func (s *ExportService) GenerateApplicationLetter(app *FundingApplication) ([]byte, error) { - pdf := gofpdf.New("P", "mm", "A4", "") - pdf.SetMargins(25, 25, 25) - pdf.AddPage() - - // Header - pdf.SetFont("Helvetica", "B", 14) - pdf.Cell(0, 10, "Antrag auf Foerderung im Rahmen der digitalen Bildungsinfrastruktur") - pdf.Ln(15) - - // Application number - pdf.SetFont("Helvetica", "", 10) - pdf.Cell(0, 6, fmt.Sprintf("Antragsnummer: %s", app.ApplicationNumber)) - pdf.Ln(6) - pdf.Cell(0, 6, 
fmt.Sprintf("Datum: %s", time.Now().Format("02.01.2006"))) - pdf.Ln(15) - - // Section 1: Einleitung - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "1. Einleitung") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - - if app.SchoolProfile != nil { - pdf.MultiCell(0, 6, fmt.Sprintf( - "Die %s (Schulnummer: %s) beantragt hiermit Foerdermittel aus dem Programm %s.\n\n"+ - "Schultraeger: %s\n"+ - "Schulform: %s\n"+ - "Schueleranzahl: %d\n"+ - "Lehrkraefte: %d", - app.SchoolProfile.Name, - app.SchoolProfile.SchoolNumber, - app.FundingProgram, - app.SchoolProfile.CarrierName, - app.SchoolProfile.Type, - app.SchoolProfile.StudentCount, - app.SchoolProfile.TeacherCount, - ), "", "", false) - } - pdf.Ln(10) - - // Section 2: Projektziel - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "2. Projektziel") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - if app.ProjectPlan != nil { - pdf.MultiCell(0, 6, app.ProjectPlan.Summary, "", "", false) - pdf.Ln(5) - pdf.MultiCell(0, 6, app.ProjectPlan.Goals, "", "", false) - } - pdf.Ln(10) - - // Section 3: Beschreibung der Massnahme - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "3. Beschreibung der Massnahme") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - if app.ProjectPlan != nil { - pdf.MultiCell(0, 6, app.ProjectPlan.DidacticConcept, "", "", false) - } - pdf.Ln(10) - - // Section 4: Datenschutz & IT-Betrieb - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "4. Datenschutz & IT-Betrieb") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - if app.ProjectPlan != nil && app.ProjectPlan.DataProtection != "" { - pdf.MultiCell(0, 6, app.ProjectPlan.DataProtection, "", "", false) - } - pdf.Ln(10) - - // Section 5: Kosten & Finanzierung - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "5. 
Kosten & Finanzierung") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - if app.Budget != nil { - pdf.Cell(0, 6, fmt.Sprintf("Gesamtkosten: %.2f EUR", app.Budget.TotalCost)) - pdf.Ln(6) - pdf.Cell(0, 6, fmt.Sprintf("Beantragter Foerderbetrag: %.2f EUR (%.0f%%)", app.Budget.RequestedFunding, app.Budget.FundingRate*100)) - pdf.Ln(6) - pdf.Cell(0, 6, fmt.Sprintf("Eigenanteil: %.2f EUR", app.Budget.OwnContribution)) - } - pdf.Ln(10) - - // Section 6: Laufzeit - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "6. Laufzeit") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - if app.Timeline != nil { - pdf.Cell(0, 6, fmt.Sprintf("Projektbeginn: %s", app.Timeline.PlannedStart.Format("02.01.2006"))) - pdf.Ln(6) - pdf.Cell(0, 6, fmt.Sprintf("Projektende: %s", app.Timeline.PlannedEnd.Format("02.01.2006"))) - } - pdf.Ln(15) - - // Footer note - pdf.SetFont("Helvetica", "I", 9) - pdf.MultiCell(0, 5, "Hinweis: Dieser Antrag wurde mit dem Foerderantrag-Wizard von BreakPilot erstellt. "+ - "Die finale Pruefung und Einreichung erfolgt durch den Schultraeger.", "", "", false) - - var buf bytes.Buffer - if err := pdf.Output(&buf); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// GenerateBudgetPlan generates the budget plan as XLSX -func (s *ExportService) GenerateBudgetPlan(app *FundingApplication) ([]byte, error) { - f := excelize.NewFile() - sheetName := "Kostenplan" - f.SetSheetName("Sheet1", sheetName) - - // Header row - headers := []string{ - "Pos.", "Kategorie", "Beschreibung", "Hersteller", - "Anzahl", "Einzelpreis", "Gesamt", "Foerderfahig", "Finanzierung", - } - for i, h := range headers { - cell, _ := excelize.CoordinatesToCellName(i+1, 1) - f.SetCellValue(sheetName, cell, h) - } - - // Style header - headerStyle, _ := f.NewStyle(&excelize.Style{ - Font: &excelize.Font{Bold: true}, - Fill: excelize.Fill{Type: "pattern", Color: []string{"#E0E0E0"}, Pattern: 1}, - }) - f.SetRowStyle(sheetName, 1, 1, headerStyle) - - // Data rows - row := 2 - if 
app.Budget != nil { - for i, item := range app.Budget.BudgetItems { - f.SetCellValue(sheetName, fmt.Sprintf("A%d", row), i+1) - f.SetCellValue(sheetName, fmt.Sprintf("B%d", row), string(item.Category)) - f.SetCellValue(sheetName, fmt.Sprintf("C%d", row), item.Description) - f.SetCellValue(sheetName, fmt.Sprintf("D%d", row), item.Manufacturer) - f.SetCellValue(sheetName, fmt.Sprintf("E%d", row), item.Quantity) - f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), item.UnitPrice) - f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), item.TotalPrice) - fundable := "Nein" - if item.IsFundable { - fundable = "Ja" - } - f.SetCellValue(sheetName, fmt.Sprintf("H%d", row), fundable) - f.SetCellValue(sheetName, fmt.Sprintf("I%d", row), item.FundingSource) - row++ - } - - // Summary rows - row += 2 - f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), "Gesamtkosten:") - f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), app.Budget.TotalCost) - row++ - f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), "Foerderbetrag:") - f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), app.Budget.RequestedFunding) - row++ - f.SetCellValue(sheetName, fmt.Sprintf("F%d", row), "Eigenanteil:") - f.SetCellValue(sheetName, fmt.Sprintf("G%d", row), app.Budget.OwnContribution) - } - - // Set column widths - f.SetColWidth(sheetName, "A", "A", 6) - f.SetColWidth(sheetName, "B", "B", 15) - f.SetColWidth(sheetName, "C", "C", 35) - f.SetColWidth(sheetName, "D", "D", 15) - f.SetColWidth(sheetName, "E", "E", 8) - f.SetColWidth(sheetName, "F", "F", 12) - f.SetColWidth(sheetName, "G", "G", 12) - f.SetColWidth(sheetName, "H", "H", 12) - f.SetColWidth(sheetName, "I", "I", 15) - - // Add currency format - currencyStyle, _ := f.NewStyle(&excelize.Style{ - NumFmt: 44, // Currency format - }) - f.SetColStyle(sheetName, "F", currencyStyle) - f.SetColStyle(sheetName, "G", currencyStyle) - - var buf bytes.Buffer - if err := f.Write(&buf); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// 
GenerateDataProtectionConcept generates the data protection concept as PDF -func (s *ExportService) GenerateDataProtectionConcept(app *FundingApplication) ([]byte, error) { - pdf := gofpdf.New("P", "mm", "A4", "") - pdf.SetMargins(25, 25, 25) - pdf.AddPage() - - // Header - pdf.SetFont("Helvetica", "B", 14) - pdf.Cell(0, 10, "Datenschutz- und Betriebskonzept") - pdf.Ln(15) - - pdf.SetFont("Helvetica", "", 10) - pdf.Cell(0, 6, fmt.Sprintf("Antragsnummer: %s", app.ApplicationNumber)) - pdf.Ln(6) - if app.SchoolProfile != nil { - pdf.Cell(0, 6, fmt.Sprintf("Schule: %s", app.SchoolProfile.Name)) - } - pdf.Ln(15) - - // Section: Lokale Verarbeitung - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "1. Grundsaetze der Datenverarbeitung") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - - if app.ProjectPlan != nil && app.ProjectPlan.DataProtection != "" { - pdf.MultiCell(0, 6, app.ProjectPlan.DataProtection, "", "", false) - } else { - pdf.MultiCell(0, 6, "Das Projekt setzt auf eine vollstaendig lokale Datenverarbeitung:\n\n"+ - "- Alle Daten werden ausschliesslich auf den schuleigenen Systemen verarbeitet\n"+ - "- Keine Uebermittlung personenbezogener Daten an externe Dienste\n"+ - "- Keine Cloud-Speicherung sensibler Daten\n"+ - "- Betrieb im Verantwortungsbereich der Schule", "", "", false) - } - pdf.Ln(10) - - // Section: Technische Massnahmen - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "2. Technische und organisatorische Massnahmen") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - pdf.MultiCell(0, 6, "Folgende TOMs werden umgesetzt:\n\n"+ - "- Zugriffskontrolle ueber schuleigene Benutzerverwaltung\n"+ - "- Verschluesselte Datenspeicherung\n"+ - "- Regelmaessige Sicherheitsupdates\n"+ - "- Protokollierung von Zugriffen\n"+ - "- Automatische Loeschung nach definierten Fristen", "", "", false) - pdf.Ln(10) - - // Section: Betriebskonzept - pdf.SetFont("Helvetica", "B", 12) - pdf.Cell(0, 8, "3. 
Betriebskonzept") - pdf.Ln(10) - pdf.SetFont("Helvetica", "", 10) - if app.ProjectPlan != nil && app.ProjectPlan.MaintenancePlan != "" { - pdf.MultiCell(0, 6, app.ProjectPlan.MaintenancePlan, "", "", false) - } else { - pdf.MultiCell(0, 6, "Der laufende Betrieb wird wie folgt sichergestellt:\n\n"+ - "- Schulung des technischen Personals\n"+ - "- Dokumentierte Betriebsverfahren\n"+ - "- Regelmaessige Wartung und Updates\n"+ - "- Definierte Ansprechpartner", "", "", false) - } - - var buf bytes.Buffer - if err := pdf.Output(&buf); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// GenerateExportBundle generates a ZIP file with all documents -func (s *ExportService) GenerateExportBundle(app *FundingApplication) ([]byte, error) { - var buf bytes.Buffer - zipWriter := zip.NewWriter(&buf) - - // Generate and add application letter - letter, err := s.GenerateApplicationLetter(app) - if err == nil { - w, _ := zipWriter.Create(fmt.Sprintf("%s_Antragsschreiben.pdf", app.ApplicationNumber)) - w.Write(letter) - } - - // Generate and add budget plan - budget, err := s.GenerateBudgetPlan(app) - if err == nil { - w, _ := zipWriter.Create(fmt.Sprintf("%s_Kostenplan.xlsx", app.ApplicationNumber)) - w.Write(budget) - } - - // Generate and add data protection concept - dp, err := s.GenerateDataProtectionConcept(app) - if err == nil { - w, _ := zipWriter.Create(fmt.Sprintf("%s_Datenschutzkonzept.pdf", app.ApplicationNumber)) - w.Write(dp) - } - - // Add attachments - for _, attachment := range app.Attachments { - // Read attachment from storage and add to ZIP - // This would need actual file system access - _ = attachment - } - - if err := zipWriter.Close(); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// ExportDocument represents a generated document -type GeneratedDocument struct { - Name string - Type string // pdf, xlsx, docx - Content []byte - MimeType string -} - -// GenerateAllDocuments generates all documents for an application 
-func (s *ExportService) GenerateAllDocuments(app *FundingApplication) ([]GeneratedDocument, error) { - var docs []GeneratedDocument - - // Application letter - letter, err := s.GenerateApplicationLetter(app) - if err == nil { - docs = append(docs, GeneratedDocument{ - Name: fmt.Sprintf("%s_Antragsschreiben.pdf", app.ApplicationNumber), - Type: "pdf", - Content: letter, - MimeType: "application/pdf", - }) - } - - // Budget plan - budget, err := s.GenerateBudgetPlan(app) - if err == nil { - docs = append(docs, GeneratedDocument{ - Name: fmt.Sprintf("%s_Kostenplan.xlsx", app.ApplicationNumber), - Type: "xlsx", - Content: budget, - MimeType: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", - }) - } - - // Data protection concept - dp, err := s.GenerateDataProtectionConcept(app) - if err == nil { - docs = append(docs, GeneratedDocument{ - Name: fmt.Sprintf("%s_Datenschutzkonzept.pdf", app.ApplicationNumber), - Type: "pdf", - Content: dp, - MimeType: "application/pdf", - }) - } - - return docs, nil -} - -// WriteZipToWriter writes all documents to a zip writer -func (s *ExportService) WriteZipToWriter(app *FundingApplication, w io.Writer) error { - zipWriter := zip.NewWriter(w) - defer zipWriter.Close() - - docs, err := s.GenerateAllDocuments(app) - if err != nil { - return err - } - - for _, doc := range docs { - f, err := zipWriter.Create(doc.Name) - if err != nil { - continue - } - f.Write(doc.Content) - } - - return nil -} diff --git a/ai-compliance-sdk/internal/funding/models.go b/ai-compliance-sdk/internal/funding/models.go deleted file mode 100644 index c3a0c91..0000000 --- a/ai-compliance-sdk/internal/funding/models.go +++ /dev/null @@ -1,394 +0,0 @@ -package funding - -import ( - "time" - - "github.com/google/uuid" -) - -// ============================================================================ -// Constants / Enums -// ============================================================================ - -// FundingProgram represents the type 
of funding program -type FundingProgram string - -const ( - FundingProgramDigitalPakt1 FundingProgram = "DIGITALPAKT_1" - FundingProgramDigitalPakt2 FundingProgram = "DIGITALPAKT_2" - FundingProgramLandesfoerderung FundingProgram = "LANDESFOERDERUNG" - FundingProgramSchultraeger FundingProgram = "SCHULTRAEGER" - FundingProgramSonstige FundingProgram = "SONSTIGE" -) - -// ApplicationStatus represents the workflow status -type ApplicationStatus string - -const ( - ApplicationStatusDraft ApplicationStatus = "DRAFT" - ApplicationStatusInProgress ApplicationStatus = "IN_PROGRESS" - ApplicationStatusReview ApplicationStatus = "REVIEW" - ApplicationStatusSubmitted ApplicationStatus = "SUBMITTED" - ApplicationStatusApproved ApplicationStatus = "APPROVED" - ApplicationStatusRejected ApplicationStatus = "REJECTED" - ApplicationStatusArchived ApplicationStatus = "ARCHIVED" -) - -// FederalState represents German federal states -type FederalState string - -const ( - FederalStateNI FederalState = "NI" // Niedersachsen - FederalStateNRW FederalState = "NRW" // Nordrhein-Westfalen - FederalStateBAY FederalState = "BAY" // Bayern - FederalStateBW FederalState = "BW" // Baden-Wuerttemberg - FederalStateHE FederalState = "HE" // Hessen - FederalStateSN FederalState = "SN" // Sachsen - FederalStateTH FederalState = "TH" // Thueringen - FederalStateSA FederalState = "SA" // Sachsen-Anhalt - FederalStateBB FederalState = "BB" // Brandenburg - FederalStateMV FederalState = "MV" // Mecklenburg-Vorpommern - FederalStateSH FederalState = "SH" // Schleswig-Holstein - FederalStateHH FederalState = "HH" // Hamburg - FederalStateHB FederalState = "HB" // Bremen - FederalStateBE FederalState = "BE" // Berlin - FederalStateSL FederalState = "SL" // Saarland - FederalStateRP FederalState = "RP" // Rheinland-Pfalz -) - -// SchoolType represents different school types -type SchoolType string - -const ( - SchoolTypeGrundschule SchoolType = "GRUNDSCHULE" - SchoolTypeHauptschule SchoolType = 
"HAUPTSCHULE" - SchoolTypeRealschule SchoolType = "REALSCHULE" - SchoolTypeGymnasium SchoolType = "GYMNASIUM" - SchoolTypeGesamtschule SchoolType = "GESAMTSCHULE" - SchoolTypeOberschule SchoolType = "OBERSCHULE" - SchoolTypeFoerderschule SchoolType = "FOERDERSCHULE" - SchoolTypeBerufsschule SchoolType = "BERUFSSCHULE" - SchoolTypeBerufskolleg SchoolType = "BERUFSKOLLEG" - SchoolTypeFachoberschule SchoolType = "FACHOBERSCHULE" - SchoolTypeBerufliches SchoolType = "BERUFLICHES_GYMNASIUM" - SchoolTypeSonstige SchoolType = "SONSTIGE" -) - -// CarrierType represents the school carrier type -type CarrierType string - -const ( - CarrierTypePublic CarrierType = "PUBLIC" // Oeffentlich - CarrierTypePrivate CarrierType = "PRIVATE" // Privat - CarrierTypeChurch CarrierType = "CHURCH" // Kirchlich - CarrierTypeNonProfit CarrierType = "NON_PROFIT" // Gemeinnuetzig -) - -// BudgetCategory represents categories for budget items -type BudgetCategory string - -const ( - BudgetCategoryNetwork BudgetCategory = "NETWORK" // Netzwerk/Verkabelung - BudgetCategoryWLAN BudgetCategory = "WLAN" // WLAN-Infrastruktur - BudgetCategoryDevices BudgetCategory = "DEVICES" // Endgeraete - BudgetCategoryPresentation BudgetCategory = "PRESENTATION" // Praesentationstechnik - BudgetCategorySoftware BudgetCategory = "SOFTWARE" // Software-Lizenzen - BudgetCategoryServer BudgetCategory = "SERVER" // Server/Rechenzentrum - BudgetCategoryServices BudgetCategory = "SERVICES" // Dienstleistungen - BudgetCategoryTraining BudgetCategory = "TRAINING" // Schulungen - BudgetCategorySonstige BudgetCategory = "SONSTIGE" // Sonstige -) - -// ============================================================================ -// Main Entities -// ============================================================================ - -// FundingApplication represents a funding application -type FundingApplication struct { - ID uuid.UUID `json:"id"` - TenantID uuid.UUID `json:"tenant_id"` - ApplicationNumber string 
`json:"application_number"` // e.g., DP2-NI-2026-00123 - Title string `json:"title"` - FundingProgram FundingProgram `json:"funding_program"` - Status ApplicationStatus `json:"status"` - - // Wizard State - CurrentStep int `json:"current_step"` - TotalSteps int `json:"total_steps"` - WizardData map[string]interface{} `json:"wizard_data,omitempty"` - - // School Information - SchoolProfile *SchoolProfile `json:"school_profile,omitempty"` - - // Project Information - ProjectPlan *ProjectPlan `json:"project_plan,omitempty"` - Budget *Budget `json:"budget,omitempty"` - Timeline *ProjectTimeline `json:"timeline,omitempty"` - - // Financial Summary - RequestedAmount float64 `json:"requested_amount"` - OwnContribution float64 `json:"own_contribution"` - ApprovedAmount *float64 `json:"approved_amount,omitempty"` - - // Attachments - Attachments []Attachment `json:"attachments,omitempty"` - - // Audit Trail - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - SubmittedAt *time.Time `json:"submitted_at,omitempty"` - CreatedBy uuid.UUID `json:"created_by"` - UpdatedBy uuid.UUID `json:"updated_by"` -} - -// SchoolProfile contains school information -type SchoolProfile struct { - Name string `json:"name"` - SchoolNumber string `json:"school_number"` // Official school number - Type SchoolType `json:"type"` - FederalState FederalState `json:"federal_state"` - Address Address `json:"address"` - ContactPerson ContactPerson `json:"contact_person"` - StudentCount int `json:"student_count"` - TeacherCount int `json:"teacher_count"` - ClassCount int `json:"class_count"` - CarrierType CarrierType `json:"carrier_type"` - CarrierName string `json:"carrier_name"` - CarrierAddress *Address `json:"carrier_address,omitempty"` - Infrastructure *InfrastructureStatus `json:"infrastructure,omitempty"` -} - -// Address represents a postal address -type Address struct { - Street string `json:"street"` - HouseNo string `json:"house_no"` - PostalCode string 
`json:"postal_code"` - City string `json:"city"` - Country string `json:"country,omitempty"` -} - -// ContactPerson represents a contact person -type ContactPerson struct { - Salutation string `json:"salutation,omitempty"` // Herr/Frau - Title string `json:"title,omitempty"` // Dr., Prof. - FirstName string `json:"first_name"` - LastName string `json:"last_name"` - Position string `json:"position,omitempty"` // Schulleitung, IT-Beauftragter - Email string `json:"email"` - Phone string `json:"phone,omitempty"` -} - -// InfrastructureStatus describes current IT infrastructure -type InfrastructureStatus struct { - HasWLAN bool `json:"has_wlan"` - WLANCoverage int `json:"wlan_coverage"` // Percentage 0-100 - HasStructuredCabling bool `json:"has_structured_cabling"` - InternetBandwidth string `json:"internet_bandwidth"` // e.g., "100 Mbit/s" - DeviceCount int `json:"device_count"` // Current devices - HasServerRoom bool `json:"has_server_room"` - Notes string `json:"notes,omitempty"` -} - -// ProjectPlan describes the project -type ProjectPlan struct { - ProjectName string `json:"project_name"` - Summary string `json:"summary"` // Kurzbeschreibung - Goals string `json:"goals"` // Projektziele - DidacticConcept string `json:"didactic_concept"` // Paedagogisches Konzept - MEPReference string `json:"mep_reference,omitempty"` // Medienentwicklungsplan Bezug - DataProtection string `json:"data_protection"` // Datenschutzkonzept - MaintenancePlan string `json:"maintenance_plan"` // Wartungs-/Betriebskonzept - TargetGroups []string `json:"target_groups"` // e.g., ["Schueler", "Lehrer"] - SubjectsAffected []string `json:"subjects_affected,omitempty"` // Betroffene Faecher -} - -// Budget represents the financial plan -type Budget struct { - TotalCost float64 `json:"total_cost"` - RequestedFunding float64 `json:"requested_funding"` - OwnContribution float64 `json:"own_contribution"` - OtherFunding float64 `json:"other_funding"` - FundingRate float64 `json:"funding_rate"` // 0.90 
= 90% - BudgetItems []BudgetItem `json:"budget_items"` - IsWithinLimits bool `json:"is_within_limits"` - Justification string `json:"justification,omitempty"` // Begruendung -} - -// BudgetItem represents a single budget line item -type BudgetItem struct { - ID uuid.UUID `json:"id"` - Position int `json:"position"` // Order number - Category BudgetCategory `json:"category"` - Description string `json:"description"` - Manufacturer string `json:"manufacturer,omitempty"` - ProductName string `json:"product_name,omitempty"` - Quantity int `json:"quantity"` - UnitPrice float64 `json:"unit_price"` - TotalPrice float64 `json:"total_price"` - IsFundable bool `json:"is_fundable"` // Foerderfahig Ja/Nein - FundingSource string `json:"funding_source"` // digitalpakt, eigenanteil, sonstige - Notes string `json:"notes,omitempty"` -} - -// ProjectTimeline represents project schedule -type ProjectTimeline struct { - PlannedStart time.Time `json:"planned_start"` - PlannedEnd time.Time `json:"planned_end"` - Milestones []Milestone `json:"milestones,omitempty"` - ProjectPhase string `json:"project_phase,omitempty"` // Current phase -} - -// Milestone represents a project milestone -type Milestone struct { - ID uuid.UUID `json:"id"` - Title string `json:"title"` - Description string `json:"description,omitempty"` - DueDate time.Time `json:"due_date"` - CompletedAt *time.Time `json:"completed_at,omitempty"` - Status string `json:"status"` // planned, in_progress, completed -} - -// Attachment represents an uploaded file -type Attachment struct { - ID uuid.UUID `json:"id"` - FileName string `json:"file_name"` - FileType string `json:"file_type"` // pdf, docx, xlsx, jpg, png - FileSize int64 `json:"file_size"` // bytes - Category string `json:"category"` // angebot, mep, nachweis, sonstiges - Description string `json:"description,omitempty"` - StoragePath string `json:"-"` // Internal path, not exposed - UploadedAt time.Time `json:"uploaded_at"` - UploadedBy uuid.UUID 
`json:"uploaded_by"` -} - -// ============================================================================ -// Wizard Step Data -// ============================================================================ - -// WizardStep represents a single wizard step -type WizardStep struct { - Number int `json:"number"` - Title string `json:"title"` - Description string `json:"description"` - Fields []string `json:"fields"` // Field IDs for this step - IsCompleted bool `json:"is_completed"` - IsRequired bool `json:"is_required"` - HelpContext string `json:"help_context"` // Context for LLM assistant -} - -// WizardProgress tracks wizard completion -type WizardProgress struct { - CurrentStep int `json:"current_step"` - TotalSteps int `json:"total_steps"` - CompletedSteps []int `json:"completed_steps"` - StepValidation map[int][]string `json:"step_validation,omitempty"` // Errors per step - FormData map[string]interface{} `json:"form_data"` - LastSavedAt time.Time `json:"last_saved_at"` -} - -// ============================================================================ -// BreakPilot Presets -// ============================================================================ - -// ProductPreset represents a BreakPilot product preset -type ProductPreset struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - BudgetItems []BudgetItem `json:"budget_items"` - AutoFill map[string]interface{} `json:"auto_fill"` - DataProtection string `json:"data_protection"` -} - -// ============================================================================ -// Export Structures -// ============================================================================ - -// ExportDocument represents a generated document -type ExportDocument struct { - Type string `json:"type"` // antragsschreiben, kostenplan, datenschutz - Format string `json:"format"` // pdf, docx, xlsx - FileName string `json:"file_name"` - GeneratedAt time.Time `json:"generated_at"` - 
ContentHash string `json:"content_hash"` - StoragePath string `json:"-"` -} - -// ExportBundle represents a ZIP bundle of all documents -type ExportBundle struct { - ID uuid.UUID `json:"id"` - ApplicationID uuid.UUID `json:"application_id"` - Documents []ExportDocument `json:"documents"` - GeneratedAt time.Time `json:"generated_at"` - DownloadURL string `json:"download_url"` - ExpiresAt time.Time `json:"expires_at"` -} - -// ============================================================================ -// LLM Assistant -// ============================================================================ - -// AssistantMessage represents a chat message with the assistant -type AssistantMessage struct { - Role string `json:"role"` // user, assistant, system - Content string `json:"content"` - Step int `json:"step,omitempty"` // Current wizard step -} - -// AssistantRequest for asking questions -type AssistantRequest struct { - ApplicationID uuid.UUID `json:"application_id"` - Question string `json:"question"` - CurrentStep int `json:"current_step"` - Context map[string]interface{} `json:"context,omitempty"` - History []AssistantMessage `json:"history,omitempty"` -} - -// AssistantResponse from the assistant -type AssistantResponse struct { - Answer string `json:"answer"` - Suggestions []string `json:"suggestions,omitempty"` - References []string `json:"references,omitempty"` // Links to help resources - FormFills map[string]interface{} `json:"form_fills,omitempty"` // Suggested form values -} - -// ============================================================================ -// API Request/Response Types -// ============================================================================ - -// CreateApplicationRequest for creating a new application -type CreateApplicationRequest struct { - Title string `json:"title"` - FundingProgram FundingProgram `json:"funding_program"` - FederalState FederalState `json:"federal_state"` - PresetID string `json:"preset_id,omitempty"` // 
Optional BreakPilot preset -} - -// UpdateApplicationRequest for updating an application -type UpdateApplicationRequest struct { - Title *string `json:"title,omitempty"` - WizardData map[string]interface{} `json:"wizard_data,omitempty"` - CurrentStep *int `json:"current_step,omitempty"` -} - -// SaveWizardStepRequest for saving a wizard step -type SaveWizardStepRequest struct { - Step int `json:"step"` - Data map[string]interface{} `json:"data"` - Complete bool `json:"complete"` // Mark step as complete -} - -// ApplicationListResponse for list endpoints -type ApplicationListResponse struct { - Applications []FundingApplication `json:"applications"` - Total int `json:"total"` - Page int `json:"page"` - PageSize int `json:"page_size"` -} - -// ExportRequest for export endpoints -type ExportRequest struct { - Format string `json:"format"` // zip, pdf, docx - Documents []string `json:"documents"` // Which documents to include - Language string `json:"language"` // de, en -} diff --git a/ai-compliance-sdk/internal/funding/postgres_store.go b/ai-compliance-sdk/internal/funding/postgres_store.go deleted file mode 100644 index d6b9c0a..0000000 --- a/ai-compliance-sdk/internal/funding/postgres_store.go +++ /dev/null @@ -1,652 +0,0 @@ -package funding - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" -) - -// PostgresStore implements Store using PostgreSQL -type PostgresStore struct { - pool *pgxpool.Pool -} - -// NewPostgresStore creates a new PostgreSQL store -func NewPostgresStore(pool *pgxpool.Pool) *PostgresStore { - return &PostgresStore{pool: pool} -} - -// CreateApplication creates a new funding application -func (s *PostgresStore) CreateApplication(ctx context.Context, app *FundingApplication) error { - app.ID = uuid.New() - app.CreatedAt = time.Now() - app.UpdatedAt = time.Now() - app.TotalSteps = 8 // Default 8-step wizard - - // Generate application 
number - app.ApplicationNumber = s.generateApplicationNumber(app.FundingProgram, app.SchoolProfile) - - // Marshal JSON fields - wizardDataJSON, err := json.Marshal(app.WizardData) - if err != nil { - return fmt.Errorf("failed to marshal wizard data: %w", err) - } - - schoolProfileJSON, err := json.Marshal(app.SchoolProfile) - if err != nil { - return fmt.Errorf("failed to marshal school profile: %w", err) - } - - projectPlanJSON, err := json.Marshal(app.ProjectPlan) - if err != nil { - return fmt.Errorf("failed to marshal project plan: %w", err) - } - - budgetJSON, err := json.Marshal(app.Budget) - if err != nil { - return fmt.Errorf("failed to marshal budget: %w", err) - } - - timelineJSON, err := json.Marshal(app.Timeline) - if err != nil { - return fmt.Errorf("failed to marshal timeline: %w", err) - } - - query := ` - INSERT INTO funding_applications ( - id, tenant_id, application_number, title, funding_program, status, - current_step, total_steps, wizard_data, - school_profile, project_plan, budget, timeline, - requested_amount, own_contribution, - created_at, updated_at, created_by, updated_by - ) VALUES ( - $1, $2, $3, $4, $5, $6, - $7, $8, $9, - $10, $11, $12, $13, - $14, $15, - $16, $17, $18, $19 - ) - ` - - _, err = s.pool.Exec(ctx, query, - app.ID, app.TenantID, app.ApplicationNumber, app.Title, app.FundingProgram, app.Status, - app.CurrentStep, app.TotalSteps, wizardDataJSON, - schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON, - app.RequestedAmount, app.OwnContribution, - app.CreatedAt, app.UpdatedAt, app.CreatedBy, app.UpdatedBy, - ) - - if err != nil { - return fmt.Errorf("failed to create application: %w", err) - } - - return nil -} - -// GetApplication retrieves an application by ID -func (s *PostgresStore) GetApplication(ctx context.Context, id uuid.UUID) (*FundingApplication, error) { - query := ` - SELECT - id, tenant_id, application_number, title, funding_program, status, - current_step, total_steps, wizard_data, - school_profile, 
project_plan, budget, timeline, - requested_amount, own_contribution, approved_amount, - created_at, updated_at, submitted_at, created_by, updated_by - FROM funding_applications - WHERE id = $1 - ` - - var app FundingApplication - var wizardDataJSON, schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON []byte - - err := s.pool.QueryRow(ctx, query, id).Scan( - &app.ID, &app.TenantID, &app.ApplicationNumber, &app.Title, &app.FundingProgram, &app.Status, - &app.CurrentStep, &app.TotalSteps, &wizardDataJSON, - &schoolProfileJSON, &projectPlanJSON, &budgetJSON, &timelineJSON, - &app.RequestedAmount, &app.OwnContribution, &app.ApprovedAmount, - &app.CreatedAt, &app.UpdatedAt, &app.SubmittedAt, &app.CreatedBy, &app.UpdatedBy, - ) - - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, fmt.Errorf("application not found: %s", id) - } - return nil, fmt.Errorf("failed to get application: %w", err) - } - - // Unmarshal JSON fields - if len(wizardDataJSON) > 0 { - if err := json.Unmarshal(wizardDataJSON, &app.WizardData); err != nil { - return nil, fmt.Errorf("failed to unmarshal wizard data: %w", err) - } - } - - if len(schoolProfileJSON) > 0 { - app.SchoolProfile = &SchoolProfile{} - if err := json.Unmarshal(schoolProfileJSON, app.SchoolProfile); err != nil { - return nil, fmt.Errorf("failed to unmarshal school profile: %w", err) - } - } - - if len(projectPlanJSON) > 0 { - app.ProjectPlan = &ProjectPlan{} - if err := json.Unmarshal(projectPlanJSON, app.ProjectPlan); err != nil { - return nil, fmt.Errorf("failed to unmarshal project plan: %w", err) - } - } - - if len(budgetJSON) > 0 { - app.Budget = &Budget{} - if err := json.Unmarshal(budgetJSON, app.Budget); err != nil { - return nil, fmt.Errorf("failed to unmarshal budget: %w", err) - } - } - - if len(timelineJSON) > 0 { - app.Timeline = &ProjectTimeline{} - if err := json.Unmarshal(timelineJSON, app.Timeline); err != nil { - return nil, fmt.Errorf("failed to unmarshal timeline: %w", err) - } - } - - 
// Load attachments - attachments, err := s.GetAttachments(ctx, id) - if err == nil { - app.Attachments = attachments - } - - return &app, nil -} - -// GetApplicationByNumber retrieves an application by number -func (s *PostgresStore) GetApplicationByNumber(ctx context.Context, number string) (*FundingApplication, error) { - query := `SELECT id FROM funding_applications WHERE application_number = $1` - - var id uuid.UUID - err := s.pool.QueryRow(ctx, query, number).Scan(&id) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, fmt.Errorf("application not found: %s", number) - } - return nil, fmt.Errorf("failed to find application by number: %w", err) - } - - return s.GetApplication(ctx, id) -} - -// UpdateApplication updates an existing application -func (s *PostgresStore) UpdateApplication(ctx context.Context, app *FundingApplication) error { - app.UpdatedAt = time.Now() - - // Marshal JSON fields - wizardDataJSON, _ := json.Marshal(app.WizardData) - schoolProfileJSON, _ := json.Marshal(app.SchoolProfile) - projectPlanJSON, _ := json.Marshal(app.ProjectPlan) - budgetJSON, _ := json.Marshal(app.Budget) - timelineJSON, _ := json.Marshal(app.Timeline) - - query := ` - UPDATE funding_applications SET - title = $2, funding_program = $3, status = $4, - current_step = $5, wizard_data = $6, - school_profile = $7, project_plan = $8, budget = $9, timeline = $10, - requested_amount = $11, own_contribution = $12, approved_amount = $13, - updated_at = $14, submitted_at = $15, updated_by = $16 - WHERE id = $1 - ` - - result, err := s.pool.Exec(ctx, query, - app.ID, app.Title, app.FundingProgram, app.Status, - app.CurrentStep, wizardDataJSON, - schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON, - app.RequestedAmount, app.OwnContribution, app.ApprovedAmount, - app.UpdatedAt, app.SubmittedAt, app.UpdatedBy, - ) - - if err != nil { - return fmt.Errorf("failed to update application: %w", err) - } - - if result.RowsAffected() == 0 { - return 
fmt.Errorf("application not found: %s", app.ID) - } - - return nil -} - -// DeleteApplication soft-deletes an application -func (s *PostgresStore) DeleteApplication(ctx context.Context, id uuid.UUID) error { - query := `UPDATE funding_applications SET status = 'ARCHIVED', updated_at = $2 WHERE id = $1` - result, err := s.pool.Exec(ctx, query, id, time.Now()) - if err != nil { - return fmt.Errorf("failed to delete application: %w", err) - } - if result.RowsAffected() == 0 { - return fmt.Errorf("application not found: %s", id) - } - return nil -} - -// ListApplications returns a paginated list of applications -func (s *PostgresStore) ListApplications(ctx context.Context, tenantID uuid.UUID, filter ApplicationFilter) (*ApplicationListResponse, error) { - // Build query with filters - query := ` - SELECT - id, tenant_id, application_number, title, funding_program, status, - current_step, total_steps, wizard_data, - school_profile, project_plan, budget, timeline, - requested_amount, own_contribution, approved_amount, - created_at, updated_at, submitted_at, created_by, updated_by - FROM funding_applications - WHERE tenant_id = $1 AND status != 'ARCHIVED' - ` - args := []interface{}{tenantID} - argIndex := 2 - - if filter.Status != nil { - query += fmt.Sprintf(" AND status = $%d", argIndex) - args = append(args, *filter.Status) - argIndex++ - } - - if filter.FundingProgram != nil { - query += fmt.Sprintf(" AND funding_program = $%d", argIndex) - args = append(args, *filter.FundingProgram) - argIndex++ - } - - // Count total - countQuery := `SELECT COUNT(*) FROM funding_applications WHERE tenant_id = $1 AND status != 'ARCHIVED'` - var total int - s.pool.QueryRow(ctx, countQuery, tenantID).Scan(&total) - - // Add sorting and pagination - sortBy := "created_at" - if filter.SortBy != "" { - sortBy = filter.SortBy - } - sortOrder := "DESC" - if filter.SortOrder == "asc" { - sortOrder = "ASC" - } - query += fmt.Sprintf(" ORDER BY %s %s", sortBy, sortOrder) - - if 
filter.PageSize <= 0 { - filter.PageSize = 20 - } - if filter.Page <= 0 { - filter.Page = 1 - } - offset := (filter.Page - 1) * filter.PageSize - query += fmt.Sprintf(" LIMIT %d OFFSET %d", filter.PageSize, offset) - - rows, err := s.pool.Query(ctx, query, args...) - if err != nil { - return nil, fmt.Errorf("failed to list applications: %w", err) - } - defer rows.Close() - - var apps []FundingApplication - for rows.Next() { - var app FundingApplication - var wizardDataJSON, schoolProfileJSON, projectPlanJSON, budgetJSON, timelineJSON []byte - - err := rows.Scan( - &app.ID, &app.TenantID, &app.ApplicationNumber, &app.Title, &app.FundingProgram, &app.Status, - &app.CurrentStep, &app.TotalSteps, &wizardDataJSON, - &schoolProfileJSON, &projectPlanJSON, &budgetJSON, &timelineJSON, - &app.RequestedAmount, &app.OwnContribution, &app.ApprovedAmount, - &app.CreatedAt, &app.UpdatedAt, &app.SubmittedAt, &app.CreatedBy, &app.UpdatedBy, - ) - if err != nil { - return nil, fmt.Errorf("failed to scan application: %w", err) - } - - // Unmarshal JSON fields - if len(schoolProfileJSON) > 0 { - app.SchoolProfile = &SchoolProfile{} - json.Unmarshal(schoolProfileJSON, app.SchoolProfile) - } - - apps = append(apps, app) - } - - return &ApplicationListResponse{ - Applications: apps, - Total: total, - Page: filter.Page, - PageSize: filter.PageSize, - }, nil -} - -// SearchApplications searches applications by text -func (s *PostgresStore) SearchApplications(ctx context.Context, tenantID uuid.UUID, query string) ([]FundingApplication, error) { - searchQuery := ` - SELECT id FROM funding_applications - WHERE tenant_id = $1 - AND status != 'ARCHIVED' - AND ( - title ILIKE $2 - OR application_number ILIKE $2 - OR school_profile::text ILIKE $2 - ) - ORDER BY updated_at DESC - LIMIT 50 - ` - - rows, err := s.pool.Query(ctx, searchQuery, tenantID, "%"+query+"%") - if err != nil { - return nil, fmt.Errorf("failed to search applications: %w", err) - } - defer rows.Close() - - var apps 
[]FundingApplication - for rows.Next() { - var id uuid.UUID - if err := rows.Scan(&id); err != nil { - continue - } - app, err := s.GetApplication(ctx, id) - if err == nil { - apps = append(apps, *app) - } - } - - return apps, nil -} - -// SaveWizardStep saves data for a wizard step -func (s *PostgresStore) SaveWizardStep(ctx context.Context, appID uuid.UUID, step int, data map[string]interface{}) error { - // Get current wizard data - app, err := s.GetApplication(ctx, appID) - if err != nil { - return err - } - - // Initialize wizard data if nil - if app.WizardData == nil { - app.WizardData = make(map[string]interface{}) - } - - // Merge step data - stepKey := fmt.Sprintf("step_%d", step) - app.WizardData[stepKey] = data - app.CurrentStep = step - - // Update application - return s.UpdateApplication(ctx, app) -} - -// GetWizardProgress returns the wizard progress -func (s *PostgresStore) GetWizardProgress(ctx context.Context, appID uuid.UUID) (*WizardProgress, error) { - app, err := s.GetApplication(ctx, appID) - if err != nil { - return nil, err - } - - progress := &WizardProgress{ - CurrentStep: app.CurrentStep, - TotalSteps: app.TotalSteps, - CompletedSteps: []int{}, - FormData: app.WizardData, - LastSavedAt: app.UpdatedAt, - } - - // Determine completed steps from wizard data - for i := 1; i <= app.TotalSteps; i++ { - stepKey := fmt.Sprintf("step_%d", i) - if _, ok := app.WizardData[stepKey]; ok { - progress.CompletedSteps = append(progress.CompletedSteps, i) - } - } - - return progress, nil -} - -// AddAttachment adds an attachment to an application -func (s *PostgresStore) AddAttachment(ctx context.Context, appID uuid.UUID, attachment *Attachment) error { - attachment.ID = uuid.New() - attachment.UploadedAt = time.Now() - - query := ` - INSERT INTO funding_attachments ( - id, application_id, file_name, file_type, file_size, - category, description, storage_path, uploaded_at, uploaded_by - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) - ` - - _, err := 
s.pool.Exec(ctx, query, - attachment.ID, appID, attachment.FileName, attachment.FileType, attachment.FileSize, - attachment.Category, attachment.Description, attachment.StoragePath, - attachment.UploadedAt, attachment.UploadedBy, - ) - - return err -} - -// GetAttachments returns all attachments for an application -func (s *PostgresStore) GetAttachments(ctx context.Context, appID uuid.UUID) ([]Attachment, error) { - query := ` - SELECT id, file_name, file_type, file_size, category, description, storage_path, uploaded_at, uploaded_by - FROM funding_attachments - WHERE application_id = $1 - ORDER BY uploaded_at DESC - ` - - rows, err := s.pool.Query(ctx, query, appID) - if err != nil { - return nil, err - } - defer rows.Close() - - var attachments []Attachment - for rows.Next() { - var a Attachment - err := rows.Scan(&a.ID, &a.FileName, &a.FileType, &a.FileSize, &a.Category, &a.Description, &a.StoragePath, &a.UploadedAt, &a.UploadedBy) - if err != nil { - continue - } - attachments = append(attachments, a) - } - - return attachments, nil -} - -// DeleteAttachment deletes an attachment -func (s *PostgresStore) DeleteAttachment(ctx context.Context, attachmentID uuid.UUID) error { - query := `DELETE FROM funding_attachments WHERE id = $1` - _, err := s.pool.Exec(ctx, query, attachmentID) - return err -} - -// AddHistoryEntry adds an audit trail entry -func (s *PostgresStore) AddHistoryEntry(ctx context.Context, entry *ApplicationHistoryEntry) error { - entry.ID = uuid.New() - entry.PerformedAt = time.Now().Format(time.RFC3339) - - oldValuesJSON, _ := json.Marshal(entry.OldValues) - newValuesJSON, _ := json.Marshal(entry.NewValues) - changedFieldsJSON, _ := json.Marshal(entry.ChangedFields) - - query := ` - INSERT INTO funding_application_history ( - id, application_id, action, changed_fields, old_values, new_values, - performed_by, performed_at, notes - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) - ` - - _, err := s.pool.Exec(ctx, query, - entry.ID, 
entry.ApplicationID, entry.Action, changedFieldsJSON, oldValuesJSON, newValuesJSON, - entry.PerformedBy, entry.PerformedAt, entry.Notes, - ) - - return err -} - -// GetHistory returns the audit trail for an application -func (s *PostgresStore) GetHistory(ctx context.Context, appID uuid.UUID) ([]ApplicationHistoryEntry, error) { - query := ` - SELECT id, application_id, action, changed_fields, old_values, new_values, performed_by, performed_at, notes - FROM funding_application_history - WHERE application_id = $1 - ORDER BY performed_at DESC - ` - - rows, err := s.pool.Query(ctx, query, appID) - if err != nil { - return nil, err - } - defer rows.Close() - - var history []ApplicationHistoryEntry - for rows.Next() { - var entry ApplicationHistoryEntry - var changedFieldsJSON, oldValuesJSON, newValuesJSON []byte - - err := rows.Scan( - &entry.ID, &entry.ApplicationID, &entry.Action, &changedFieldsJSON, &oldValuesJSON, &newValuesJSON, - &entry.PerformedBy, &entry.PerformedAt, &entry.Notes, - ) - if err != nil { - continue - } - - json.Unmarshal(changedFieldsJSON, &entry.ChangedFields) - json.Unmarshal(oldValuesJSON, &entry.OldValues) - json.Unmarshal(newValuesJSON, &entry.NewValues) - - history = append(history, entry) - } - - return history, nil -} - -// GetStatistics returns funding statistics -func (s *PostgresStore) GetStatistics(ctx context.Context, tenantID uuid.UUID) (*FundingStatistics, error) { - stats := &FundingStatistics{ - ByProgram: make(map[FundingProgram]int), - ByState: make(map[FederalState]int), - } - - // Total and by status - query := ` - SELECT - COUNT(*) as total, - COUNT(*) FILTER (WHERE status = 'DRAFT') as draft, - COUNT(*) FILTER (WHERE status = 'SUBMITTED') as submitted, - COUNT(*) FILTER (WHERE status = 'APPROVED') as approved, - COUNT(*) FILTER (WHERE status = 'REJECTED') as rejected, - COALESCE(SUM(requested_amount), 0) as total_requested, - COALESCE(SUM(COALESCE(approved_amount, 0)), 0) as total_approved - FROM funding_applications - WHERE 
tenant_id = $1 AND status != 'ARCHIVED' - ` - - err := s.pool.QueryRow(ctx, query, tenantID).Scan( - &stats.TotalApplications, &stats.DraftCount, &stats.SubmittedCount, - &stats.ApprovedCount, &stats.RejectedCount, - &stats.TotalRequested, &stats.TotalApproved, - ) - if err != nil { - return nil, err - } - - // By program - programQuery := ` - SELECT funding_program, COUNT(*) - FROM funding_applications - WHERE tenant_id = $1 AND status != 'ARCHIVED' - GROUP BY funding_program - ` - rows, _ := s.pool.Query(ctx, programQuery, tenantID) - for rows.Next() { - var program FundingProgram - var count int - rows.Scan(&program, &count) - stats.ByProgram[program] = count - } - rows.Close() - - return stats, nil -} - -// SaveExportBundle saves an export bundle record -func (s *PostgresStore) SaveExportBundle(ctx context.Context, bundle *ExportBundle) error { - bundle.ID = uuid.New() - bundle.GeneratedAt = time.Now() - bundle.ExpiresAt = time.Now().Add(24 * time.Hour) // 24h expiry - - documentsJSON, _ := json.Marshal(bundle.Documents) - - query := ` - INSERT INTO funding_export_bundles ( - id, application_id, documents, generated_at, download_url, expires_at - ) VALUES ($1, $2, $3, $4, $5, $6) - ` - - _, err := s.pool.Exec(ctx, query, - bundle.ID, bundle.ApplicationID, documentsJSON, - bundle.GeneratedAt, bundle.DownloadURL, bundle.ExpiresAt, - ) - - return err -} - -// GetExportBundle retrieves an export bundle -func (s *PostgresStore) GetExportBundle(ctx context.Context, bundleID uuid.UUID) (*ExportBundle, error) { - query := ` - SELECT id, application_id, documents, generated_at, download_url, expires_at - FROM funding_export_bundles - WHERE id = $1 AND expires_at > NOW() - ` - - var bundle ExportBundle - var documentsJSON []byte - - err := s.pool.QueryRow(ctx, query, bundleID).Scan( - &bundle.ID, &bundle.ApplicationID, &documentsJSON, - &bundle.GeneratedAt, &bundle.DownloadURL, &bundle.ExpiresAt, - ) - if err != nil { - return nil, err - } - - 
json.Unmarshal(documentsJSON, &bundle.Documents) - - return &bundle, nil -} - -// generateApplicationNumber creates a unique application number -func (s *PostgresStore) generateApplicationNumber(program FundingProgram, school *SchoolProfile) string { - year := time.Now().Year() - state := "XX" - if school != nil { - state = string(school.FederalState) - } - - prefix := "FA" - switch program { - case FundingProgramDigitalPakt1: - prefix = "DP1" - case FundingProgramDigitalPakt2: - prefix = "DP2" - case FundingProgramLandesfoerderung: - prefix = "LF" - } - - // Get sequence number - var seq int - s.pool.QueryRow(context.Background(), - `SELECT COALESCE(MAX(CAST(SUBSTRING(application_number FROM '\d{5}$') AS INTEGER)), 0) + 1 - FROM funding_applications WHERE application_number LIKE $1`, - fmt.Sprintf("%s-%s-%d-%%", prefix, state, year), - ).Scan(&seq) - - return fmt.Sprintf("%s-%s-%d-%05d", prefix, state, year, seq) -} diff --git a/ai-compliance-sdk/internal/funding/store.go b/ai-compliance-sdk/internal/funding/store.go deleted file mode 100644 index 3143fca..0000000 --- a/ai-compliance-sdk/internal/funding/store.go +++ /dev/null @@ -1,81 +0,0 @@ -package funding - -import ( - "context" - - "github.com/google/uuid" -) - -// Store defines the interface for funding application persistence -type Store interface { - // Application CRUD - CreateApplication(ctx context.Context, app *FundingApplication) error - GetApplication(ctx context.Context, id uuid.UUID) (*FundingApplication, error) - GetApplicationByNumber(ctx context.Context, number string) (*FundingApplication, error) - UpdateApplication(ctx context.Context, app *FundingApplication) error - DeleteApplication(ctx context.Context, id uuid.UUID) error - - // List & Search - ListApplications(ctx context.Context, tenantID uuid.UUID, filter ApplicationFilter) (*ApplicationListResponse, error) - SearchApplications(ctx context.Context, tenantID uuid.UUID, query string) ([]FundingApplication, error) - - // Wizard Data - 
SaveWizardStep(ctx context.Context, appID uuid.UUID, step int, data map[string]interface{}) error - GetWizardProgress(ctx context.Context, appID uuid.UUID) (*WizardProgress, error) - - // Attachments - AddAttachment(ctx context.Context, appID uuid.UUID, attachment *Attachment) error - GetAttachments(ctx context.Context, appID uuid.UUID) ([]Attachment, error) - DeleteAttachment(ctx context.Context, attachmentID uuid.UUID) error - - // Application History (Audit Trail) - AddHistoryEntry(ctx context.Context, entry *ApplicationHistoryEntry) error - GetHistory(ctx context.Context, appID uuid.UUID) ([]ApplicationHistoryEntry, error) - - // Statistics - GetStatistics(ctx context.Context, tenantID uuid.UUID) (*FundingStatistics, error) - - // Export Tracking - SaveExportBundle(ctx context.Context, bundle *ExportBundle) error - GetExportBundle(ctx context.Context, bundleID uuid.UUID) (*ExportBundle, error) -} - -// ApplicationFilter for filtering list queries -type ApplicationFilter struct { - Status *ApplicationStatus `json:"status,omitempty"` - FundingProgram *FundingProgram `json:"funding_program,omitempty"` - FederalState *FederalState `json:"federal_state,omitempty"` - CreatedAfter *string `json:"created_after,omitempty"` - CreatedBefore *string `json:"created_before,omitempty"` - Page int `json:"page"` - PageSize int `json:"page_size"` - SortBy string `json:"sort_by,omitempty"` - SortOrder string `json:"sort_order,omitempty"` // asc, desc -} - -// ApplicationHistoryEntry for audit trail -type ApplicationHistoryEntry struct { - ID uuid.UUID `json:"id"` - ApplicationID uuid.UUID `json:"application_id"` - Action string `json:"action"` // created, updated, submitted, approved, etc. 
- ChangedFields []string `json:"changed_fields,omitempty"` - OldValues map[string]interface{} `json:"old_values,omitempty"` - NewValues map[string]interface{} `json:"new_values,omitempty"` - PerformedBy uuid.UUID `json:"performed_by"` - PerformedAt string `json:"performed_at"` - Notes string `json:"notes,omitempty"` -} - -// FundingStatistics for dashboard -type FundingStatistics struct { - TotalApplications int `json:"total_applications"` - DraftCount int `json:"draft_count"` - SubmittedCount int `json:"submitted_count"` - ApprovedCount int `json:"approved_count"` - RejectedCount int `json:"rejected_count"` - TotalRequested float64 `json:"total_requested"` - TotalApproved float64 `json:"total_approved"` - AverageProcessDays float64 `json:"average_process_days"` - ByProgram map[FundingProgram]int `json:"by_program"` - ByState map[FederalState]int `json:"by_state"` -} diff --git a/ai-compliance-sdk/internal/gci/engine.go b/ai-compliance-sdk/internal/gci/engine.go deleted file mode 100644 index 1599f8b..0000000 --- a/ai-compliance-sdk/internal/gci/engine.go +++ /dev/null @@ -1,371 +0,0 @@ -package gci - -import ( - "fmt" - "math" - "time" -) - -// Engine calculates the GCI score -type Engine struct{} - -// NewEngine creates a new GCI calculation engine -func NewEngine() *Engine { - return &Engine{} -} - -// Calculate computes the full GCI result for a tenant -func (e *Engine) Calculate(tenantID string, profileID string) *GCIResult { - now := time.Now() - profile := GetProfile(profileID) - auditTrail := []AuditEntry{} - - // Step 1: Get module data (mock for now) - modules := MockModuleData(tenantID) - certDates := MockCertificateData() - - // Step 2: Calculate Level 1 - Module Scores with validity - for i := range modules { - m := &modules[i] - if m.Assigned > 0 { - m.RawScore = float64(m.Completed) / float64(m.Assigned) * 100.0 - } - // Apply validity factor - if validUntil, ok := certDates[m.ModuleID]; ok { - m.ValidityFactor = CalculateValidityFactor(validUntil, 
now) - } else { - m.ValidityFactor = 1.0 // No certificate tracking = assume valid - } - m.FinalScore = m.RawScore * m.ValidityFactor - - if m.ValidityFactor < 1.0 { - auditTrail = append(auditTrail, AuditEntry{ - Timestamp: now, - Factor: "validity_decay", - Description: fmt.Sprintf("Modul '%s': Gueltigkeitsfaktor %.2f (Zertifikat laeuft ab/abgelaufen)", m.ModuleName, m.ValidityFactor), - Value: m.ValidityFactor, - Impact: "negative", - }) - } - } - - // Step 3: Calculate Level 2 - Risk-Weighted Scores per area - areaModules := map[string][]ModuleScore{ - "dsgvo": {}, - "nis2": {}, - "iso27001": {}, - "ai_act": {}, - } - for _, m := range modules { - if _, ok := areaModules[m.Category]; ok { - areaModules[m.Category] = append(areaModules[m.Category], m) - } - } - - level2Areas := []RiskWeightedScore{} - areaNames := map[string]string{ - "dsgvo": "DSGVO", - "nis2": "NIS2", - "iso27001": "ISO 27001", - "ai_act": "EU AI Act", - } - - for areaID, mods := range areaModules { - rws := RiskWeightedScore{ - AreaID: areaID, - AreaName: areaNames[areaID], - Modules: mods, - } - for _, m := range mods { - rws.WeightedSum += m.FinalScore * m.RiskWeight - rws.TotalWeight += m.RiskWeight - } - if rws.TotalWeight > 0 { - rws.AreaScore = rws.WeightedSum / rws.TotalWeight - } - level2Areas = append(level2Areas, rws) - } - - // Step 4: Calculate Level 3 - Regulation Area Scores - areaScores := []RegulationAreaScore{} - for _, rws := range level2Areas { - weight := profile.Weights[rws.AreaID] - completedCount := 0 - for _, m := range rws.Modules { - if m.Completed >= m.Assigned && m.Assigned > 0 { - completedCount++ - } - } - ras := RegulationAreaScore{ - RegulationID: rws.AreaID, - RegulationName: rws.AreaName, - Score: math.Round(rws.AreaScore*100) / 100, - Weight: weight, - WeightedScore: rws.AreaScore * weight, - ModuleCount: len(rws.Modules), - CompletedCount: completedCount, - } - areaScores = append(areaScores, ras) - - auditTrail = append(auditTrail, AuditEntry{ - Timestamp: 
now, - Factor: "area_score", - Description: fmt.Sprintf("Bereich '%s': Score %.1f, Gewicht %.0f%%", rws.AreaName, rws.AreaScore, weight*100), - Value: rws.AreaScore, - Impact: "neutral", - }) - } - - // Step 5: Calculate raw GCI - rawGCI := 0.0 - totalWeight := 0.0 - for _, ras := range areaScores { - rawGCI += ras.WeightedScore - totalWeight += ras.Weight - } - if totalWeight > 0 { - rawGCI = rawGCI / totalWeight - } - - // Step 6: Apply Criticality Multiplier - criticalityMult := calculateCriticalityMultiplier(modules) - auditTrail = append(auditTrail, AuditEntry{ - Timestamp: now, - Factor: "criticality_multiplier", - Description: fmt.Sprintf("Kritikalitaetsmultiplikator: %.3f", criticalityMult), - Value: criticalityMult, - Impact: func() string { - if criticalityMult < 1.0 { - return "negative" - } - return "neutral" - }(), - }) - - // Step 7: Apply Incident Adjustment - openInc, critInc := MockIncidentData() - incidentAdj := calculateIncidentAdjustment(openInc, critInc) - auditTrail = append(auditTrail, AuditEntry{ - Timestamp: now, - Factor: "incident_adjustment", - Description: fmt.Sprintf("Vorfallsanpassung: %.3f (%d offen, %d kritisch)", incidentAdj, openInc, critInc), - Value: incidentAdj, - Impact: "negative", - }) - - // Step 8: Final GCI - finalGCI := rawGCI * criticalityMult * incidentAdj - finalGCI = math.Max(0, math.Min(100, math.Round(finalGCI*10)/10)) - - // Step 9: Determine Maturity Level - maturity := determineMaturityLevel(finalGCI) - - auditTrail = append(auditTrail, AuditEntry{ - Timestamp: now, - Factor: "final_gci", - Description: fmt.Sprintf("GCI-Endergebnis: %.1f → Reifegrad: %s", finalGCI, MaturityLabels[maturity]), - Value: finalGCI, - Impact: "neutral", - }) - - return &GCIResult{ - TenantID: tenantID, - GCIScore: finalGCI, - MaturityLevel: maturity, - MaturityLabel: MaturityLabels[maturity], - CalculatedAt: now, - Profile: profileID, - AreaScores: areaScores, - CriticalityMult: criticalityMult, - IncidentAdj: incidentAdj, - 
AuditTrail: auditTrail, - } -} - -// CalculateBreakdown returns the full 4-level breakdown -func (e *Engine) CalculateBreakdown(tenantID string, profileID string) *GCIBreakdown { - result := e.Calculate(tenantID, profileID) - modules := MockModuleData(tenantID) - certDates := MockCertificateData() - now := time.Now() - - // Recalculate module scores for the breakdown - for i := range modules { - m := &modules[i] - if m.Assigned > 0 { - m.RawScore = float64(m.Completed) / float64(m.Assigned) * 100.0 - } - if validUntil, ok := certDates[m.ModuleID]; ok { - m.ValidityFactor = CalculateValidityFactor(validUntil, now) - } else { - m.ValidityFactor = 1.0 - } - m.FinalScore = m.RawScore * m.ValidityFactor - } - - // Build Level 2 areas - areaModules := map[string][]ModuleScore{} - for _, m := range modules { - areaModules[m.Category] = append(areaModules[m.Category], m) - } - - areaNames := map[string]string{"dsgvo": "DSGVO", "nis2": "NIS2", "iso27001": "ISO 27001", "ai_act": "EU AI Act"} - level2 := []RiskWeightedScore{} - for areaID, mods := range areaModules { - rws := RiskWeightedScore{AreaID: areaID, AreaName: areaNames[areaID], Modules: mods} - for _, m := range mods { - rws.WeightedSum += m.FinalScore * m.RiskWeight - rws.TotalWeight += m.RiskWeight - } - if rws.TotalWeight > 0 { - rws.AreaScore = rws.WeightedSum / rws.TotalWeight - } - level2 = append(level2, rws) - } - - return &GCIBreakdown{ - GCIResult: *result, - Level1Modules: modules, - Level2Areas: level2, - } -} - -// GetHistory returns historical GCI snapshots -func (e *Engine) GetHistory(tenantID string) []GCISnapshot { - // Add current score to history - result := e.Calculate(tenantID, "default") - history := MockGCIHistory(tenantID) - current := GCISnapshot{ - TenantID: tenantID, - Score: result.GCIScore, - MaturityLevel: result.MaturityLevel, - AreaScores: make(map[string]float64), - CalculatedAt: result.CalculatedAt, - } - for _, as := range result.AreaScores { - current.AreaScores[as.RegulationID] = 
as.Score - } - history = append(history, current) - return history -} - -// GetMatrix returns the compliance matrix (roles x regulations) -func (e *Engine) GetMatrix(tenantID string) []ComplianceMatrixEntry { - modules := MockModuleData(tenantID) - - roles := []struct { - ID string - Name string - }{ - {"management", "Geschaeftsfuehrung"}, - {"it_security", "IT-Sicherheit / CISO"}, - {"data_protection", "Datenschutz / DSB"}, - {"hr", "Personalwesen"}, - {"general", "Allgemeine Mitarbeiter"}, - } - - // Define which modules are relevant per role - roleModules := map[string][]string{ - "management": {"dsgvo-grundlagen", "nis2-management", "ai-governance", "iso-isms"}, - "it_security": {"nis2-risikomanagement", "nis2-incident-response", "iso-zugangssteuerung", "iso-kryptografie", "ai-hochrisiko"}, - "data_protection": {"dsgvo-grundlagen", "dsgvo-betroffenenrechte", "dsgvo-tom", "dsgvo-dsfa", "dsgvo-auftragsverarbeitung"}, - "hr": {"dsgvo-grundlagen", "dsgvo-betroffenenrechte", "nis2-management"}, - "general": {"dsgvo-grundlagen", "nis2-risikomanagement", "ai-risikokategorien", "ai-transparenz"}, - } - - moduleMap := map[string]ModuleScore{} - for _, m := range modules { - moduleMap[m.ModuleID] = m - } - - entries := []ComplianceMatrixEntry{} - for _, role := range roles { - entry := ComplianceMatrixEntry{ - Role: role.ID, - RoleName: role.Name, - Regulations: map[string]float64{}, - } - - regScores := map[string][]float64{} - requiredModuleIDs := roleModules[role.ID] - entry.RequiredModules = len(requiredModuleIDs) - - for _, modID := range requiredModuleIDs { - if m, ok := moduleMap[modID]; ok { - score := 0.0 - if m.Assigned > 0 { - score = float64(m.Completed) / float64(m.Assigned) * 100 - } - regScores[m.Category] = append(regScores[m.Category], score) - if m.Completed >= m.Assigned && m.Assigned > 0 { - entry.CompletedModules++ - } - } - } - - totalScore := 0.0 - count := 0 - for reg, scores := range regScores { - sum := 0.0 - for _, s := range scores { - sum += 
s - } - avg := sum / float64(len(scores)) - entry.Regulations[reg] = math.Round(avg*10) / 10 - totalScore += avg - count++ - } - if count > 0 { - entry.OverallScore = math.Round(totalScore/float64(count)*10) / 10 - } - - entries = append(entries, entry) - } - - return entries -} - -// Helper functions - -func calculateCriticalityMultiplier(modules []ModuleScore) float64 { - criticalModules := 0 - criticalLow := 0 - for _, m := range modules { - if m.RiskWeight >= 2.5 { - criticalModules++ - if m.FinalScore < 50 { - criticalLow++ - } - } - } - if criticalModules == 0 { - return 1.0 - } - // Reduce score if critical modules have low completion - ratio := float64(criticalLow) / float64(criticalModules) - return 1.0 - (ratio * 0.15) // max 15% reduction -} - -func calculateIncidentAdjustment(openIncidents, criticalIncidents int) float64 { - adj := 1.0 - // Each open incident reduces by 1% - adj -= float64(openIncidents) * 0.01 - // Each critical incident reduces by additional 3% - adj -= float64(criticalIncidents) * 0.03 - return math.Max(0.8, adj) // minimum 80% (max 20% reduction) -} - -func determineMaturityLevel(score float64) string { - switch { - case score >= 90: - return MaturityOptimized - case score >= 75: - return MaturityManaged - case score >= 60: - return MaturityDefined - case score >= 40: - return MaturityReactive - default: - return MaturityHighRisk - } -} diff --git a/ai-compliance-sdk/internal/gci/iso_gap_analysis.go b/ai-compliance-sdk/internal/gci/iso_gap_analysis.go deleted file mode 100644 index 9032f45..0000000 --- a/ai-compliance-sdk/internal/gci/iso_gap_analysis.go +++ /dev/null @@ -1,188 +0,0 @@ -package gci - -import "math" - -// ISOGapAnalysis represents the complete ISO 27001 gap analysis -type ISOGapAnalysis struct { - TenantID string `json:"tenant_id"` - TotalControls int `json:"total_controls"` - CoveredFull int `json:"covered_full"` - CoveredPartial int `json:"covered_partial"` - NotCovered int `json:"not_covered"` - CoveragePercent 
float64 `json:"coverage_percent"` - CategorySummaries []ISOCategorySummary `json:"category_summaries"` - ControlDetails []ISOControlDetail `json:"control_details"` - Gaps []ISOGap `json:"gaps"` -} - -// ISOControlDetail shows coverage status for a single control -type ISOControlDetail struct { - Control ISOControl `json:"control"` - CoverageLevel string `json:"coverage_level"` // full, partial, none - CoveredBy []string `json:"covered_by"` // module IDs - Score float64 `json:"score"` // 0-100 -} - -// ISOGap represents an identified gap in ISO coverage -type ISOGap struct { - ControlID string `json:"control_id"` - ControlName string `json:"control_name"` - Category string `json:"category"` - Priority string `json:"priority"` // high, medium, low - Recommendation string `json:"recommendation"` -} - -// CalculateISOGapAnalysis performs the ISO 27001 gap analysis -func CalculateISOGapAnalysis(tenantID string) *ISOGapAnalysis { - modules := MockModuleData(tenantID) - moduleMap := map[string]ModuleScore{} - for _, m := range modules { - moduleMap[m.ModuleID] = m - } - - // Build reverse mapping: control -> modules covering it - controlCoverage := map[string][]string{} - controlCoverageLevel := map[string]string{} - for _, mapping := range DefaultISOModuleMappings { - for _, controlID := range mapping.ISOControls { - controlCoverage[controlID] = append(controlCoverage[controlID], mapping.ModuleID) - // Use the highest coverage level - existingLevel := controlCoverageLevel[controlID] - if mapping.CoverageLevel == "full" || existingLevel == "" { - controlCoverageLevel[controlID] = mapping.CoverageLevel - } - } - } - - // Analyze each control - details := []ISOControlDetail{} - gaps := []ISOGap{} - coveredFull := 0 - coveredPartial := 0 - notCovered := 0 - - categoryCounts := map[string]*ISOCategorySummary{ - "A.5": {CategoryID: "A.5", CategoryName: "Organisatorische Massnahmen"}, - "A.6": {CategoryID: "A.6", CategoryName: "Personelle Massnahmen"}, - "A.7": {CategoryID: 
"A.7", CategoryName: "Physische Massnahmen"}, - "A.8": {CategoryID: "A.8", CategoryName: "Technologische Massnahmen"}, - } - - for _, control := range ISOControls { - coveredBy := controlCoverage[control.ID] - level := controlCoverageLevel[control.ID] - - if len(coveredBy) == 0 { - level = "none" - } - - // Calculate score based on module completion - score := 0.0 - if len(coveredBy) > 0 { - scoreSum := 0.0 - count := 0 - for _, modID := range coveredBy { - if m, ok := moduleMap[modID]; ok && m.Assigned > 0 { - scoreSum += float64(m.Completed) / float64(m.Assigned) * 100 - count++ - } - } - if count > 0 { - score = scoreSum / float64(count) - } - // Adjust for coverage level - if level == "partial" { - score *= 0.7 // partial coverage reduces effective score - } - } - - detail := ISOControlDetail{ - Control: control, - CoverageLevel: level, - CoveredBy: coveredBy, - Score: math.Round(score*10) / 10, - } - details = append(details, detail) - - // Count by category - cat := categoryCounts[control.CategoryID] - if cat != nil { - cat.TotalControls++ - switch level { - case "full": - coveredFull++ - cat.CoveredFull++ - case "partial": - coveredPartial++ - cat.CoveredPartial++ - default: - notCovered++ - cat.NotCovered++ - // Generate gap recommendation - gap := ISOGap{ - ControlID: control.ID, - ControlName: control.Name, - Category: control.Category, - Priority: determineGapPriority(control), - Recommendation: generateGapRecommendation(control), - } - gaps = append(gaps, gap) - } - } - } - - totalControls := len(ISOControls) - coveragePercent := 0.0 - if totalControls > 0 { - coveragePercent = math.Round(float64(coveredFull+coveredPartial)/float64(totalControls)*100*10) / 10 - } - - summaries := []ISOCategorySummary{} - for _, catID := range []string{"A.5", "A.6", "A.7", "A.8"} { - if cat, ok := categoryCounts[catID]; ok { - summaries = append(summaries, *cat) - } - } - - return &ISOGapAnalysis{ - TenantID: tenantID, - TotalControls: totalControls, - CoveredFull: 
coveredFull, - CoveredPartial: coveredPartial, - NotCovered: notCovered, - CoveragePercent: coveragePercent, - CategorySummaries: summaries, - ControlDetails: details, - Gaps: gaps, - } -} - -func determineGapPriority(control ISOControl) string { - // High priority for access, incident, and data protection controls - highPriority := map[string]bool{ - "A.5.15": true, "A.5.17": true, "A.5.24": true, "A.5.26": true, - "A.5.34": true, "A.8.2": true, "A.8.5": true, "A.8.7": true, - "A.8.10": true, "A.8.20": true, - } - if highPriority[control.ID] { - return "high" - } - // Medium for organizational and people controls - if control.CategoryID == "A.5" || control.CategoryID == "A.6" { - return "medium" - } - return "low" -} - -func generateGapRecommendation(control ISOControl) string { - recommendations := map[string]string{ - "organizational": "Erstellen Sie eine Richtlinie und weisen Sie Verantwortlichkeiten zu fuer: " + control.Name, - "people": "Implementieren Sie Schulungen und Prozesse fuer: " + control.Name, - "physical": "Definieren Sie physische Sicherheitsmassnahmen fuer: " + control.Name, - "technological": "Implementieren Sie technische Kontrollen fuer: " + control.Name, - } - if rec, ok := recommendations[control.Category]; ok { - return rec - } - return "Massnahmen implementieren fuer: " + control.Name -} diff --git a/ai-compliance-sdk/internal/gci/iso_mapping.go b/ai-compliance-sdk/internal/gci/iso_mapping.go deleted file mode 100644 index 8f1a8fa..0000000 --- a/ai-compliance-sdk/internal/gci/iso_mapping.go +++ /dev/null @@ -1,207 +0,0 @@ -package gci - -// ISOControl represents an ISO 27001:2022 Annex A control -type ISOControl struct { - ID string `json:"id"` // e.g. 
"A.5.1" - Name string `json:"name"` - Category string `json:"category"` // organizational, people, physical, technological - CategoryID string `json:"category_id"` // A.5, A.6, A.7, A.8 - Description string `json:"description"` -} - -// ISOModuleMapping maps a course/module to ISO controls -type ISOModuleMapping struct { - ModuleID string `json:"module_id"` - ModuleName string `json:"module_name"` - ISOControls []string `json:"iso_controls"` // control IDs - CoverageLevel string `json:"coverage_level"` // full, partial, none -} - -// ISO 27001:2022 Annex A controls (representative selection) -var ISOControls = []ISOControl{ - // A.5 Organizational Controls (37 controls, showing key ones) - {ID: "A.5.1", Name: "Informationssicherheitsrichtlinien", Category: "organizational", CategoryID: "A.5", Description: "Informationssicherheitsleitlinie und themenspezifische Richtlinien"}, - {ID: "A.5.2", Name: "Rollen und Verantwortlichkeiten", Category: "organizational", CategoryID: "A.5", Description: "Definition und Zuweisung von Informationssicherheitsrollen"}, - {ID: "A.5.3", Name: "Aufgabentrennung", Category: "organizational", CategoryID: "A.5", Description: "Trennung von konfligierenden Aufgaben und Verantwortlichkeiten"}, - {ID: "A.5.4", Name: "Managementverantwortung", Category: "organizational", CategoryID: "A.5", Description: "Fuehrungskraefte muessen Sicherheitsrichtlinien einhalten und durchsetzen"}, - {ID: "A.5.5", Name: "Kontakt mit Behoerden", Category: "organizational", CategoryID: "A.5", Description: "Pflege von Kontakten zu relevanten Aufsichtsbehoerden"}, - {ID: "A.5.6", Name: "Kontakt mit Interessengruppen", Category: "organizational", CategoryID: "A.5", Description: "Kontakt zu Fachgruppen und Sicherheitsforen"}, - {ID: "A.5.7", Name: "Bedrohungsintelligenz", Category: "organizational", CategoryID: "A.5", Description: "Sammlung und Analyse von Bedrohungsinformationen"}, - {ID: "A.5.8", Name: "Informationssicherheit im Projektmanagement", Category: 
"organizational", CategoryID: "A.5", Description: "Integration von Sicherheit in Projektmanagement"}, - {ID: "A.5.9", Name: "Inventar der Informationswerte", Category: "organizational", CategoryID: "A.5", Description: "Inventarisierung und Verwaltung von Informationswerten"}, - {ID: "A.5.10", Name: "Zuleassige Nutzung", Category: "organizational", CategoryID: "A.5", Description: "Regeln fuer die zuleassige Nutzung von Informationswerten"}, - {ID: "A.5.11", Name: "Rueckgabe von Werten", Category: "organizational", CategoryID: "A.5", Description: "Rueckgabe von Werten bei Beendigung"}, - {ID: "A.5.12", Name: "Klassifizierung von Informationen", Category: "organizational", CategoryID: "A.5", Description: "Klassifizierungsschema fuer Informationen"}, - {ID: "A.5.13", Name: "Kennzeichnung von Informationen", Category: "organizational", CategoryID: "A.5", Description: "Kennzeichnung gemaess Klassifizierung"}, - {ID: "A.5.14", Name: "Informationsuebertragung", Category: "organizational", CategoryID: "A.5", Description: "Regeln fuer sichere Informationsuebertragung"}, - {ID: "A.5.15", Name: "Zugangssteuerung", Category: "organizational", CategoryID: "A.5", Description: "Zugangssteuerungsrichtlinie"}, - {ID: "A.5.16", Name: "Identitaetsmanagement", Category: "organizational", CategoryID: "A.5", Description: "Verwaltung des Lebenszyklus von Identitaeten"}, - {ID: "A.5.17", Name: "Authentifizierungsinformationen", Category: "organizational", CategoryID: "A.5", Description: "Verwaltung von Authentifizierungsinformationen"}, - {ID: "A.5.18", Name: "Zugriffsrechte", Category: "organizational", CategoryID: "A.5", Description: "Vergabe, Pruefung und Entzug von Zugriffsrechten"}, - {ID: "A.5.19", Name: "Informationssicherheit in Lieferantenbeziehungen", Category: "organizational", CategoryID: "A.5", Description: "Sicherheitsanforderungen an Lieferanten"}, - {ID: "A.5.20", Name: "Informationssicherheit in Lieferantenvereinbarungen", Category: "organizational", CategoryID: "A.5", 
Description: "Sicherheitsklauseln in Vertraegen"}, - {ID: "A.5.21", Name: "IKT-Lieferkette", Category: "organizational", CategoryID: "A.5", Description: "Management der IKT-Lieferkette"}, - {ID: "A.5.22", Name: "Ueberwachung von Lieferantenservices", Category: "organizational", CategoryID: "A.5", Description: "Ueberwachung und Pruefung von Lieferantenservices"}, - {ID: "A.5.23", Name: "Cloud-Sicherheit", Category: "organizational", CategoryID: "A.5", Description: "Informationssicherheit fuer Cloud-Dienste"}, - {ID: "A.5.24", Name: "Vorfallsmanagement - Planung", Category: "organizational", CategoryID: "A.5", Description: "Planung und Vorbereitung des Vorfallsmanagements"}, - {ID: "A.5.25", Name: "Vorfallsbeurteilung", Category: "organizational", CategoryID: "A.5", Description: "Beurteilung und Entscheidung ueber Sicherheitsereignisse"}, - {ID: "A.5.26", Name: "Vorfallsreaktion", Category: "organizational", CategoryID: "A.5", Description: "Reaktion auf Sicherheitsvorfaelle"}, - {ID: "A.5.27", Name: "Aus Vorfaellen lernen", Category: "organizational", CategoryID: "A.5", Description: "Lessons Learned aus Sicherheitsvorfaellen"}, - {ID: "A.5.28", Name: "Beweissicherung", Category: "organizational", CategoryID: "A.5", Description: "Identifikation und Sicherung von Beweisen"}, - {ID: "A.5.29", Name: "Informationssicherheit bei Stoerungen", Category: "organizational", CategoryID: "A.5", Description: "Sicherheit waehrend Stoerungen und Krisen"}, - {ID: "A.5.30", Name: "IKT-Bereitschaft fuer Business Continuity", Category: "organizational", CategoryID: "A.5", Description: "IKT-Bereitschaft zur Unterstuetzung der Geschaeftskontinuitaet"}, - {ID: "A.5.31", Name: "Rechtliche Anforderungen", Category: "organizational", CategoryID: "A.5", Description: "Einhaltung rechtlicher und vertraglicher Anforderungen"}, - {ID: "A.5.32", Name: "Geistige Eigentumsrechte", Category: "organizational", CategoryID: "A.5", Description: "Schutz geistigen Eigentums"}, - {ID: "A.5.33", Name: "Schutz 
von Aufzeichnungen", Category: "organizational", CategoryID: "A.5", Description: "Schutz von Aufzeichnungen vor Verlust und Manipulation"}, - {ID: "A.5.34", Name: "Datenschutz und PII", Category: "organizational", CategoryID: "A.5", Description: "Datenschutz und Schutz personenbezogener Daten"}, - {ID: "A.5.35", Name: "Unabhaengige Ueberpruefung", Category: "organizational", CategoryID: "A.5", Description: "Unabhaengige Ueberpruefung der Informationssicherheit"}, - {ID: "A.5.36", Name: "Richtlinienkonformitaet", Category: "organizational", CategoryID: "A.5", Description: "Einhaltung von Richtlinien und Standards"}, - {ID: "A.5.37", Name: "Dokumentierte Betriebsverfahren", Category: "organizational", CategoryID: "A.5", Description: "Dokumentation von Betriebsverfahren"}, - - // A.6 People Controls (8 controls) - {ID: "A.6.1", Name: "Ueberpruefen", Category: "people", CategoryID: "A.6", Description: "Hintergrundpruefungen vor der Einstellung"}, - {ID: "A.6.2", Name: "Beschaeftigungsbedingungen", Category: "people", CategoryID: "A.6", Description: "Sicherheitsanforderungen in Arbeitsvertraegen"}, - {ID: "A.6.3", Name: "Sensibilisierung und Schulung", Category: "people", CategoryID: "A.6", Description: "Awareness-Programme und Schulungen"}, - {ID: "A.6.4", Name: "Disziplinarverfahren", Category: "people", CategoryID: "A.6", Description: "Formales Disziplinarverfahren"}, - {ID: "A.6.5", Name: "Verantwortlichkeiten nach Beendigung", Category: "people", CategoryID: "A.6", Description: "Sicherheitspflichten nach Beendigung des Beschaeftigungsverhaeltnisses"}, - {ID: "A.6.6", Name: "Vertraulichkeitsvereinbarungen", Category: "people", CategoryID: "A.6", Description: "Vertraulichkeits- und Geheimhaltungsvereinbarungen"}, - {ID: "A.6.7", Name: "Remote-Arbeit", Category: "people", CategoryID: "A.6", Description: "Sicherheitsmassnahmen fuer Remote-Arbeit"}, - {ID: "A.6.8", Name: "Meldung von Sicherheitsereignissen", Category: "people", CategoryID: "A.6", Description: 
"Mechanismen zur Meldung von Sicherheitsereignissen"}, - - // A.7 Physical Controls (14 controls, showing key ones) - {ID: "A.7.1", Name: "Physische Sicherheitsperimeter", Category: "physical", CategoryID: "A.7", Description: "Definition physischer Sicherheitszonen"}, - {ID: "A.7.2", Name: "Physischer Zutritt", Category: "physical", CategoryID: "A.7", Description: "Zutrittskontrolle zu Sicherheitszonen"}, - {ID: "A.7.3", Name: "Sicherung von Bueros und Raeumen", Category: "physical", CategoryID: "A.7", Description: "Physische Sicherheit fuer Bueros und Raeume"}, - {ID: "A.7.4", Name: "Physische Sicherheitsueberwachung", Category: "physical", CategoryID: "A.7", Description: "Ueberwachung physischer Sicherheit"}, - {ID: "A.7.5", Name: "Schutz vor Umweltgefahren", Category: "physical", CategoryID: "A.7", Description: "Schutz gegen natuerliche und menschgemachte Gefahren"}, - {ID: "A.7.6", Name: "Arbeit in Sicherheitszonen", Category: "physical", CategoryID: "A.7", Description: "Regeln fuer das Arbeiten in Sicherheitszonen"}, - {ID: "A.7.7", Name: "Aufgeraemter Schreibtisch", Category: "physical", CategoryID: "A.7", Description: "Clean-Desk und Clear-Screen Richtlinie"}, - {ID: "A.7.8", Name: "Geraeteplatzierung", Category: "physical", CategoryID: "A.7", Description: "Platzierung und Schutz von Geraeten"}, - {ID: "A.7.9", Name: "Sicherheit von Geraeten ausserhalb", Category: "physical", CategoryID: "A.7", Description: "Sicherheit von Geraeten ausserhalb der Raeumlichkeiten"}, - {ID: "A.7.10", Name: "Speichermedien", Category: "physical", CategoryID: "A.7", Description: "Verwaltung von Speichermedien"}, - {ID: "A.7.11", Name: "Versorgungseinrichtungen", Category: "physical", CategoryID: "A.7", Description: "Schutz vor Ausfaellen der Versorgungseinrichtungen"}, - {ID: "A.7.12", Name: "Verkabelungssicherheit", Category: "physical", CategoryID: "A.7", Description: "Schutz der Verkabelung"}, - {ID: "A.7.13", Name: "Instandhaltung von Geraeten", Category: "physical", 
CategoryID: "A.7", Description: "Korrekte Instandhaltung von Geraeten"}, - {ID: "A.7.14", Name: "Sichere Entsorgung", Category: "physical", CategoryID: "A.7", Description: "Sichere Entsorgung oder Wiederverwendung"}, - - // A.8 Technological Controls (34 controls, showing key ones) - {ID: "A.8.1", Name: "Endbenutzergeraete", Category: "technological", CategoryID: "A.8", Description: "Sicherheit von Endbenutzergeraeten"}, - {ID: "A.8.2", Name: "Privilegierte Zugriffsrechte", Category: "technological", CategoryID: "A.8", Description: "Verwaltung privilegierter Zugriffsrechte"}, - {ID: "A.8.3", Name: "Informationszugangsbeschraenkung", Category: "technological", CategoryID: "A.8", Description: "Beschraenkung des Zugangs zu Informationen"}, - {ID: "A.8.4", Name: "Zugang zu Quellcode", Category: "technological", CategoryID: "A.8", Description: "Sicherer Zugang zu Quellcode"}, - {ID: "A.8.5", Name: "Sichere Authentifizierung", Category: "technological", CategoryID: "A.8", Description: "Sichere Authentifizierungstechnologien"}, - {ID: "A.8.6", Name: "Kapazitaetsmanagement", Category: "technological", CategoryID: "A.8", Description: "Ueberwachung und Anpassung der Kapazitaet"}, - {ID: "A.8.7", Name: "Schutz gegen Malware", Category: "technological", CategoryID: "A.8", Description: "Schutz vor Schadprogrammen"}, - {ID: "A.8.8", Name: "Management technischer Schwachstellen", Category: "technological", CategoryID: "A.8", Description: "Identifikation und Behebung von Schwachstellen"}, - {ID: "A.8.9", Name: "Konfigurationsmanagement", Category: "technological", CategoryID: "A.8", Description: "Sichere Konfiguration von Systemen"}, - {ID: "A.8.10", Name: "Datensicherung", Category: "technological", CategoryID: "A.8", Description: "Erstellen und Testen von Datensicherungen"}, - {ID: "A.8.11", Name: "Datenredundanz", Category: "technological", CategoryID: "A.8", Description: "Redundanz von Informationsverarbeitungseinrichtungen"}, - {ID: "A.8.12", Name: "Protokollierung", 
Category: "technological", CategoryID: "A.8", Description: "Aufzeichnung und Ueberwachung von Aktivitaeten"}, - {ID: "A.8.13", Name: "Ueberwachung von Aktivitaeten", Category: "technological", CategoryID: "A.8", Description: "Ueberwachung von Netzwerken und Systemen"}, - {ID: "A.8.14", Name: "Zeitsynchronisation", Category: "technological", CategoryID: "A.8", Description: "Synchronisation von Uhren"}, - {ID: "A.8.15", Name: "Nutzung privilegierter Hilfsprogramme", Category: "technological", CategoryID: "A.8", Description: "Einschraenkung privilegierter Hilfsprogramme"}, - {ID: "A.8.16", Name: "Softwareinstallation", Category: "technological", CategoryID: "A.8", Description: "Kontrolle der Softwareinstallation"}, - {ID: "A.8.17", Name: "Netzwerksicherheit", Category: "technological", CategoryID: "A.8", Description: "Sicherheit von Netzwerken"}, - {ID: "A.8.18", Name: "Netzwerksegmentierung", Category: "technological", CategoryID: "A.8", Description: "Segmentierung von Netzwerken"}, - {ID: "A.8.19", Name: "Webfilterung", Category: "technological", CategoryID: "A.8", Description: "Filterung des Webzugangs"}, - {ID: "A.8.20", Name: "Kryptografie", Category: "technological", CategoryID: "A.8", Description: "Einsatz kryptografischer Massnahmen"}, - {ID: "A.8.21", Name: "Sichere Entwicklung", Category: "technological", CategoryID: "A.8", Description: "Sichere Entwicklungslebenszyklus"}, - {ID: "A.8.22", Name: "Sicherheitsanforderungen bei Applikationen", Category: "technological", CategoryID: "A.8", Description: "Sicherheitsanforderungen bei Anwendungen"}, - {ID: "A.8.23", Name: "Sichere Systemarchitektur", Category: "technological", CategoryID: "A.8", Description: "Sicherheitsprinzipien in der Systemarchitektur"}, - {ID: "A.8.24", Name: "Sicheres Programmieren", Category: "technological", CategoryID: "A.8", Description: "Sichere Programmierpraktiken"}, - {ID: "A.8.25", Name: "Sicherheitstests", Category: "technological", CategoryID: "A.8", Description: "Sicherheitstests 
in der Entwicklung und Abnahme"}, - {ID: "A.8.26", Name: "Auslagerung der Entwicklung", Category: "technological", CategoryID: "A.8", Description: "Ueberwachung ausgelagerter Entwicklung"}, - {ID: "A.8.27", Name: "Trennung von Umgebungen", Category: "technological", CategoryID: "A.8", Description: "Trennung von Entwicklungs-, Test- und Produktionsumgebungen"}, - {ID: "A.8.28", Name: "Aenderungsmanagement", Category: "technological", CategoryID: "A.8", Description: "Formales Aenderungsmanagement"}, - {ID: "A.8.29", Name: "Sicherheitstests in der Abnahme", Category: "technological", CategoryID: "A.8", Description: "Durchfuehrung von Sicherheitstests vor Abnahme"}, - {ID: "A.8.30", Name: "Datenloeschung", Category: "technological", CategoryID: "A.8", Description: "Sichere Datenloeschung"}, - {ID: "A.8.31", Name: "Datenmaskierung", Category: "technological", CategoryID: "A.8", Description: "Techniken zur Datenmaskierung"}, - {ID: "A.8.32", Name: "Verhinderung von Datenverlust", Category: "technological", CategoryID: "A.8", Description: "DLP-Massnahmen"}, - {ID: "A.8.33", Name: "Testinformationen", Category: "technological", CategoryID: "A.8", Description: "Schutz von Testinformationen"}, - {ID: "A.8.34", Name: "Audit-Informationssysteme", Category: "technological", CategoryID: "A.8", Description: "Schutz von Audit-Tools und -systemen"}, -} - -// Default mappings: which modules cover which ISO controls -var DefaultISOModuleMappings = []ISOModuleMapping{ - { - ModuleID: "iso-isms", ModuleName: "ISMS Grundlagen", - ISOControls: []string{"A.5.1", "A.5.2", "A.5.3", "A.5.4", "A.5.35", "A.5.36"}, - CoverageLevel: "full", - }, - { - ModuleID: "iso-risikobewertung", ModuleName: "Risikobewertung", - ISOControls: []string{"A.5.7", "A.5.8", "A.5.9", "A.5.10", "A.5.12", "A.5.13"}, - CoverageLevel: "full", - }, - { - ModuleID: "iso-zugangssteuerung", ModuleName: "Zugangssteuerung", - ISOControls: []string{"A.5.15", "A.5.16", "A.5.17", "A.5.18", "A.8.2", "A.8.3", "A.8.5"}, - 
CoverageLevel: "full", - }, - { - ModuleID: "iso-kryptografie", ModuleName: "Kryptografie", - ISOControls: []string{"A.8.20", "A.8.21", "A.8.24"}, - CoverageLevel: "partial", - }, - { - ModuleID: "iso-physisch", ModuleName: "Physische Sicherheit", - ISOControls: []string{"A.7.1", "A.7.2", "A.7.3", "A.7.4", "A.7.5", "A.7.7", "A.7.8"}, - CoverageLevel: "full", - }, - { - ModuleID: "dsgvo-tom", ModuleName: "Technisch-Organisatorische Massnahmen", - ISOControls: []string{"A.5.34", "A.8.10", "A.8.12", "A.8.30", "A.8.31"}, - CoverageLevel: "partial", - }, - { - ModuleID: "nis2-incident-response", ModuleName: "NIS2 Incident Response", - ISOControls: []string{"A.5.24", "A.5.25", "A.5.26", "A.5.27", "A.5.28", "A.6.8"}, - CoverageLevel: "full", - }, - { - ModuleID: "nis2-supply-chain", ModuleName: "NIS2 Lieferkettensicherheit", - ISOControls: []string{"A.5.19", "A.5.20", "A.5.21", "A.5.22", "A.5.23"}, - CoverageLevel: "full", - }, - { - ModuleID: "nis2-risikomanagement", ModuleName: "NIS2 Risikomanagement", - ISOControls: []string{"A.5.29", "A.5.30", "A.8.6", "A.8.7", "A.8.8", "A.8.9"}, - CoverageLevel: "partial", - }, - { - ModuleID: "dsgvo-grundlagen", ModuleName: "DSGVO Grundlagen", - ISOControls: []string{"A.5.31", "A.5.34", "A.6.2", "A.6.3"}, - CoverageLevel: "partial", - }, -} - -// GetISOControlByID returns a control by its ID -func GetISOControlByID(id string) (ISOControl, bool) { - for _, c := range ISOControls { - if c.ID == id { - return c, true - } - } - return ISOControl{}, false -} - -// GetISOControlsByCategory returns all controls in a category -func GetISOControlsByCategory(categoryID string) []ISOControl { - var result []ISOControl - for _, c := range ISOControls { - if c.CategoryID == categoryID { - result = append(result, c) - } - } - return result -} - -// ISOCategorySummary provides a summary per ISO category -type ISOCategorySummary struct { - CategoryID string `json:"category_id"` - CategoryName string `json:"category_name"` - TotalControls int 
`json:"total_controls"` - CoveredFull int `json:"covered_full"` - CoveredPartial int `json:"covered_partial"` - NotCovered int `json:"not_covered"` -} diff --git a/ai-compliance-sdk/internal/gci/mock_data.go b/ai-compliance-sdk/internal/gci/mock_data.go deleted file mode 100644 index bb8c074..0000000 --- a/ai-compliance-sdk/internal/gci/mock_data.go +++ /dev/null @@ -1,74 +0,0 @@ -package gci - -import "time" - -// MockModuleData provides fallback data when academy store is empty -func MockModuleData(tenantID string) []ModuleScore { - return []ModuleScore{ - // DSGVO modules - {ModuleID: "dsgvo-grundlagen", ModuleName: "DSGVO Grundlagen", Assigned: 25, Completed: 22, Category: "dsgvo", RiskWeight: 2.0}, - {ModuleID: "dsgvo-betroffenenrechte", ModuleName: "Betroffenenrechte", Assigned: 25, Completed: 18, Category: "dsgvo", RiskWeight: 2.5}, - {ModuleID: "dsgvo-tom", ModuleName: "Technisch-Organisatorische Massnahmen", Assigned: 20, Completed: 17, Category: "dsgvo", RiskWeight: 2.5}, - {ModuleID: "dsgvo-dsfa", ModuleName: "Datenschutz-Folgenabschaetzung", Assigned: 15, Completed: 10, Category: "dsgvo", RiskWeight: 2.0}, - {ModuleID: "dsgvo-auftragsverarbeitung", ModuleName: "Auftragsverarbeitung", Assigned: 20, Completed: 16, Category: "dsgvo", RiskWeight: 2.0}, - - // NIS2 modules - {ModuleID: "nis2-risikomanagement", ModuleName: "NIS2 Risikomanagement", Assigned: 15, Completed: 11, Category: "nis2", RiskWeight: 3.0}, - {ModuleID: "nis2-incident-response", ModuleName: "NIS2 Incident Response", Assigned: 15, Completed: 9, Category: "nis2", RiskWeight: 3.0}, - {ModuleID: "nis2-supply-chain", ModuleName: "NIS2 Lieferkettensicherheit", Assigned: 10, Completed: 6, Category: "nis2", RiskWeight: 2.0}, - {ModuleID: "nis2-management", ModuleName: "NIS2 Geschaeftsleitungspflicht", Assigned: 10, Completed: 8, Category: "nis2", RiskWeight: 3.0}, - - // ISO 27001 modules - {ModuleID: "iso-isms", ModuleName: "ISMS Grundlagen", Assigned: 20, Completed: 16, Category: "iso27001", 
RiskWeight: 2.0}, - {ModuleID: "iso-risikobewertung", ModuleName: "Risikobewertung", Assigned: 15, Completed: 12, Category: "iso27001", RiskWeight: 2.0}, - {ModuleID: "iso-zugangssteuerung", ModuleName: "Zugangssteuerung", Assigned: 20, Completed: 18, Category: "iso27001", RiskWeight: 2.0}, - {ModuleID: "iso-kryptografie", ModuleName: "Kryptografie", Assigned: 10, Completed: 7, Category: "iso27001", RiskWeight: 1.5}, - {ModuleID: "iso-physisch", ModuleName: "Physische Sicherheit", Assigned: 10, Completed: 9, Category: "iso27001", RiskWeight: 1.0}, - - // AI Act modules - {ModuleID: "ai-risikokategorien", ModuleName: "KI-Risikokategorien", Assigned: 15, Completed: 12, Category: "ai_act", RiskWeight: 2.5}, - {ModuleID: "ai-transparenz", ModuleName: "KI-Transparenzpflichten", Assigned: 15, Completed: 10, Category: "ai_act", RiskWeight: 2.0}, - {ModuleID: "ai-hochrisiko", ModuleName: "Hochrisiko-KI-Systeme", Assigned: 10, Completed: 6, Category: "ai_act", RiskWeight: 2.5}, - {ModuleID: "ai-governance", ModuleName: "KI-Governance", Assigned: 10, Completed: 7, Category: "ai_act", RiskWeight: 2.0}, - } -} - -// MockCertificateData provides mock certificate validity dates -func MockCertificateData() map[string]time.Time { - now := time.Now() - return map[string]time.Time{ - "dsgvo-grundlagen": now.AddDate(0, 8, 0), // valid 8 months - "dsgvo-betroffenenrechte": now.AddDate(0, 3, 0), // expiring in 3 months - "dsgvo-tom": now.AddDate(0, 10, 0), // valid - "dsgvo-dsfa": now.AddDate(0, -1, 0), // expired 1 month ago - "dsgvo-auftragsverarbeitung": now.AddDate(0, 6, 0), - "nis2-risikomanagement": now.AddDate(0, 5, 0), - "nis2-incident-response": now.AddDate(0, 2, 0), // expiring soon - "nis2-supply-chain": now.AddDate(0, -2, 0), // expired 2 months - "nis2-management": now.AddDate(0, 9, 0), - "iso-isms": now.AddDate(1, 0, 0), - "iso-risikobewertung": now.AddDate(0, 4, 0), - "iso-zugangssteuerung": now.AddDate(0, 11, 0), - "iso-kryptografie": now.AddDate(0, 1, 0), // expiring 
in 1 month - "iso-physisch": now.AddDate(0, 7, 0), - "ai-risikokategorien": now.AddDate(0, 6, 0), - "ai-transparenz": now.AddDate(0, 3, 0), - "ai-hochrisiko": now.AddDate(0, -3, 0), // expired 3 months - "ai-governance": now.AddDate(0, 5, 0), - } -} - -// MockIncidentData returns mock incident counts for adjustment -func MockIncidentData() (openIncidents int, criticalIncidents int) { - return 3, 1 -} - -// MockGCIHistory returns mock historical GCI snapshots -func MockGCIHistory(tenantID string) []GCISnapshot { - now := time.Now() - return []GCISnapshot{ - {TenantID: tenantID, Score: 58.2, MaturityLevel: MaturityReactive, AreaScores: map[string]float64{"dsgvo": 62, "nis2": 48, "iso27001": 60, "ai_act": 55}, CalculatedAt: now.AddDate(0, -3, 0)}, - {TenantID: tenantID, Score: 62.5, MaturityLevel: MaturityDefined, AreaScores: map[string]float64{"dsgvo": 65, "nis2": 55, "iso27001": 63, "ai_act": 58}, CalculatedAt: now.AddDate(0, -2, 0)}, - {TenantID: tenantID, Score: 67.8, MaturityLevel: MaturityDefined, AreaScores: map[string]float64{"dsgvo": 70, "nis2": 60, "iso27001": 68, "ai_act": 62}, CalculatedAt: now.AddDate(0, -1, 0)}, - } -} diff --git a/ai-compliance-sdk/internal/gci/models.go b/ai-compliance-sdk/internal/gci/models.go deleted file mode 100644 index 0f75779..0000000 --- a/ai-compliance-sdk/internal/gci/models.go +++ /dev/null @@ -1,104 +0,0 @@ -package gci - -import "time" - -// Level 1: Module Score -type ModuleScore struct { - ModuleID string `json:"module_id"` - ModuleName string `json:"module_name"` - Assigned int `json:"assigned"` - Completed int `json:"completed"` - RawScore float64 `json:"raw_score"` // completions/assigned - ValidityFactor float64 `json:"validity_factor"` // 0.0-1.0 - FinalScore float64 `json:"final_score"` // RawScore * ValidityFactor - RiskWeight float64 `json:"risk_weight"` // module criticality weight - Category string `json:"category"` // dsgvo, nis2, iso27001, ai_act -} - -// Level 2: Risk-weighted Module Score per regulation 
area -type RiskWeightedScore struct { - AreaID string `json:"area_id"` - AreaName string `json:"area_name"` - Modules []ModuleScore `json:"modules"` - WeightedSum float64 `json:"weighted_sum"` - TotalWeight float64 `json:"total_weight"` - AreaScore float64 `json:"area_score"` // WeightedSum / TotalWeight -} - -// Level 3: Regulation Area Score -type RegulationAreaScore struct { - RegulationID string `json:"regulation_id"` // dsgvo, nis2, iso27001, ai_act - RegulationName string `json:"regulation_name"` // Display name - Score float64 `json:"score"` // 0-100 - Weight float64 `json:"weight"` // regulation weight in GCI - WeightedScore float64 `json:"weighted_score"` // Score * Weight - ModuleCount int `json:"module_count"` - CompletedCount int `json:"completed_count"` -} - -// Level 4: GCI Result -type GCIResult struct { - TenantID string `json:"tenant_id"` - GCIScore float64 `json:"gci_score"` // 0-100 - MaturityLevel string `json:"maturity_level"` // Optimized, Managed, Defined, Reactive, HighRisk - MaturityLabel string `json:"maturity_label"` // German label - CalculatedAt time.Time `json:"calculated_at"` - Profile string `json:"profile"` // default, nis2_relevant, ki_nutzer - AreaScores []RegulationAreaScore `json:"area_scores"` - CriticalityMult float64 `json:"criticality_multiplier"` - IncidentAdj float64 `json:"incident_adjustment"` - AuditTrail []AuditEntry `json:"audit_trail"` -} - -// GCI Breakdown with all 4 levels -type GCIBreakdown struct { - GCIResult - Level1Modules []ModuleScore `json:"level1_modules"` - Level2Areas []RiskWeightedScore `json:"level2_areas"` -} - -// MaturityLevel constants -const ( - MaturityOptimized = "OPTIMIZED" - MaturityManaged = "MANAGED" - MaturityDefined = "DEFINED" - MaturityReactive = "REACTIVE" - MaturityHighRisk = "HIGH_RISK" -) - -// Maturity level labels (German) -var MaturityLabels = map[string]string{ - MaturityOptimized: "Optimiert", - MaturityManaged: "Gesteuert", - MaturityDefined: "Definiert", - MaturityReactive: 
"Reaktiv", - MaturityHighRisk: "Hohes Risiko", -} - -// AuditEntry for score transparency -type AuditEntry struct { - Timestamp time.Time `json:"timestamp"` - Factor string `json:"factor"` - Description string `json:"description"` - Value float64 `json:"value"` - Impact string `json:"impact"` // positive, negative, neutral -} - -// ComplianceMatrixEntry maps roles to regulations -type ComplianceMatrixEntry struct { - Role string `json:"role"` - RoleName string `json:"role_name"` - Regulations map[string]float64 `json:"regulations"` // regulation_id -> score - OverallScore float64 `json:"overall_score"` - RequiredModules int `json:"required_modules"` - CompletedModules int `json:"completed_modules"` -} - -// GCI History snapshot -type GCISnapshot struct { - TenantID string `json:"tenant_id"` - Score float64 `json:"score"` - MaturityLevel string `json:"maturity_level"` - AreaScores map[string]float64 `json:"area_scores"` - CalculatedAt time.Time `json:"calculated_at"` -} diff --git a/ai-compliance-sdk/internal/gci/nis2_roles.go b/ai-compliance-sdk/internal/gci/nis2_roles.go deleted file mode 100644 index c75d134..0000000 --- a/ai-compliance-sdk/internal/gci/nis2_roles.go +++ /dev/null @@ -1,118 +0,0 @@ -package gci - -// NIS2Role defines a NIS2 role classification -type NIS2Role struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - MandatoryModules []string `json:"mandatory_modules"` - Priority int `json:"priority"` // 1=highest -} - -// NIS2RoleAssignment represents a user's NIS2 role -type NIS2RoleAssignment struct { - TenantID string `json:"tenant_id"` - UserID string `json:"user_id"` - UserName string `json:"user_name"` - RoleID string `json:"role_id"` - RoleName string `json:"role_name"` - AssignedAt string `json:"assigned_at"` -} - -// NIS2 role definitions -var NIS2Roles = map[string]NIS2Role{ - "N1": { - ID: "N1", - Name: "Geschaeftsleitung", - Description: "Leitungsorgane mit persoenlicher Haftung gemaess 
NIS2 Art. 20", - Priority: 1, - MandatoryModules: []string{ - "nis2-management", - "nis2-risikomanagement", - "dsgvo-grundlagen", - "iso-isms", - }, - }, - "N2": { - ID: "N2", - Name: "IT-Sicherheit / CISO", - Description: "Verantwortliche fuer IT-Sicherheit und Cybersecurity", - Priority: 2, - MandatoryModules: []string{ - "nis2-risikomanagement", - "nis2-incident-response", - "nis2-supply-chain", - "iso-zugangssteuerung", - "iso-kryptografie", - }, - }, - "N3": { - ID: "N3", - Name: "Kritische Funktionen", - Description: "Mitarbeiter in kritischen Geschaeftsprozessen", - Priority: 3, - MandatoryModules: []string{ - "nis2-risikomanagement", - "nis2-incident-response", - "dsgvo-tom", - "iso-zugangssteuerung", - }, - }, - "N4": { - ID: "N4", - Name: "Allgemeine Mitarbeiter", - Description: "Alle Mitarbeiter mit IT-Zugang", - Priority: 4, - MandatoryModules: []string{ - "nis2-risikomanagement", - "dsgvo-grundlagen", - "iso-isms", - }, - }, - "N5": { - ID: "N5", - Name: "Incident Response Team", - Description: "Mitglieder des IRT/CSIRT gemaess NIS2 Art. 21", - Priority: 2, - MandatoryModules: []string{ - "nis2-incident-response", - "nis2-risikomanagement", - "nis2-supply-chain", - "iso-zugangssteuerung", - "iso-kryptografie", - "iso-isms", - }, - }, -} - -// GetNIS2Role returns a NIS2 role by ID -func GetNIS2Role(roleID string) (NIS2Role, bool) { - r, ok := NIS2Roles[roleID] - return r, ok -} - -// ListNIS2Roles returns all NIS2 roles sorted by priority -func ListNIS2Roles() []NIS2Role { - roles := []NIS2Role{} - // Return in priority order - order := []string{"N1", "N2", "N5", "N3", "N4"} - for _, id := range order { - if r, ok := NIS2Roles[id]; ok { - roles = append(roles, r) - } - } - return roles -} - -// MockNIS2RoleAssignments returns mock role assignments -func MockNIS2RoleAssignments(tenantID string) []NIS2RoleAssignment { - return []NIS2RoleAssignment{ - {TenantID: tenantID, UserID: "user-001", UserName: "Dr. 
Schmidt", RoleID: "N1", RoleName: "Geschaeftsleitung", AssignedAt: "2025-06-01"}, - {TenantID: tenantID, UserID: "user-002", UserName: "M. Weber", RoleID: "N2", RoleName: "IT-Sicherheit / CISO", AssignedAt: "2025-06-01"}, - {TenantID: tenantID, UserID: "user-003", UserName: "S. Mueller", RoleID: "N5", RoleName: "Incident Response Team", AssignedAt: "2025-07-15"}, - {TenantID: tenantID, UserID: "user-004", UserName: "K. Fischer", RoleID: "N3", RoleName: "Kritische Funktionen", AssignedAt: "2025-08-01"}, - {TenantID: tenantID, UserID: "user-005", UserName: "L. Braun", RoleID: "N3", RoleName: "Kritische Funktionen", AssignedAt: "2025-08-01"}, - {TenantID: tenantID, UserID: "user-006", UserName: "A. Schwarz", RoleID: "N4", RoleName: "Allgemeine Mitarbeiter", AssignedAt: "2025-09-01"}, - {TenantID: tenantID, UserID: "user-007", UserName: "T. Wagner", RoleID: "N4", RoleName: "Allgemeine Mitarbeiter", AssignedAt: "2025-09-01"}, - } -} diff --git a/ai-compliance-sdk/internal/gci/nis2_scoring.go b/ai-compliance-sdk/internal/gci/nis2_scoring.go deleted file mode 100644 index 57b7468..0000000 --- a/ai-compliance-sdk/internal/gci/nis2_scoring.go +++ /dev/null @@ -1,147 +0,0 @@ -package gci - -import "math" - -// NIS2Score represents the NIS2-specific compliance score -type NIS2Score struct { - TenantID string `json:"tenant_id"` - OverallScore float64 `json:"overall_score"` - MaturityLevel string `json:"maturity_level"` - MaturityLabel string `json:"maturity_label"` - AreaScores []NIS2AreaScore `json:"area_scores"` - RoleCompliance []NIS2RoleScore `json:"role_compliance"` -} - -// NIS2AreaScore represents a NIS2 compliance area -type NIS2AreaScore struct { - AreaID string `json:"area_id"` - AreaName string `json:"area_name"` - Score float64 `json:"score"` - Weight float64 `json:"weight"` - ModuleIDs []string `json:"module_ids"` -} - -// NIS2RoleScore represents completion per NIS2 role -type NIS2RoleScore struct { - RoleID string `json:"role_id"` - RoleName string 
`json:"role_name"` - AssignedUsers int `json:"assigned_users"` - CompletionRate float64 `json:"completion_rate"` - MandatoryTotal int `json:"mandatory_total"` - MandatoryDone int `json:"mandatory_done"` -} - -// NIS2 scoring areas with weights -// NIS2Score = 25% Management + 25% Incident + 30% IT Security + 20% Supply Chain -var nis2Areas = []struct { - ID string - Name string - Weight float64 - ModuleIDs []string -}{ - { - ID: "management", Name: "Management & Governance", Weight: 0.25, - ModuleIDs: []string{"nis2-management", "dsgvo-grundlagen", "iso-isms"}, - }, - { - ID: "incident", Name: "Vorfallsbehandlung", Weight: 0.25, - ModuleIDs: []string{"nis2-incident-response"}, - }, - { - ID: "it_security", Name: "IT-Sicherheit", Weight: 0.30, - ModuleIDs: []string{"nis2-risikomanagement", "iso-zugangssteuerung", "iso-kryptografie"}, - }, - { - ID: "supply_chain", Name: "Lieferkettensicherheit", Weight: 0.20, - ModuleIDs: []string{"nis2-supply-chain", "dsgvo-auftragsverarbeitung"}, - }, -} - -// CalculateNIS2Score computes the NIS2-specific compliance score -func CalculateNIS2Score(tenantID string) *NIS2Score { - modules := MockModuleData(tenantID) - moduleMap := map[string]ModuleScore{} - for _, m := range modules { - moduleMap[m.ModuleID] = m - } - - areaScores := []NIS2AreaScore{} - totalWeighted := 0.0 - - for _, area := range nis2Areas { - areaScore := NIS2AreaScore{ - AreaID: area.ID, - AreaName: area.Name, - Weight: area.Weight, - ModuleIDs: area.ModuleIDs, - } - - scoreSum := 0.0 - count := 0 - for _, modID := range area.ModuleIDs { - if m, ok := moduleMap[modID]; ok { - if m.Assigned > 0 { - scoreSum += float64(m.Completed) / float64(m.Assigned) * 100 - } - count++ - } - } - if count > 0 { - areaScore.Score = math.Round(scoreSum/float64(count)*10) / 10 - } - totalWeighted += areaScore.Score * areaScore.Weight - areaScores = append(areaScores, areaScore) - } - - overallScore := math.Round(totalWeighted*10) / 10 - - // Calculate role compliance - 
roleAssignments := MockNIS2RoleAssignments(tenantID) - roleScores := calculateNIS2RoleScores(roleAssignments, moduleMap) - - return &NIS2Score{ - TenantID: tenantID, - OverallScore: overallScore, - MaturityLevel: determineMaturityLevel(overallScore), - MaturityLabel: MaturityLabels[determineMaturityLevel(overallScore)], - AreaScores: areaScores, - RoleCompliance: roleScores, - } -} - -func calculateNIS2RoleScores(assignments []NIS2RoleAssignment, moduleMap map[string]ModuleScore) []NIS2RoleScore { - // Count users per role - roleCounts := map[string]int{} - for _, a := range assignments { - roleCounts[a.RoleID]++ - } - - scores := []NIS2RoleScore{} - for roleID, role := range NIS2Roles { - rs := NIS2RoleScore{ - RoleID: roleID, - RoleName: role.Name, - AssignedUsers: roleCounts[roleID], - MandatoryTotal: len(role.MandatoryModules), - } - - completionSum := 0.0 - for _, modID := range role.MandatoryModules { - if m, ok := moduleMap[modID]; ok { - if m.Assigned > 0 { - rate := float64(m.Completed) / float64(m.Assigned) - completionSum += rate - if rate >= 0.8 { // 80%+ = considered done - rs.MandatoryDone++ - } - } - } - } - if rs.MandatoryTotal > 0 { - rs.CompletionRate = math.Round(completionSum/float64(rs.MandatoryTotal)*100*10) / 10 - } - scores = append(scores, rs) - } - - return scores -} diff --git a/ai-compliance-sdk/internal/gci/validity.go b/ai-compliance-sdk/internal/gci/validity.go deleted file mode 100644 index 5578f3d..0000000 --- a/ai-compliance-sdk/internal/gci/validity.go +++ /dev/null @@ -1,59 +0,0 @@ -package gci - -import ( - "math" - "time" -) - -const ( - // GracePeriodDays is the number of days after expiry during which - // the certificate still contributes (with declining factor) - GracePeriodDays = 180 - - // DecayStartDays is how many days before expiry the linear decay begins - DecayStartDays = 180 -) - -// CalculateValidityFactor computes the validity factor for a certificate -// based on its expiry date. 
-// -// Rules: -// - Certificate not yet expiring (>6 months): factor = 1.0 -// - Certificate expiring within 6 months: linear decay from 1.0 to 0.5 -// - Certificate expired: linear decay from 0.5 to 0.0 over grace period -// - Certificate expired beyond grace period: factor = 0.0 -func CalculateValidityFactor(validUntil time.Time, now time.Time) float64 { - daysUntilExpiry := validUntil.Sub(now).Hours() / 24.0 - - if daysUntilExpiry > float64(DecayStartDays) { - // Not yet in decay window - return 1.0 - } - - if daysUntilExpiry > 0 { - // In pre-expiry decay window: linear from 1.0 to 0.5 - fraction := daysUntilExpiry / float64(DecayStartDays) - return 0.5 + 0.5*fraction - } - - // Certificate is expired - daysExpired := -daysUntilExpiry - if daysExpired > float64(GracePeriodDays) { - return 0.0 - } - - // In grace period: linear from 0.5 to 0.0 - fraction := 1.0 - (daysExpired / float64(GracePeriodDays)) - return math.Max(0, 0.5*fraction) -} - -// IsExpired returns true if the certificate is past its validity date -func IsExpired(validUntil time.Time, now time.Time) bool { - return now.After(validUntil) -} - -// IsExpiringSoon returns true if the certificate expires within the decay window -func IsExpiringSoon(validUntil time.Time, now time.Time) bool { - daysUntil := validUntil.Sub(now).Hours() / 24.0 - return daysUntil > 0 && daysUntil <= float64(DecayStartDays) -} diff --git a/ai-compliance-sdk/internal/gci/weights.go b/ai-compliance-sdk/internal/gci/weights.go deleted file mode 100644 index 7c50742..0000000 --- a/ai-compliance-sdk/internal/gci/weights.go +++ /dev/null @@ -1,78 +0,0 @@ -package gci - -// WeightProfile defines regulation weights for different compliance profiles -type WeightProfile struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Weights map[string]float64 `json:"weights"` // regulation_id -> weight (0.0-1.0) -} - -// Default weight profiles -var DefaultProfiles = 
map[string]WeightProfile{ - "default": { - ID: "default", - Name: "Standard", - Description: "Ausgewogenes Profil fuer allgemeine Compliance", - Weights: map[string]float64{ - "dsgvo": 0.30, - "nis2": 0.25, - "iso27001": 0.25, - "ai_act": 0.20, - }, - }, - "nis2_relevant": { - ID: "nis2_relevant", - Name: "NIS2-relevant", - Description: "Fuer Betreiber kritischer Infrastrukturen", - Weights: map[string]float64{ - "dsgvo": 0.25, - "nis2": 0.35, - "iso27001": 0.25, - "ai_act": 0.15, - }, - }, - "ki_nutzer": { - ID: "ki_nutzer", - Name: "KI-Nutzer", - Description: "Fuer Organisationen mit KI-Einsatz", - Weights: map[string]float64{ - "dsgvo": 0.25, - "nis2": 0.25, - "iso27001": 0.20, - "ai_act": 0.30, - }, - }, -} - -// ModuleRiskWeights defines risk criticality per module type -var ModuleRiskWeights = map[string]float64{ - "incident_response": 3.0, - "management_awareness": 3.0, - "data_protection": 2.5, - "it_security": 2.5, - "supply_chain": 2.0, - "risk_assessment": 2.0, - "access_control": 2.0, - "business_continuity": 2.0, - "employee_training": 1.5, - "documentation": 1.5, - "physical_security": 1.0, - "general": 1.0, -} - -// GetProfile returns a weight profile by ID, defaulting to "default" -func GetProfile(profileID string) WeightProfile { - if p, ok := DefaultProfiles[profileID]; ok { - return p - } - return DefaultProfiles["default"] -} - -// GetModuleRiskWeight returns the risk weight for a module category -func GetModuleRiskWeight(category string) float64 { - if w, ok := ModuleRiskWeights[category]; ok { - return w - } - return 1.0 -} diff --git a/ai-compliance-sdk/internal/industry/models.go b/ai-compliance-sdk/internal/industry/models.go deleted file mode 100644 index d527a81..0000000 --- a/ai-compliance-sdk/internal/industry/models.go +++ /dev/null @@ -1,65 +0,0 @@ -package industry - -// ============================================================================ -// Industry-Specific Compliance Templates (Phase 3.3) -// Static reference data — no 
database migration needed. -// ============================================================================ - -// IndustryTemplate represents a complete compliance package for a specific industry -type IndustryTemplate struct { - Slug string `json:"slug"` - Name string `json:"name"` - Description string `json:"description"` - Icon string `json:"icon"` - Regulations []string `json:"regulations"` - VVTTemplates []VVTTemplate `json:"vvt_templates"` - TOMRecommendations []TOMRecommendation `json:"tom_recommendations"` - RiskScenarios []RiskScenario `json:"risk_scenarios"` -} - -// VVTTemplate represents a pre-configured processing activity record template -type VVTTemplate struct { - Name string `json:"name"` - Purpose string `json:"purpose"` - LegalBasis string `json:"legal_basis"` - DataCategories []string `json:"data_categories"` - DataSubjects []string `json:"data_subjects"` - RetentionPeriod string `json:"retention_period"` -} - -// TOMRecommendation represents a recommended technical/organizational measure -type TOMRecommendation struct { - Category string `json:"category"` - Name string `json:"name"` - Description string `json:"description"` - Priority string `json:"priority"` -} - -// RiskScenario represents an industry-specific data protection risk scenario -type RiskScenario struct { - Name string `json:"name"` - Description string `json:"description"` - Likelihood string `json:"likelihood"` - Impact string `json:"impact"` - Mitigation string `json:"mitigation"` -} - -// ============================================================================ -// API Response Types -// ============================================================================ - -// IndustryListResponse is the API response for listing all industries -type IndustryListResponse struct { - Industries []IndustrySummary `json:"industries"` - Total int `json:"total"` -} - -// IndustrySummary is a condensed view of an industry template for list endpoints -type IndustrySummary struct { - Slug 
string `json:"slug"` - Name string `json:"name"` - Description string `json:"description"` - Icon string `json:"icon"` - RegulationCount int `json:"regulation_count"` - TemplateCount int `json:"template_count"` -} diff --git a/ai-compliance-sdk/internal/industry/templates.go b/ai-compliance-sdk/internal/industry/templates.go deleted file mode 100644 index d932699..0000000 --- a/ai-compliance-sdk/internal/industry/templates.go +++ /dev/null @@ -1,558 +0,0 @@ -package industry - -// ============================================================================ -// Static Industry Template Data -// ============================================================================ - -// allTemplates holds all pre-configured industry compliance packages. -// This is static reference data embedded in the binary — no database required. -var allTemplates = []IndustryTemplate{ - itSoftwareTemplate(), - healthcareTemplate(), - financeTemplate(), - manufacturingTemplate(), -} - -// GetAllTemplates returns all available industry templates. -func GetAllTemplates() []IndustryTemplate { - return allTemplates -} - -// GetTemplateBySlug returns the industry template matching the given slug, -// or nil if no match is found. 
-func GetTemplateBySlug(slug string) *IndustryTemplate { - for i := range allTemplates { - if allTemplates[i].Slug == slug { - return &allTemplates[i] - } - } - return nil -} - -// ============================================================================ -// IT & Software -// ============================================================================ - -func itSoftwareTemplate() IndustryTemplate { - return IndustryTemplate{ - Slug: "it-software", - Name: "IT & Software", - Description: "Compliance-Paket fuer IT-Unternehmen, SaaS-Anbieter und Softwareentwickler mit Fokus auf AI Act, DSGVO fuer Cloud-Dienste und NIS2.", - Icon: "\U0001F4BB", - Regulations: []string{"DSGVO", "AI Act", "NIS2", "ePrivacy"}, - - VVTTemplates: []VVTTemplate{ - { - Name: "SaaS-Kundendaten", - Purpose: "Verarbeitung personenbezogener Daten von SaaS-Kunden zur Bereitstellung der vertraglichen Dienstleistung, einschliesslich Account-Verwaltung, Nutzungsanalyse und Abrechnung.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung)", - DataCategories: []string{"Name", "E-Mail-Adresse", "Unternehmenszugehoerigkeit", "Nutzungsdaten", "Rechnungsdaten", "IP-Adresse"}, - DataSubjects: []string{"Kunden", "Endnutzer der SaaS-Plattform"}, - RetentionPeriod: "Vertragsdauer + 10 Jahre (handelsrechtliche Aufbewahrungspflicht)", - }, - { - Name: "Cloud-Hosting", - Purpose: "Speicherung und Verarbeitung von Kundendaten in Cloud-Infrastruktur (IaaS/PaaS) zur Gewaehrleistung der Verfuegbarkeit und Skalierbarkeit der Dienste.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), Art. 
28 DSGVO (Auftragsverarbeitung)", - DataCategories: []string{"Alle vom Kunden eingestellten Daten", "Metadaten", "Logdateien", "Zugangsdaten"}, - DataSubjects: []string{"Kunden", "Endnutzer", "Mitarbeiter der Kunden"}, - RetentionPeriod: "Vertragsdauer + 30 Tage Backup-Retention", - }, - { - Name: "KI-Modelltraining", - Purpose: "Verwendung von (pseudonymisierten) Daten zum Training, zur Validierung und Verbesserung von KI-/ML-Modellen unter Einhaltung des AI Act.", - LegalBasis: "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse), ggf. Art. 6 Abs. 1 lit. a (Einwilligung)", - DataCategories: []string{"Pseudonymisierte Nutzungsdaten", "Textdaten", "Interaktionsmuster", "Feedback-Daten"}, - DataSubjects: []string{"Nutzer der KI-Funktionen", "Trainingsdaten-Quellen"}, - RetentionPeriod: "Bis Modell-Abloesung, max. 5 Jahre; Trainingsdaten nach Pseudonymisierung unbegrenzt", - }, - { - Name: "Software-Analytics", - Purpose: "Erhebung anonymisierter und pseudonymisierter Nutzungsstatistiken zur Produktverbesserung, Fehleranalyse und Performance-Monitoring.", - LegalBasis: "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse)", - DataCategories: []string{"Geraetemertkmale", "Browserinformationen", "Nutzungsverhalten", "Crash-Reports", "Performance-Metriken"}, - DataSubjects: []string{"Endnutzer der Software"}, - RetentionPeriod: "Rohdaten 90 Tage, aggregierte Daten 2 Jahre", - }, - { - Name: "Newsletter/Marketing", - Purpose: "Versand von Produkt-Newslettern, Release-Benachrichtigungen und Marketing-Kommunikation an registrierte Nutzer und Interessenten.", - LegalBasis: "Art. 6 Abs. 1 lit. 
a DSGVO (Einwilligung)", - DataCategories: []string{"E-Mail-Adresse", "Name", "Unternehmen", "Oeffnungs- und Klickraten", "Abonnement-Praeferenzen"}, - DataSubjects: []string{"Newsletter-Abonnenten", "Leads", "Bestandskunden"}, - RetentionPeriod: "Bis Widerruf der Einwilligung + 30 Tage Abwicklung", - }, - { - Name: "Bewerbermanagement", - Purpose: "Verarbeitung von Bewerberdaten im Rahmen des Recruiting-Prozesses einschliesslich Sichtung, Kommunikation und Entscheidungsfindung.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (vorvertragliche Massnahmen), ss 26 BDSG", - DataCategories: []string{"Lebenslauf", "Anschreiben", "Zeugnisse", "Kontaktdaten", "Gehaltsvorstellungen", "Bewertungsnotizen"}, - DataSubjects: []string{"Bewerber", "Empfehlungsgeber"}, - RetentionPeriod: "6 Monate nach Abschluss des Verfahrens (AGG-Frist), bei Einwilligung laenger", - }, - }, - - TOMRecommendations: []TOMRecommendation{ - { - Category: "encryption", - Name: "Verschluesselung at rest und in transit", - Description: "Alle gespeicherten Daten mit AES-256 verschluesseln. Saemtlichen Netzwerkverkehr ueber TLS 1.3 absichern. Zertifikats-Management automatisieren.", - Priority: "critical", - }, - { - Category: "access_control", - Name: "Multi-Faktor-Authentifizierung (MFA)", - Description: "MFA fuer alle administrativen Zugaenge, Produktionssysteme und CI/CD-Pipelines erzwingen. FIDO2/WebAuthn bevorzugen.", - Priority: "critical", - }, - { - Category: "monitoring", - Name: "Penetration Testing", - Description: "Regelmaessige externe Penetrationstests (mind. jaehrlich) und kontinuierliche Schwachstellenscans der oeffentlich erreichbaren Infrastruktur durchfuehren.", - Priority: "high", - }, - { - Category: "development", - Name: "Code Reviews und Secure Coding", - Description: "Verpflichtende Code-Reviews fuer alle Aenderungen. SAST/DAST-Tools in die CI/CD-Pipeline integrieren. 
OWASP Top 10 als Mindeststandard.", - Priority: "high", - }, - { - Category: "supply_chain", - Name: "Dependency Scanning", - Description: "Automatisiertes Scanning aller Abhaengigkeiten (SBOM) auf bekannte Schwachstellen. Alerts bei kritischen CVEs. Regelmaessige Updates erzwingen.", - Priority: "high", - }, - { - Category: "incident_response", - Name: "Incident Response Plan", - Description: "Dokumentierter Incident-Response-Prozess mit definierten Eskalationsstufen, Meldepflichten (72h DSGVO) und regelmaessigen Uebungen (Tabletop Exercises).", - Priority: "critical", - }, - }, - - RiskScenarios: []RiskScenario{ - { - Name: "Datenleck durch Cloud-Fehlkonfiguration", - Description: "Oeffentlich zugaengliche S3-Buckets, fehlende Netzwerk-Segmentierung oder falsch konfigurierte Firewalls legen Kundendaten offen.", - Likelihood: "high", - Impact: "critical", - Mitigation: "Infrastructure-as-Code mit automatisierten Compliance-Checks (z.B. Checkov, tfsec), Cloud Security Posture Management (CSPM) einsetzen, regelmaessige Audits der Cloud-Konfiguration.", - }, - { - Name: "Supply-Chain-Angriff", - Description: "Kompromittierte Abhaengigkeit (npm, PyPI, Go-Module) schleust Schadcode in den Build-Prozess ein und gelangt in die Produktionsumgebung.", - Likelihood: "medium", - Impact: "critical", - Mitigation: "Dependency Pinning, Signaturtruefung, SBOM-Generierung, private Registries, regelmaessige Audits aller Drittanbieter-Komponenten.", - }, - { - Name: "KI-Bias und Diskriminierung", - Description: "KI-Modelle produzieren diskriminierende Ergebnisse aufgrund verzerrter Trainingsdaten. 
Verstoss gegen AI Act und Gleichbehandlungsgrundsaetze.", - Likelihood: "medium", - Impact: "high", - Mitigation: "Bias-Audits vor und nach Deployment, diverse Trainingsdaten, Erklaerbarkeits-Dokumentation gemaess AI Act, menschliche Ueberpruefung (Human-in-the-Loop).", - }, - { - Name: "Insider-Bedrohung", - Description: "Ein Mitarbeiter mit privilegiertem Zugang exfiltriert Kundendaten, Quellcode oder Geschaeftsgeheimnisse — absichtlich oder durch Social Engineering.", - Likelihood: "low", - Impact: "critical", - Mitigation: "Least-Privilege-Prinzip, privilegierte Zugangssteuerung (PAM), Audit-Logging aller Admin-Aktionen, Vier-Augen-Prinzip fuer kritische Operationen, Security-Awareness-Trainings.", - }, - }, - } -} - -// ============================================================================ -// Gesundheitswesen -// ============================================================================ - -func healthcareTemplate() IndustryTemplate { - return IndustryTemplate{ - Slug: "healthcare", - Name: "Gesundheitswesen", - Description: "Compliance-Paket fuer Arztpraxen, Krankenhaeuser, Labore und Gesundheits-IT mit besonderem Fokus auf Art. 9 DSGVO (besondere Datenkategorien) und Patientendatenschutz.", - Icon: "\U0001F3E5", - Regulations: []string{"DSGVO", "BDSG \u00a722", "SGB V", "MDR", "DiGAV"}, - - VVTTemplates: []VVTTemplate{ - { - Name: "Patientenakte (ePA)", - Purpose: "Fuehrung elektronischer Patientenakten zur medizinischen Dokumentation, Behandlungsplanung und abrechnungstechnischen Erfassung.", - LegalBasis: "Art. 9 Abs. 2 lit. h DSGVO i.V.m. 
\u00a722 BDSG, \u00a7630f BGB (Dokumentationspflicht)", - DataCategories: []string{"Diagnosen", "Befunde", "Medikation", "Vitalwerte", "Anamnese", "Stammdaten", "Versicherungsdaten"}, - DataSubjects: []string{"Patienten"}, - RetentionPeriod: "10 Jahre nach Abschluss der Behandlung (\u00a7630f BGB), bei Strahlentherapie 30 Jahre", - }, - { - Name: "Terminverwaltung", - Purpose: "Planung, Vergabe und Erinnerung von Behandlungsterminen einschliesslich Online-Terminbuchung.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), Art. 9 Abs. 2 lit. h DSGVO", - DataCategories: []string{"Name", "Kontaktdaten", "Terminzeitpunkt", "Fachrichtung/Behandlungsgrund", "Versicherungsstatus"}, - DataSubjects: []string{"Patienten", "Angehoerige (bei Terminerstellung fuer Dritte)"}, - RetentionPeriod: "Vergangene Termine: 1 Jahr, bei medizinischer Relevanz gemaess Patientenakte", - }, - { - Name: "Labor- und Befunddaten", - Purpose: "Erfassung, Uebermittlung und Archivierung von Laborergebnissen, bildgebenden Befunden und pathologischen Berichten.", - LegalBasis: "Art. 9 Abs. 2 lit. h DSGVO, \u00a710 MBO-Ae", - DataCategories: []string{"Laborwerte", "Bildgebung (DICOM)", "Pathologiebefunde", "Mikrobiologische Ergebnisse", "Genetische Daten"}, - DataSubjects: []string{"Patienten"}, - RetentionPeriod: "10 Jahre, genetische Daten 30 Jahre", - }, - { - Name: "Telemedizin", - Purpose: "Durchfuehrung von Videosprechstunden und telemedizinischen Konsultationen einschliesslich Uebertragung medizinischer Daten.", - LegalBasis: "Art. 9 Abs. 2 lit. 
h DSGVO, \u00a7630a BGB, Fernbehandlungs-Richtlinien", - DataCategories: []string{"Audio-/Videodaten", "Chatprotokolle", "Uebermittelte Dokumente", "Verbindungsmetadaten", "Behandlungsnotizen"}, - DataSubjects: []string{"Patienten", "Behandelnde Aerzte"}, - RetentionPeriod: "Aufzeichnungen gemaess Patientenakte (10 Jahre), Verbindungsdaten 90 Tage", - }, - { - Name: "Forschungsdaten", - Purpose: "Verwendung pseudonymisierter oder anonymisierter Patientendaten fuer klinische Studien und medizinische Forschung.", - LegalBasis: "Art. 9 Abs. 2 lit. j DSGVO, \u00a727 BDSG, ggf. Einwilligung gemaess Art. 9 Abs. 2 lit. a", - DataCategories: []string{"Pseudonymisierte Diagnosen", "Behandlungsverlaeufe", "Demografische Daten", "Genetische Daten (anonymisiert)", "Studienergebnisse"}, - DataSubjects: []string{"Studienteilnehmer", "Patienten (retrospektiv, pseudonymisiert)"}, - RetentionPeriod: "Studienende + 15 Jahre (GCP-ICH), Forschungsdaten gemaess Foerderrichtlinien", - }, - { - Name: "Abrechnung (KV/Krankenversicherung)", - Purpose: "Erstellung und Uebermittlung von Abrechnungsdaten an Kassenaerztliche Vereinigungen und Krankenkassen.", - LegalBasis: "Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), \u00a7284 SGB V, \u00a7295 SGB V", - DataCategories: []string{"Versichertennummer", "Diagnose-Codes (ICD-10)", "Leistungsziffern (EBM/GOAe)", "Behandlungsdaten", "Zuzahlungsstatus"}, - DataSubjects: []string{"Patienten", "Versicherte"}, - RetentionPeriod: "10 Jahre (steuerrechtlich), Abrechnungsdaten 4 Jahre (\u00a7305 SGB V)", - }, - }, - - TOMRecommendations: []TOMRecommendation{ - { - Category: "encryption", - Name: "Ende-zu-Ende-Verschluesselung", - Description: "Saemtliche Kommunikation mit Gesundheitsdaten (E-Mail, Telemedizin, Befunduebermittlung) Ende-zu-Ende verschluesseln. 
Zertifizierte Loesungen gemaess gematik-Spezifikation einsetzen.", - Priority: "critical", - }, - { - Category: "access_control", - Name: "Rollenbasierte Zugriffskontrolle (RBAC)", - Description: "Feingranulare Zugriffsrechte basierend auf Behandlungskontext: Nur behandelnde Aerzte sehen relevante Patientendaten. Need-to-know-Prinzip konsequent umsetzen.", - Priority: "critical", - }, - { - Category: "monitoring", - Name: "Audit-Logging", - Description: "Lueckenloses Protokollieren aller Zugriffe auf Patientendaten mit Zeitstempel, Benutzer, Aktion und Begruendung. Logs manipulationssicher speichern (WORM).", - Priority: "critical", - }, - { - Category: "physical_security", - Name: "Physische Sicherheit", - Description: "Zutrittskontrolle zu Serverraeumen und medizinischen Arbeitsbereichen. Bildschirmsperren, Clean-Desk-Policy. Sicherer Umgang mit physischen Patientenakten.", - Priority: "high", - }, - { - Category: "data_minimization", - Name: "Pseudonymisierung", - Description: "Konsequente Pseudonymisierung bei Datenweitergabe (Forschung, Qualitaetssicherung, Abrechnung). Zuordnungstabellen separat und besonders geschuetzt speichern.", - Priority: "high", - }, - }, - - RiskScenarios: []RiskScenario{ - { - Name: "Unbefugter Zugriff auf Patientendaten", - Description: "Mitarbeiter ohne Behandlungsbezug greifen auf Patientenakten zu (z.B. prominente Patienten). Verstoss gegen aerztliche Schweigepflicht und DSGVO.", - Likelihood: "high", - Impact: "critical", - Mitigation: "Striktes RBAC mit Behandlungskontext-Pruefung, automatische Anomalie-Erkennung bei ungewoehnlichen Zugriffen, regelmaessige Audit-Log-Auswertung, Sanktionskatalog.", - }, - { - Name: "Ransomware-Angriff auf Krankenhaus-IT", - Description: "Verschluesselungstrojaner legt Krankenhaus-Informationssystem lahm. 
Patientenversorgung gefaehrdet, Notbetrieb erforderlich.", - Likelihood: "medium", - Impact: "critical", - Mitigation: "Netzwerksegmentierung (Medizingeraete, Verwaltung, Gaeste), Offline-Backups, Notfallplaene fuer Papierbetrieb, regelmaessige Sicherheitsupdates, Mitarbeiterschulung gegen Phishing.", - }, - { - Name: "Datenverlust bei Systemausfall", - Description: "Hardware-Defekt oder Softwarefehler fuehrt zum Verlust aktueller Patientendaten, Befunde oder Medikationsplaene.", - Likelihood: "medium", - Impact: "high", - Mitigation: "Redundante Systeme (Clustering), automatische Backups mit verifizierter Wiederherstellung, unterbrechungsfreie Stromversorgung (USV), Disaster-Recovery-Plan mit RTOs unter 4 Stunden.", - }, - { - Name: "Verletzung der aerztlichen Schweigepflicht", - Description: "Versehentliche oder vorsaetzliche Weitergabe von Patientendaten an Unberechtigte (z.B. Angehoerige ohne Vollmacht, Arbeitgeber, Medien).", - Likelihood: "medium", - Impact: "high", - Mitigation: "Schulungen zur Schweigepflicht (\u00a7203 StGB), klare Prozesse fuer Auskunftsersuchen, Dokumentation von Einwilligungen und Vollmachten, sichere Kommunikationskanaele.", - }, - }, - } -} - -// ============================================================================ -// Finanzdienstleister -// ============================================================================ - -func financeTemplate() IndustryTemplate { - return IndustryTemplate{ - Slug: "finance", - Name: "Finanzdienstleister", - Description: "Compliance-Paket fuer Banken, Versicherungen, Zahlungsdienstleister und FinTechs mit Fokus auf BaFin-Anforderungen, PSD2 und Geldwaeschepraeventions.", - Icon: "\U0001F3E6", - Regulations: []string{"DSGVO", "KWG", "ZAG", "GwG", "MaRisk", "BAIT/DORA", "PSD2"}, - - VVTTemplates: []VVTTemplate{ - { - Name: "Kontoeroeffnung / KYC", - Purpose: "Identitaetspruefung und Legitimation von Neukunden im Rahmen der Know-Your-Customer-Pflichten gemaess Geldwaeschegesetz.", - LegalBasis: 
"Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), \u00a710 GwG, \u00a7154 AO", - DataCategories: []string{"Personalausweisdaten", "Adressdaten", "Geburtsdatum", "Staatsangehoerigkeit", "PEP-Status", "Wirtschaftliche Berechtigung", "Video-Identifikation"}, - DataSubjects: []string{"Neukunden", "Wirtschaftlich Berechtigte", "Vertretungsberechtigte"}, - RetentionPeriod: "5 Jahre nach Ende der Geschaeftsbeziehung (\u00a78 GwG), Identifizierungsdaten 10 Jahre", - }, - { - Name: "Zahlungsverarbeitung", - Purpose: "Ausfuehrung und Dokumentation von Zahlungstransaktionen (Ueberweisungen, Lastschriften, Kartenzahlungen) im Rahmen der Kontovertragserfullung.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), \u00a7675f BGB, PSD2", - DataCategories: []string{"IBAN/Kontonummer", "Transaktionsbetrag", "Verwendungszweck", "Empfaengerdaten", "Zeitstempel", "Autorisierungsdaten"}, - DataSubjects: []string{"Kontoinhaber", "Zahlungsempfaenger", "Zahlungspflichtige"}, - RetentionPeriod: "10 Jahre (\u00a7257 HGB, \u00a7147 AO)", - }, - { - Name: "Kreditpruefung / Scoring", - Purpose: "Bonitaetspruefung und Kreditwuerdigkeitsbewertung auf Basis interner und externer Daten zur Kreditentscheidung.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (vorvertragliche Massnahmen), \u00a731 BDSG (Scoring)", - DataCategories: []string{"Einkommensnachweise", "Schufa-Score", "Beschaeftigungsstatus", "Bestehende Verbindlichkeiten", "Sicherheiten", "Scoring-Ergebnis"}, - DataSubjects: []string{"Kreditantragsteller", "Buergen", "Mithaftende"}, - RetentionPeriod: "Kreditlaufzeit + 3 Jahre, bei Ablehnung 6 Monate", - }, - { - Name: "Wertpapierhandel", - Purpose: "Ausfuehrung und Dokumentation von Wertpapiergeschaeften, Anlageberatung und Geeignetheitspruefung.", - LegalBasis: "Art. 6 Abs. 1 lit. 
b DSGVO, \u00a763 WpHG (Aufzeichnungspflichten), MiFID II", - DataCategories: []string{"Depotdaten", "Orderdaten", "Risikoprofil", "Anlageerfahrung", "Geeignetheitserklaerung", "Telefonaufzeichnungen"}, - DataSubjects: []string{"Depotinhaber", "Bevollmaechtigte", "Anlageberater"}, - RetentionPeriod: "10 Jahre (\u00a7257 HGB), Telefonaufzeichnungen 5 Jahre (MiFID II)", - }, - { - Name: "Geldwaesche-Monitoring", - Purpose: "Kontinuierliche Ueberwachung von Transaktionsmustern zur Erkennung verdaechtiger Aktivitaeten und Erfuellung der Meldepflichten gegenueber der FIU.", - LegalBasis: "Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), \u00a325h KWG, \u00a756 GwG", - DataCategories: []string{"Transaktionshistorie", "Risikobewertung", "Verdachtsmeldungen (SAR)", "PEP-Screening-Ergebnisse", "Sanktionslistenabgleich"}, - DataSubjects: []string{"Kunden", "Transaktionspartner", "Verdachtspersonen"}, - RetentionPeriod: "5 Jahre nach Ende der Geschaeftsbeziehung (\u00a78 GwG), Verdachtsmeldungen 10 Jahre", - }, - { - Name: "Versicherungsantraege", - Purpose: "Verarbeitung von Antrags- und Risikodaten zur Pruefung, Annahme und Verwaltung von Versicherungsvertraegen.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), bei Gesundheitsdaten Art. 9 Abs. 2 lit. f DSGVO", - DataCategories: []string{"Antragsdaten", "Gesundheitsfragen", "Schadenhistorie", "Risikofaktoren", "Praemienberechnung", "Leistungsansprueche"}, - DataSubjects: []string{"Versicherungsnehmer", "Versicherte Personen", "Bezugsberechtigte", "Geschaedigte"}, - RetentionPeriod: "Vertragsdauer + 10 Jahre (Verjaehrung), Lebensversicherung bis Ablauf aller Ansprueche", - }, - }, - - TOMRecommendations: []TOMRecommendation{ - { - Category: "encryption", - Name: "HSM fuer Schluesselverwaltung", - Description: "Hardware Security Modules (HSM) fuer kryptographische Schluessel, insbesondere bei Zahlungsverkehr und digitalen Signaturen. 
PCI-DSS-konform.", - Priority: "critical", - }, - { - Category: "monitoring", - Name: "Transaktionsmonitoring", - Description: "Echtzeit-Ueberwachung aller Finanztransaktionen auf Anomalien, Betrugsversuche und verdaechtige Muster. Regelbasierte und KI-gestuetzte Erkennung.", - Priority: "critical", - }, - { - Category: "access_control", - Name: "Vier-Augen-Prinzip", - Description: "Kritische Transaktionen (Kreditfreigaben, Grossueberweisungen, Konfigurationsaenderungen) benoetigen Freigabe durch zwei unabhaengige Personen.", - Priority: "critical", - }, - { - Category: "network_security", - Name: "DDoS-Schutz", - Description: "Mehrstufiger DDoS-Schutz fuer Online-Banking und Zahlungsverkehr-Infrastruktur. Redundante Anbindung, Traffic-Scrubbing, automatische Skalierung.", - Priority: "high", - }, - { - Category: "business_continuity", - Name: "Backup und Disaster Recovery", - Description: "Taeglich gesicherte Datenbanken mit geografisch getrennter Aufbewahrung. RTO unter 2 Stunden fuer Kernbanksysteme, RPO unter 15 Minuten.", - Priority: "critical", - }, - { - Category: "testing", - Name: "Penetration Testing (TIBER-EU)", - Description: "Threat-Intelligence-basierte Red-Teaming-Tests gemaess TIBER-EU-Framework. 
Jaehrliche Durchfuehrung durch externe, BaFin-akkreditierte Tester.", - Priority: "high", - }, - }, - - RiskScenarios: []RiskScenario{ - { - Name: "Betrug und Identitaetsdiebstahl", - Description: "Kriminelle nutzen gestohlene Identitaetsdaten zur Kontoeroeffnung, Kreditaufnahme oder fuer nicht autorisierte Transaktionen.", - Likelihood: "high", - Impact: "high", - Mitigation: "Starke Kundenauthentifizierung (SCA) gemaess PSD2, Echtzeit-Betrugs-Scoring, Video-Ident mit Liveness-Detection, biometrische Verifikation, Transaktionslimits.", - }, - { - Name: "Insiderhandel-Datenleck", - Description: "Vorabinformationen ueber boersenrelevante Entscheidungen (M&A, Quartalsberichte) gelangen an Unberechtigte.", - Likelihood: "low", - Impact: "critical", - Mitigation: "Insiderverzeichnisse fuehren, Chinese Walls zwischen Abteilungen, Kommunikations-Monitoring, Handelsverbote fuer Insider, regelmaessige Compliance-Schulungen.", - }, - { - Name: "Systemausfall bei Zahlungsverkehr", - Description: "Ausfall des Kernbanksystems oder der Zahlungsverkehrsinfrastruktur fuehrt zu Nicht-Verfuegbarkeit von Transaktionen, Geldautomaten und Online-Banking.", - Likelihood: "medium", - Impact: "critical", - Mitigation: "Hochverfuegbarkeits-Architektur (Active-Active), automatischer Failover, regelmaessige Disaster-Recovery-Tests, Notfall-Kommunikationsplan fuer Kunden und BaFin.", - }, - { - Name: "Geldwaesche-Compliance-Verstoss", - Description: "Mangelhafte KYC-Prozesse oder unzureichendes Transaktionsmonitoring fuehren zu einem Compliance-Verstoss mit BaFin-Sanktionen.", - Likelihood: "medium", - Impact: "critical", - Mitigation: "Automatisiertes Transaction-Monitoring mit regelmaessiger Kalibrierung, jaehrliche GwG-Schulungen, interne Revision der AML-Prozesse, PEP- und Sanktionslisten-Screening in Echtzeit.", - }, - }, - } -} - -// ============================================================================ -// Produktion / Industrie -// 
============================================================================ - -func manufacturingTemplate() IndustryTemplate { - return IndustryTemplate{ - Slug: "manufacturing", - Name: "Produktion / Industrie", - Description: "Compliance-Paket fuer produzierende Unternehmen mit Fokus auf NIS2-Anforderungen, OT-Security, IoT-Sicherheit und Schutz industrieller Steuerungssysteme.", - Icon: "\U0001F3ED", - Regulations: []string{"DSGVO", "NIS2", "Maschinenverordnung", "BetrSichV", "IT-Sicherheitsgesetz 2.0"}, - - VVTTemplates: []VVTTemplate{ - { - Name: "Mitarbeiterdaten / Zeiterfassung", - Purpose: "Erfassung von Arbeitszeiten, Schichtplanung und Anwesenheitsdaten zur Lohnabrechnung und Einhaltung des Arbeitszeitgesetzes.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), \u00a726 BDSG, \u00a716 ArbZG", - DataCategories: []string{"Mitarbeiterstammdaten", "Arbeitszeitdaten", "Schichtplaene", "Fehlzeiten", "Ueberstunden", "Zutrittsdaten"}, - DataSubjects: []string{"Mitarbeiter", "Leiharbeiter", "Praktikanten"}, - RetentionPeriod: "Lohnunterlagen 6 Jahre (\u00a7257 HGB), Arbeitszeitnachweise 2 Jahre (\u00a716 ArbZG)", - }, - { - Name: "Lieferantenmanagement", - Purpose: "Verwaltung von Lieferantendaten, Bestellprozessen und Qualitaetsbewertungen im Rahmen der Supply-Chain.", - LegalBasis: "Art. 6 Abs. 1 lit. b DSGVO (Vertragserfullung), Art. 6 Abs. 1 lit. f (berechtigtes Interesse)", - DataCategories: []string{"Ansprechpartner", "Kontaktdaten", "Lieferkonditionen", "Qualitaetsbewertungen", "Zertifizierungen", "Bankverbindungen"}, - DataSubjects: []string{"Ansprechpartner der Lieferanten", "Subunternehmer"}, - RetentionPeriod: "Vertragsdauer + 10 Jahre (Gewaehrleistung und Steuerrecht)", - }, - { - Name: "IoT-Sensordaten", - Purpose: "Erfassung und Auswertung von Sensor- und Maschinendaten fuer Produktionsoptimierung, Predictive Maintenance und Qualitaetssicherung.", - LegalBasis: "Art. 6 Abs. 1 lit. 
f DSGVO (berechtigtes Interesse), bei Personenbezug ggf. Art. 6 Abs. 1 lit. a (Einwilligung)", - DataCategories: []string{"Maschinenkennung", "Temperatur/Druck/Vibration", "Produktionszaehler", "Energieverbrauch", "Standortdaten (Intralogistik)", "Bediener-ID (falls zugeordnet)"}, - DataSubjects: []string{"Maschinenbediener (indirekt)", "Instandhalter"}, - RetentionPeriod: "Rohdaten 1 Jahr, aggregierte Daten 5 Jahre, qualitaetsrelevant 10 Jahre", - }, - { - Name: "Qualitaetskontrolle", - Purpose: "Dokumentation von Qualitaetspruefungen, Chargenrueckverfolgbarkeit und Reklamationsmanagement.", - LegalBasis: "Art. 6 Abs. 1 lit. c DSGVO (rechtliche Verpflichtung), Maschinenverordnung, Produkthaftung", - DataCategories: []string{"Pruefprotokolle", "Chargennnummern", "Messwerte", "Pruefer-ID", "Fotos/Videos der Pruefung", "Reklamationsdaten"}, - DataSubjects: []string{"Pruefer", "Reklamierende Kunden"}, - RetentionPeriod: "Produktlebensdauer + 10 Jahre (Produkthaftung), sicherheitskritisch 30 Jahre", - }, - { - Name: "Videoueberwachung", - Purpose: "Ueberwachung von Produktionshallen, Lagerbereichen und Aussenbereichen zum Schutz vor Diebstahl, Sabotage und zur Arbeitssicherheit.", - LegalBasis: "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse), Betriebsvereinbarung", - DataCategories: []string{"Videoaufnahmen", "Zeitstempel", "Kamerastandort", "Bewegungserkennung"}, - DataSubjects: []string{"Mitarbeiter", "Besucher", "Lieferanten", "Unbefugte"}, - RetentionPeriod: "72 Stunden Standard, bei Vorfaellen bis Abschluss der Ermittlung (max. 10 Tage ohne konkreten Anlass)", - }, - { - Name: "Zugangskontrolle (physisch und logisch)", - Purpose: "Steuerung und Protokollierung des Zutritts zu Produktionsbereichen, Gefahrstofflagern und IT-Raeumen mittels Chipkarten/Biometrie.", - LegalBasis: "Art. 6 Abs. 1 lit. f DSGVO (berechtigtes Interesse), BetrSichV, bei Biometrie Art. 9 Abs. 2 lit. 
b DSGVO", - DataCategories: []string{"Mitarbeiter-ID", "Zutrittszeitpunkt", "Zutrittsbereich", "Chipkartennummer", "Biometrische Daten (optional)"}, - DataSubjects: []string{"Mitarbeiter", "Externe Dienstleister", "Besucher"}, - RetentionPeriod: "Zutrittsprotokolle 90 Tage, sicherheitsrelevante Bereiche 1 Jahr", - }, - }, - - TOMRecommendations: []TOMRecommendation{ - { - Category: "network_security", - Name: "Netzwerksegmentierung (IT/OT)", - Description: "Strikte Trennung von Office-IT und Operational Technology (OT) durch DMZ, Firewalls und unidirektionale Gateways. Purdue-Modell als Referenzarchitektur.", - Priority: "critical", - }, - { - Category: "patch_management", - Name: "IoT-Patch-Management", - Description: "Zentrales Management aller IoT-Geraete und Firmware-Versionen. Geplante Wartungsfenster fuer Updates, Risikobewertung vor Patches auf Produktionssystemen.", - Priority: "high", - }, - { - Category: "physical_security", - Name: "Physische Zutrittskontrolle", - Description: "Mehrstufiges Zutrittskonzept (Gelaende, Gebaeude, Produktionshalle, Leitstand). Besuchermanagement, Begleitung in Sicherheitsbereichen, Videoprotokollierung.", - Priority: "high", - }, - { - Category: "business_continuity", - Name: "Backup industrieller Steuerungen", - Description: "Regelmaessige Sicherung von SPS-Programmen, SCADA-Konfigurationen und Roboterprogrammen. Offline-Aufbewahrung der Backups, dokumentierte Restore-Prozeduren.", - Priority: "critical", - }, - { - Category: "incident_response", - Name: "Notfallplaene fuer Produktionsausfall", - Description: "Dokumentierte Notfallplaene fuer Cyber-Angriffe auf OT-Systeme. Manuelle Rueckfallebenen, Kommunikationsketten, Kontakt zu BSI und CERT. Jaehrliche Uebungen.", - Priority: "critical", - }, - }, - - RiskScenarios: []RiskScenario{ - { - Name: "OT-Cyberangriff auf Produktionsanlage", - Description: "Angreifer kompromittiert SCADA/SPS-Systeme und manipuliert Produktionsprozesse. 
Moegliche Folgen: Produktionsausfall, Qualitaetsmaengel, Personengefaehrdung.", - Likelihood: "medium", - Impact: "critical", - Mitigation: "Netzwerksegmentierung (IT/OT), Anomalie-Erkennung im OT-Netzwerk, Haertung der Steuerungssysteme, Deaktivierung nicht benoetigter Dienste und Ports, regelmaessige Sicherheitsaudits.", - }, - { - Name: "Ausfall der Lieferkette durch Cybervorfall", - Description: "Ein Cyberangriff auf einen kritischen Zulieferer fuehrt zum Stillstand der eigenen Produktion mangels Materialverfuegbarkeit oder kompromittierter Daten.", - Likelihood: "medium", - Impact: "high", - Mitigation: "Diversifikation der Lieferantenbasis, vertragliche Cybersecurity-Anforderungen an Zulieferer, regelmaessige Risikobewertung der Supply Chain, Notfallbestaende fuer kritische Komponenten.", - }, - { - Name: "Industriespionage", - Description: "Wettbewerber oder staatliche Akteure greifen Konstruktionsdaten, Fertigungsverfahren oder strategische Planungen ab.", - Likelihood: "medium", - Impact: "critical", - Mitigation: "DLP-Loesungen (Data Loss Prevention), Verschluesselung von CAD/CAM-Daten, Geheimhaltungsvereinbarungen, Informationsklassifizierung, USB-Port-Kontrolle, Mitarbeiter-Sensibilisierung.", - }, - { - Name: "IoT-Botnet-Kompromittierung", - Description: "Ungepatchte IoT-Sensoren und Aktoren werden Teil eines Botnets und dienen als Angriffsinfrastruktur oder Einfallstor ins Unternehmensnetz.", - Likelihood: "high", - Impact: "high", - Mitigation: "Default-Passwoerter aendern, Firmware-Updates automatisieren, IoT-Geraete in eigenem VLAN isolieren, Netzwerk-Traffic-Monitoring, Geraete-Inventar fuehren, unsichere Geraete ersetzen.", - }, - }, - } -} diff --git a/ai-compliance-sdk/internal/multitenant/models.go b/ai-compliance-sdk/internal/multitenant/models.go deleted file mode 100644 index c86094c..0000000 --- a/ai-compliance-sdk/internal/multitenant/models.go +++ /dev/null @@ -1,77 +0,0 @@ -package multitenant - -import ( - "time" - - 
"github.com/google/uuid" -) - -// TenantOverview provides a consolidated view of a tenant's compliance status -// including scores, module highlights, and namespace information. -type TenantOverview struct { - ID uuid.UUID `json:"id"` - Name string `json:"name"` - Slug string `json:"slug"` - Status string `json:"status"` - MaxUsers int `json:"max_users"` - LLMQuotaMonthly int `json:"llm_quota_monthly"` - ComplianceScore int `json:"compliance_score"` - RiskLevel string `json:"risk_level"` - NamespaceCount int `json:"namespace_count"` - - // Module highlights - OpenIncidents int `json:"open_incidents"` - OpenReports int `json:"open_reports"` // whistleblower - PendingDSRs int `json:"pending_dsrs"` - TrainingRate float64 `json:"training_completion_rate"` - VendorRiskHigh int `json:"vendor_risk_high"` - - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -// MultiTenantOverviewResponse wraps the list of tenant overviews with aggregate metrics. -type MultiTenantOverviewResponse struct { - Tenants []TenantOverview `json:"tenants"` - Total int `json:"total"` - AverageScore int `json:"average_score"` - GeneratedAt time.Time `json:"generated_at"` -} - -// CreateTenantRequest represents a request to create a new tenant. -type CreateTenantRequest struct { - Name string `json:"name" binding:"required"` - Slug string `json:"slug" binding:"required"` - MaxUsers int `json:"max_users"` - LLMQuotaMonthly int `json:"llm_quota_monthly"` -} - -// UpdateTenantRequest represents a partial update to an existing tenant. -// Pointer fields allow distinguishing between "not provided" and "zero value". -type UpdateTenantRequest struct { - Name *string `json:"name"` - MaxUsers *int `json:"max_users"` - LLMQuotaMonthly *int `json:"llm_quota_monthly"` - Status *string `json:"status"` -} - -// CreateNamespaceRequest represents a request to create a new namespace within a tenant. 
-type CreateNamespaceRequest struct { - Name string `json:"name" binding:"required"` - Slug string `json:"slug" binding:"required"` - IsolationLevel string `json:"isolation_level"` - DataClassification string `json:"data_classification"` -} - -// SwitchTenantRequest represents a request to switch the active tenant context. -type SwitchTenantRequest struct { - TenantID string `json:"tenant_id" binding:"required"` -} - -// SwitchTenantResponse contains the tenant info needed for the frontend to switch context. -type SwitchTenantResponse struct { - TenantID uuid.UUID `json:"tenant_id"` - TenantName string `json:"tenant_name"` - TenantSlug string `json:"tenant_slug"` - Status string `json:"status"` -} diff --git a/ai-compliance-sdk/internal/multitenant/store.go b/ai-compliance-sdk/internal/multitenant/store.go deleted file mode 100644 index 21bb4ca..0000000 --- a/ai-compliance-sdk/internal/multitenant/store.go +++ /dev/null @@ -1,148 +0,0 @@ -package multitenant - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/breakpilot/ai-compliance-sdk/internal/rbac" - "github.com/breakpilot/ai-compliance-sdk/internal/reporting" - "github.com/google/uuid" - "github.com/jackc/pgx/v5/pgxpool" -) - -// Store provides aggregated multi-tenant views by combining data from the -// existing RBAC store, reporting store, and direct SQL queries for module highlights. -type Store struct { - pool *pgxpool.Pool - rbacStore *rbac.Store - reportingStore *reporting.Store -} - -// NewStore creates a new multi-tenant store. -func NewStore(pool *pgxpool.Pool, rbacStore *rbac.Store, reportingStore *reporting.Store) *Store { - return &Store{ - pool: pool, - rbacStore: rbacStore, - reportingStore: reportingStore, - } -} - -// GetOverview retrieves all tenants with their compliance scores and module highlights. -// It aggregates data from the RBAC tenant list, the reporting compliance score, -// and direct SQL counts for namespaces, incidents, reports, DSRs, training, and vendors. 
-// Individual query failures are tolerated and result in zero-value defaults. -func (s *Store) GetOverview(ctx context.Context) (*MultiTenantOverviewResponse, error) { - tenants, err := s.rbacStore.ListTenants(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list tenants: %w", err) - } - - overviews := make([]TenantOverview, 0, len(tenants)) - totalScore := 0 - - for _, tenant := range tenants { - overview := s.buildTenantOverview(ctx, tenant) - totalScore += overview.ComplianceScore - overviews = append(overviews, overview) - } - - averageScore := 0 - if len(overviews) > 0 { - averageScore = totalScore / len(overviews) - } - - return &MultiTenantOverviewResponse{ - Tenants: overviews, - Total: len(overviews), - AverageScore: averageScore, - GeneratedAt: time.Now().UTC(), - }, nil -} - -// GetTenantDetail returns detailed compliance info for a specific tenant. -func (s *Store) GetTenantDetail(ctx context.Context, tenantID uuid.UUID) (*TenantOverview, error) { - tenant, err := s.rbacStore.GetTenant(ctx, tenantID) - if err != nil { - return nil, fmt.Errorf("failed to get tenant: %w", err) - } - - overview := s.buildTenantOverview(ctx, tenant) - return &overview, nil -} - -// buildTenantOverview constructs a TenantOverview by fetching compliance scores -// and module highlights for a single tenant. Errors are logged but do not -// propagate -- missing data defaults to zero values. -func (s *Store) buildTenantOverview(ctx context.Context, tenant *rbac.Tenant) TenantOverview { - overview := TenantOverview{ - ID: tenant.ID, - Name: tenant.Name, - Slug: tenant.Slug, - Status: string(tenant.Status), - MaxUsers: tenant.MaxUsers, - LLMQuotaMonthly: tenant.LLMQuotaMonthly, - CreatedAt: tenant.CreatedAt, - UpdatedAt: tenant.UpdatedAt, - } - - // Compliance score and risk level derived from an executive report. - // GenerateReport computes the compliance score and risk overview internally. 
- report, err := s.reportingStore.GenerateReport(ctx, tenant.ID) - if err != nil { - log.Printf("multitenant: failed to generate report for tenant %s: %v", tenant.ID, err) - } else { - overview.ComplianceScore = report.ComplianceScore - overview.RiskLevel = report.RiskOverview.OverallLevel - } - - // Namespace count - overview.NamespaceCount = s.countSafe(ctx, tenant.ID, - "SELECT COUNT(*) FROM compliance_namespaces WHERE tenant_id = $1") - - // Open incidents - overview.OpenIncidents = s.countSafe(ctx, tenant.ID, - "SELECT COUNT(*) FROM incidents WHERE tenant_id = $1 AND status IN ('new', 'investigating', 'containment')") - - // Open whistleblower reports - overview.OpenReports = s.countSafe(ctx, tenant.ID, - "SELECT COUNT(*) FROM whistleblower_reports WHERE tenant_id = $1 AND status IN ('new', 'acknowledged', 'investigating')") - - // Pending DSR requests - overview.PendingDSRs = s.countSafe(ctx, tenant.ID, - "SELECT COUNT(*) FROM dsr_requests WHERE tenant_id = $1 AND status IN ('new', 'in_progress')") - - // Training completion rate (average progress, 0-100) - overview.TrainingRate = s.avgSafe(ctx, tenant.ID, - "SELECT COALESCE(AVG(CASE WHEN status = 'completed' THEN 100.0 ELSE progress END), 0) FROM academy_enrollments WHERE tenant_id = $1") - - // High-risk vendors - overview.VendorRiskHigh = s.countSafe(ctx, tenant.ID, - "SELECT COUNT(*) FROM vendors WHERE tenant_id = $1 AND risk_level = 'high'") - - return overview -} - -// countSafe executes a COUNT(*) query that takes a single tenant_id parameter. -// If the query fails for any reason (e.g. table does not exist), it returns 0. -func (s *Store) countSafe(ctx context.Context, tenantID uuid.UUID, query string) int { - var count int - err := s.pool.QueryRow(ctx, query, tenantID).Scan(&count) - if err != nil { - // Tolerate errors -- table may not exist or query may fail - return 0 - } - return count -} - -// avgSafe executes an AVG query that takes a single tenant_id parameter. 
-// If the query fails for any reason, it returns 0. -func (s *Store) avgSafe(ctx context.Context, tenantID uuid.UUID, query string) float64 { - var avg float64 - err := s.pool.QueryRow(ctx, query, tenantID).Scan(&avg) - if err != nil { - return 0 - } - return avg -} diff --git a/ai-compliance-sdk/internal/reporting/models.go b/ai-compliance-sdk/internal/reporting/models.go deleted file mode 100644 index 34cb32a..0000000 --- a/ai-compliance-sdk/internal/reporting/models.go +++ /dev/null @@ -1,97 +0,0 @@ -package reporting - -import "time" - -type ExecutiveReport struct { - GeneratedAt time.Time `json:"generated_at"` - TenantID string `json:"tenant_id"` - ComplianceScore int `json:"compliance_score"` // 0-100 overall score - - // Module summaries - DSGVO DSGVOSummary `json:"dsgvo"` - Vendors VendorSummary `json:"vendors"` - Incidents IncidentSummary `json:"incidents"` - Whistleblower WhistleblowerSummary `json:"whistleblower"` - Academy AcademySummary `json:"academy"` - - // Cross-module metrics - RiskOverview RiskOverview `json:"risk_overview"` - UpcomingDeadlines []Deadline `json:"upcoming_deadlines"` - RecentActivity []ActivityEntry `json:"recent_activity"` -} - -type DSGVOSummary struct { - ProcessingActivities int `json:"processing_activities"` - ActiveProcessings int `json:"active_processings"` - TOMsImplemented int `json:"toms_implemented"` - TOMsPlanned int `json:"toms_planned"` - TOMsTotal int `json:"toms_total"` - CompletionPercent int `json:"completion_percent"` // TOMsImplemented / total * 100 - OpenDSRs int `json:"open_dsrs"` - OverdueDSRs int `json:"overdue_dsrs"` - DSFAsCompleted int `json:"dsfas_completed"` - RetentionPolicies int `json:"retention_policies"` -} - -type VendorSummary struct { - TotalVendors int `json:"total_vendors"` - ActiveVendors int `json:"active_vendors"` - ByRiskLevel map[string]int `json:"by_risk_level"` - PendingReviews int `json:"pending_reviews"` - ExpiredContracts int `json:"expired_contracts"` -} - -type IncidentSummary 
struct { - TotalIncidents int `json:"total_incidents"` - OpenIncidents int `json:"open_incidents"` - CriticalIncidents int `json:"critical_incidents"` - NotificationsPending int `json:"notifications_pending"` - AvgResolutionHours float64 `json:"avg_resolution_hours"` -} - -type WhistleblowerSummary struct { - TotalReports int `json:"total_reports"` - OpenReports int `json:"open_reports"` - OverdueAcknowledgments int `json:"overdue_acknowledgments"` - OverdueFeedbacks int `json:"overdue_feedbacks"` - AvgResolutionDays float64 `json:"avg_resolution_days"` -} - -type AcademySummary struct { - TotalCourses int `json:"total_courses"` - TotalEnrollments int `json:"total_enrollments"` - CompletionRate float64 `json:"completion_rate"` // 0-100 - OverdueCount int `json:"overdue_count"` - AvgCompletionDays float64 `json:"avg_completion_days"` -} - -type RiskOverview struct { - OverallLevel string `json:"overall_level"` // LOW, MEDIUM, HIGH, CRITICAL - ModuleRisks []ModuleRisk `json:"module_risks"` - OpenFindings int `json:"open_findings"` - CriticalFindings int `json:"critical_findings"` -} - -type ModuleRisk struct { - Module string `json:"module"` - Level string `json:"level"` // LOW, MEDIUM, HIGH, CRITICAL - Score int `json:"score"` // 0-100 - Issues int `json:"issues"` -} - -type Deadline struct { - Module string `json:"module"` - Type string `json:"type"` - Description string `json:"description"` - DueDate time.Time `json:"due_date"` - DaysLeft int `json:"days_left"` - Severity string `json:"severity"` // INFO, WARNING, URGENT, OVERDUE -} - -type ActivityEntry struct { - Timestamp time.Time `json:"timestamp"` - Module string `json:"module"` - Action string `json:"action"` - Description string `json:"description"` - UserID string `json:"user_id,omitempty"` -} diff --git a/ai-compliance-sdk/internal/reporting/store.go b/ai-compliance-sdk/internal/reporting/store.go deleted file mode 100644 index 0fef11d..0000000 --- a/ai-compliance-sdk/internal/reporting/store.go +++ 
/dev/null @@ -1,516 +0,0 @@ -package reporting - -import ( - "context" - "math" - "sort" - "time" - - "github.com/breakpilot/ai-compliance-sdk/internal/academy" - "github.com/breakpilot/ai-compliance-sdk/internal/whistleblower" - "github.com/google/uuid" - "github.com/jackc/pgx/v5/pgxpool" -) - -type Store struct { - pool *pgxpool.Pool - whistleStore *whistleblower.Store - academyStore *academy.Store -} - -func NewStore(pool *pgxpool.Pool, ws *whistleblower.Store, as *academy.Store) *Store { - return &Store{ - pool: pool, - whistleStore: ws, - academyStore: as, - } -} - -func (s *Store) GenerateReport(ctx context.Context, tenantID uuid.UUID) (*ExecutiveReport, error) { - report := &ExecutiveReport{ - GeneratedAt: time.Now().UTC(), - TenantID: tenantID.String(), - } - - // 1. Gather DSGVO stats via direct SQL (Python is now primary for DSGVO) - report.DSGVO = s.getDSGVOStats(ctx, tenantID) - - // 2. Gather vendor stats via direct SQL (Python is now primary for vendors) - report.Vendors = s.getVendorStats(ctx, tenantID) - - // 3. Gather incident stats via direct SQL (Python is now primary for incidents) - report.Incidents = s.getIncidentStats(ctx, tenantID) - - // 4. Gather whistleblower stats - whistleStats, err := s.whistleStore.GetStatistics(ctx, tenantID) - if err == nil && whistleStats != nil { - openReports := 0 - for status, count := range whistleStats.ByStatus { - if status != "CLOSED" && status != "ARCHIVED" { - openReports += count - } - } - report.Whistleblower = WhistleblowerSummary{ - TotalReports: whistleStats.TotalReports, - OpenReports: openReports, - OverdueAcknowledgments: whistleStats.OverdueAcknowledgments, - OverdueFeedbacks: whistleStats.OverdueFeedbacks, - AvgResolutionDays: whistleStats.AvgResolutionDays, - } - } - - // 5. 
Gather academy stats - academyStats, err := s.academyStore.GetStatistics(ctx, tenantID) - if err == nil && academyStats != nil { - report.Academy = AcademySummary{ - TotalCourses: academyStats.TotalCourses, - TotalEnrollments: academyStats.TotalEnrollments, - CompletionRate: academyStats.CompletionRate, - OverdueCount: academyStats.OverdueCount, - AvgCompletionDays: academyStats.AvgCompletionDays, - } - } - - // 6. Calculate risk overview - report.RiskOverview = s.calculateRiskOverview(report) - - // 7. Calculate compliance score (0-100) - report.ComplianceScore = s.calculateComplianceScore(report) - - // 8. Gather upcoming deadlines from DB - report.UpcomingDeadlines = s.getUpcomingDeadlines(ctx, tenantID) - - // 9. Gather recent activity from DB - report.RecentActivity = s.getRecentActivity(ctx, tenantID) - - return report, nil -} - -// getDSGVOStats queries DSGVO tables directly (previously via dsgvo.Store) -func (s *Store) getDSGVOStats(ctx context.Context, tenantID uuid.UUID) DSGVOSummary { - summary := DSGVOSummary{} - - // Processing activities - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*), COUNT(*) FILTER (WHERE status = 'ACTIVE') FROM compliance.vvt_entries WHERE tenant_id = $1`, tenantID, - ).Scan(&summary.ProcessingActivities, &summary.ActiveProcessings) - - // TOMs - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*) FILTER (WHERE status = 'IMPLEMENTED'), COUNT(*) FILTER (WHERE status = 'PLANNED') FROM compliance.tom_entries WHERE tenant_id = $1`, tenantID, - ).Scan(&summary.TOMsImplemented, &summary.TOMsPlanned) - summary.TOMsTotal = summary.TOMsImplemented + summary.TOMsPlanned - if summary.TOMsTotal > 0 { - summary.CompletionPercent = int(math.Round(float64(summary.TOMsImplemented) / float64(summary.TOMsTotal) * 100)) - } - - // DSRs - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*) FILTER (WHERE status NOT IN ('COMPLETED','REJECTED')), COUNT(*) FILTER (WHERE deadline < NOW() AND status NOT IN ('COMPLETED','REJECTED')) FROM compliance.dsr_requests WHERE 
tenant_id = $1`, tenantID, - ).Scan(&summary.OpenDSRs, &summary.OverdueDSRs) - - // DSFAs - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*) FROM compliance.dsfa_entries WHERE tenant_id = $1 AND status = 'COMPLETED'`, tenantID, - ).Scan(&summary.DSFAsCompleted) - - // Retention policies - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*) FROM compliance.loeschfristen WHERE tenant_id = $1`, tenantID, - ).Scan(&summary.RetentionPolicies) - - return summary -} - -// getVendorStats queries vendor tables directly (previously via vendor.Store) -func (s *Store) getVendorStats(ctx context.Context, tenantID uuid.UUID) VendorSummary { - summary := VendorSummary{ByRiskLevel: map[string]int{}} - - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*), COUNT(*) FILTER (WHERE status = 'ACTIVE') FROM compliance.vendor_compliance WHERE tenant_id = $1`, tenantID, - ).Scan(&summary.TotalVendors, &summary.ActiveVendors) - - rows, err := s.pool.Query(ctx, - `SELECT COALESCE(risk_level, 'UNKNOWN'), COUNT(*) FROM compliance.vendor_compliance WHERE tenant_id = $1 GROUP BY risk_level`, tenantID, - ) - if err == nil { - defer rows.Close() - for rows.Next() { - var level string - var count int - if rows.Scan(&level, &count) == nil { - summary.ByRiskLevel[level] = count - } - } - } - - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*) FROM compliance.vendor_compliance WHERE tenant_id = $1 AND next_review_date < NOW()`, tenantID, - ).Scan(&summary.PendingReviews) - - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*) FROM compliance.vendor_compliance WHERE tenant_id = $1 AND contract_end < NOW()`, tenantID, - ).Scan(&summary.ExpiredContracts) - - return summary -} - -// getIncidentStats queries incident tables directly (previously via incidents.Store) -func (s *Store) getIncidentStats(ctx context.Context, tenantID uuid.UUID) IncidentSummary { - summary := IncidentSummary{} - - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*), COUNT(*) FILTER (WHERE status NOT IN ('RESOLVED','CLOSED')), COUNT(*) FILTER (WHERE severity = 'CRITICAL' 
AND status NOT IN ('RESOLVED','CLOSED')) FROM compliance.incidents WHERE tenant_id = $1`, tenantID, - ).Scan(&summary.TotalIncidents, &summary.OpenIncidents, &summary.CriticalIncidents) - - _ = s.pool.QueryRow(ctx, - `SELECT COUNT(*) FROM compliance.incidents WHERE tenant_id = $1 AND notification_required = true AND notification_sent = false`, tenantID, - ).Scan(&summary.NotificationsPending) - - _ = s.pool.QueryRow(ctx, - `SELECT COALESCE(AVG(EXTRACT(EPOCH FROM (resolved_at - created_at))/3600), 0) FROM compliance.incidents WHERE tenant_id = $1 AND resolved_at IS NOT NULL`, tenantID, - ).Scan(&summary.AvgResolutionHours) - - return summary -} - -func (s *Store) calculateRiskOverview(report *ExecutiveReport) RiskOverview { - modules := []ModuleRisk{} - - // DSGVO risk based on overdue DSRs and missing TOMs - dsgvoScore := 100 - dsgvoIssues := report.DSGVO.OverdueDSRs + report.DSGVO.TOMsPlanned - if report.DSGVO.OverdueDSRs > 0 { - dsgvoScore -= report.DSGVO.OverdueDSRs * 15 - } - if report.DSGVO.TOMsTotal > 0 { - dsgvoScore = int(math.Round(float64(report.DSGVO.CompletionPercent))) - } - if dsgvoScore < 0 { - dsgvoScore = 0 - } - modules = append(modules, ModuleRisk{Module: "DSGVO", Level: riskLevel(dsgvoScore), Score: dsgvoScore, Issues: dsgvoIssues}) - - // Vendor risk based on high-risk vendors and pending reviews - vendorScore := 100 - vendorIssues := report.Vendors.PendingReviews + report.Vendors.ExpiredContracts - highRisk := 0 - if v, ok := report.Vendors.ByRiskLevel["HIGH"]; ok { - highRisk += v - } - if v, ok := report.Vendors.ByRiskLevel["CRITICAL"]; ok { - highRisk += v - } - if report.Vendors.TotalVendors > 0 { - vendorScore = 100 - int(math.Round(float64(highRisk)/float64(report.Vendors.TotalVendors)*100)) - } - vendorScore -= report.Vendors.PendingReviews * 5 - vendorScore -= report.Vendors.ExpiredContracts * 10 - if vendorScore < 0 { - vendorScore = 0 - } - modules = append(modules, ModuleRisk{Module: "Vendors", Level: riskLevel(vendorScore), Score: 
vendorScore, Issues: vendorIssues}) - - // Incident risk - incidentScore := 100 - incidentIssues := report.Incidents.OpenIncidents - incidentScore -= report.Incidents.CriticalIncidents * 20 - incidentScore -= report.Incidents.OpenIncidents * 5 - incidentScore -= report.Incidents.NotificationsPending * 15 - if incidentScore < 0 { - incidentScore = 0 - } - modules = append(modules, ModuleRisk{Module: "Incidents", Level: riskLevel(incidentScore), Score: incidentScore, Issues: incidentIssues}) - - // Whistleblower compliance - whistleScore := 100 - whistleIssues := report.Whistleblower.OverdueAcknowledgments + report.Whistleblower.OverdueFeedbacks - whistleScore -= report.Whistleblower.OverdueAcknowledgments * 20 - whistleScore -= report.Whistleblower.OverdueFeedbacks * 10 - if whistleScore < 0 { - whistleScore = 0 - } - modules = append(modules, ModuleRisk{Module: "Whistleblower", Level: riskLevel(whistleScore), Score: whistleScore, Issues: whistleIssues}) - - // Academy compliance - academyScore := int(math.Round(report.Academy.CompletionRate)) - academyIssues := report.Academy.OverdueCount - modules = append(modules, ModuleRisk{Module: "Academy", Level: riskLevel(academyScore), Score: academyScore, Issues: academyIssues}) - - // Overall score is the average across modules - totalScore := 0 - for _, m := range modules { - totalScore += m.Score - } - if len(modules) > 0 { - totalScore = totalScore / len(modules) - } - - totalFindings := 0 - criticalFindings := 0 - for _, m := range modules { - totalFindings += m.Issues - if m.Level == "CRITICAL" { - criticalFindings += m.Issues - } - } - - return RiskOverview{ - OverallLevel: riskLevel(totalScore), - ModuleRisks: modules, - OpenFindings: totalFindings, - CriticalFindings: criticalFindings, - } -} - -func riskLevel(score int) string { - switch { - case score >= 75: - return "LOW" - case score >= 50: - return "MEDIUM" - case score >= 25: - return "HIGH" - default: - return "CRITICAL" - } -} - -func (s *Store) 
calculateComplianceScore(report *ExecutiveReport) int { - scores := []int{} - weights := []int{} - - // DSGVO: weight 30 (most important) - if report.DSGVO.TOMsTotal > 0 { - scores = append(scores, report.DSGVO.CompletionPercent) - } else { - scores = append(scores, 0) - } - weights = append(weights, 30) - - // Vendor compliance: weight 20 - vendorScore := 100 - if report.Vendors.TotalVendors > 0 { - vendorScore -= report.Vendors.PendingReviews * 10 - vendorScore -= report.Vendors.ExpiredContracts * 15 - } - if vendorScore < 0 { - vendorScore = 0 - } - scores = append(scores, vendorScore) - weights = append(weights, 20) - - // Incident handling: weight 20 - incidentScore := 100 - incidentScore -= report.Incidents.OpenIncidents * 10 - incidentScore -= report.Incidents.NotificationsPending * 20 - if incidentScore < 0 { - incidentScore = 0 - } - scores = append(scores, incidentScore) - weights = append(weights, 20) - - // Whistleblower: weight 15 - whistleScore := 100 - whistleScore -= report.Whistleblower.OverdueAcknowledgments * 25 - whistleScore -= report.Whistleblower.OverdueFeedbacks * 15 - if whistleScore < 0 { - whistleScore = 0 - } - scores = append(scores, whistleScore) - weights = append(weights, 15) - - // Academy: weight 15 - academyScore := int(math.Round(report.Academy.CompletionRate)) - scores = append(scores, academyScore) - weights = append(weights, 15) - - totalWeight := 0 - weightedSum := 0 - for i, sc := range scores { - weightedSum += sc * weights[i] - totalWeight += weights[i] - } - if totalWeight == 0 { - return 0 - } - return int(math.Round(float64(weightedSum) / float64(totalWeight))) -} - -func (s *Store) getUpcomingDeadlines(ctx context.Context, tenantID uuid.UUID) []Deadline { - deadlines := []Deadline{} - now := time.Now().UTC() - - // Vendor reviews due - rows, err := s.pool.Query(ctx, ` - SELECT name, next_review_date FROM compliance.vendor_compliance - WHERE tenant_id = $1 AND next_review_date IS NOT NULL - ORDER BY next_review_date ASC 
LIMIT 10 - `, tenantID) - if err == nil { - defer rows.Close() - for rows.Next() { - var name string - var dueDate time.Time - if err := rows.Scan(&name, &dueDate); err != nil { - continue - } - daysLeft := int(dueDate.Sub(now).Hours() / 24) - severity := "INFO" - if daysLeft < 0 { - severity = "OVERDUE" - } else if daysLeft <= 7 { - severity = "URGENT" - } else if daysLeft <= 30 { - severity = "WARNING" - } - deadlines = append(deadlines, Deadline{ - Module: "Vendors", - Type: "REVIEW", - Description: "Vendor-Review: " + name, - DueDate: dueDate, - DaysLeft: daysLeft, - Severity: severity, - }) - } - } - - // DSR deadlines (overdue) - rows2, err := s.pool.Query(ctx, ` - SELECT request_type, deadline FROM compliance.dsr_requests - WHERE tenant_id = $1 AND status NOT IN ('COMPLETED', 'REJECTED') - AND deadline IS NOT NULL - ORDER BY deadline ASC LIMIT 10 - `, tenantID) - if err == nil { - defer rows2.Close() - for rows2.Next() { - var reqType string - var dueDate time.Time - if err := rows2.Scan(&reqType, &dueDate); err != nil { - continue - } - daysLeft := int(dueDate.Sub(now).Hours() / 24) - severity := "INFO" - if daysLeft < 0 { - severity = "OVERDUE" - } else if daysLeft <= 3 { - severity = "URGENT" - } else if daysLeft <= 14 { - severity = "WARNING" - } - deadlines = append(deadlines, Deadline{ - Module: "DSR", - Type: "RESPONSE", - Description: "Betroffenenrecht: " + reqType, - DueDate: dueDate, - DaysLeft: daysLeft, - Severity: severity, - }) - } - } - - // Sort by due date ascending - sort.Slice(deadlines, func(i, j int) bool { - return deadlines[i].DueDate.Before(deadlines[j].DueDate) - }) - - if len(deadlines) > 15 { - deadlines = deadlines[:15] - } - - return deadlines -} - -func (s *Store) getRecentActivity(ctx context.Context, tenantID uuid.UUID) []ActivityEntry { - activities := []ActivityEntry{} - - // Recent vendors created/updated - rows, _ := s.pool.Query(ctx, ` - SELECT name, created_at, 'CREATED' as action FROM compliance.vendor_compliance - 
WHERE tenant_id = $1 AND created_at > NOW() - INTERVAL '30 days' - UNION ALL - SELECT name, updated_at, 'UPDATED' FROM compliance.vendor_compliance - WHERE tenant_id = $1 AND updated_at > created_at AND updated_at > NOW() - INTERVAL '30 days' - ORDER BY 2 DESC LIMIT 5 - `, tenantID) - if rows != nil { - defer rows.Close() - for rows.Next() { - var name, action string - var ts time.Time - if err := rows.Scan(&name, &ts, &action); err != nil { - continue - } - desc := "Vendor " - if action == "CREATED" { - desc += "angelegt: " - } else { - desc += "aktualisiert: " - } - activities = append(activities, ActivityEntry{ - Timestamp: ts, - Module: "Vendors", - Action: action, - Description: desc + name, - }) - } - } - - // Recent incidents - rows2, _ := s.pool.Query(ctx, ` - SELECT title, created_at, severity FROM compliance.incidents - WHERE tenant_id = $1 AND created_at > NOW() - INTERVAL '30 days' - ORDER BY created_at DESC LIMIT 5 - `, tenantID) - if rows2 != nil { - defer rows2.Close() - for rows2.Next() { - var title, severity string - var ts time.Time - if err := rows2.Scan(&title, &ts, &severity); err != nil { - continue - } - activities = append(activities, ActivityEntry{ - Timestamp: ts, - Module: "Incidents", - Action: "CREATED", - Description: "Datenpanne (" + severity + "): " + title, - }) - } - } - - // Recent whistleblower reports (admin view) - rows3, _ := s.pool.Query(ctx, ` - SELECT category, created_at FROM whistleblower_reports - WHERE tenant_id = $1 AND created_at > NOW() - INTERVAL '30 days' - ORDER BY created_at DESC LIMIT 5 - `, tenantID) - if rows3 != nil { - defer rows3.Close() - for rows3.Next() { - var category string - var ts time.Time - if err := rows3.Scan(&category, &ts); err != nil { - continue - } - activities = append(activities, ActivityEntry{ - Timestamp: ts, - Module: "Whistleblower", - Action: "REPORT", - Description: "Neue Meldung: " + category, - }) - } - } - - // Sort by timestamp descending (most recent first) - 
sort.Slice(activities, func(i, j int) bool { - return activities[i].Timestamp.After(activities[j].Timestamp) - }) - - if len(activities) > 20 { - activities = activities[:20] - } - - return activities -} diff --git a/ai-compliance-sdk/internal/sso/models.go b/ai-compliance-sdk/internal/sso/models.go deleted file mode 100644 index 6ec524c..0000000 --- a/ai-compliance-sdk/internal/sso/models.go +++ /dev/null @@ -1,158 +0,0 @@ -package sso - -import ( - "time" - - "github.com/google/uuid" -) - -// ============================================================================ -// Constants / Enums -// ============================================================================ - -// ProviderType represents the SSO authentication protocol. -type ProviderType string - -const ( - // ProviderTypeOIDC represents OpenID Connect authentication. - ProviderTypeOIDC ProviderType = "oidc" - // ProviderTypeSAML represents SAML 2.0 authentication. - ProviderTypeSAML ProviderType = "saml" -) - -// ============================================================================ -// Main Entities -// ============================================================================ - -// SSOConfig represents a per-tenant SSO provider configuration supporting -// OIDC and SAML authentication protocols. 
-type SSOConfig struct { - ID uuid.UUID `json:"id" db:"id"` - TenantID uuid.UUID `json:"tenant_id" db:"tenant_id"` - ProviderType ProviderType `json:"provider_type" db:"provider_type"` - Name string `json:"name" db:"name"` - Enabled bool `json:"enabled" db:"enabled"` - - // OIDC settings - OIDCIssuerURL string `json:"oidc_issuer_url,omitempty" db:"oidc_issuer_url"` - OIDCClientID string `json:"oidc_client_id,omitempty" db:"oidc_client_id"` - OIDCClientSecret string `json:"oidc_client_secret,omitempty" db:"oidc_client_secret"` - OIDCRedirectURI string `json:"oidc_redirect_uri,omitempty" db:"oidc_redirect_uri"` - OIDCScopes []string `json:"oidc_scopes,omitempty" db:"oidc_scopes"` - - // SAML settings (for future use) - SAMLEntityID string `json:"saml_entity_id,omitempty" db:"saml_entity_id"` - SAMLSSOURL string `json:"saml_sso_url,omitempty" db:"saml_sso_url"` - SAMLCertificate string `json:"saml_certificate,omitempty" db:"saml_certificate"` - SAMLACS_URL string `json:"saml_acs_url,omitempty" db:"saml_acs_url"` - - // Role mapping: maps SSO group/role names to internal role IDs - RoleMapping map[string]string `json:"role_mapping" db:"role_mapping"` - DefaultRoleID *uuid.UUID `json:"default_role_id,omitempty" db:"default_role_id"` - AutoProvision bool `json:"auto_provision" db:"auto_provision"` - - // Audit - CreatedAt time.Time `json:"created_at" db:"created_at"` - UpdatedAt time.Time `json:"updated_at" db:"updated_at"` -} - -// SSOUser represents a JIT-provisioned user authenticated via an SSO provider. 
-type SSOUser struct { - ID uuid.UUID `json:"id" db:"id"` - TenantID uuid.UUID `json:"tenant_id" db:"tenant_id"` - SSOConfigID uuid.UUID `json:"sso_config_id" db:"sso_config_id"` - ExternalID string `json:"external_id" db:"external_id"` - Email string `json:"email" db:"email"` - DisplayName string `json:"display_name" db:"display_name"` - Groups []string `json:"groups" db:"groups"` - LastLogin *time.Time `json:"last_login,omitempty" db:"last_login"` - IsActive bool `json:"is_active" db:"is_active"` - - // Audit - CreatedAt time.Time `json:"created_at" db:"created_at"` - UpdatedAt time.Time `json:"updated_at" db:"updated_at"` -} - -// ============================================================================ -// API Request Types -// ============================================================================ - -// CreateSSOConfigRequest is the API request for creating an SSO configuration. -type CreateSSOConfigRequest struct { - ProviderType ProviderType `json:"provider_type" binding:"required"` - Name string `json:"name" binding:"required"` - Enabled bool `json:"enabled"` - OIDCIssuerURL string `json:"oidc_issuer_url"` - OIDCClientID string `json:"oidc_client_id"` - OIDCClientSecret string `json:"oidc_client_secret"` - OIDCRedirectURI string `json:"oidc_redirect_uri"` - OIDCScopes []string `json:"oidc_scopes"` - RoleMapping map[string]string `json:"role_mapping"` - DefaultRoleID *uuid.UUID `json:"default_role_id"` - AutoProvision bool `json:"auto_provision"` -} - -// UpdateSSOConfigRequest is the API request for partially updating an SSO -// configuration. Pointer fields allow distinguishing between "not provided" -// (nil) and "set to zero value". 
-type UpdateSSOConfigRequest struct { - Name *string `json:"name"` - Enabled *bool `json:"enabled"` - OIDCIssuerURL *string `json:"oidc_issuer_url"` - OIDCClientID *string `json:"oidc_client_id"` - OIDCClientSecret *string `json:"oidc_client_secret"` - OIDCRedirectURI *string `json:"oidc_redirect_uri"` - OIDCScopes []string `json:"oidc_scopes"` - RoleMapping map[string]string `json:"role_mapping"` - DefaultRoleID *uuid.UUID `json:"default_role_id"` - AutoProvision *bool `json:"auto_provision"` -} - -// ============================================================================ -// JWT / Session Types -// ============================================================================ - -// SSOClaims holds the claims embedded in JWT tokens issued after successful -// SSO authentication. These are used for downstream authorization decisions. -type SSOClaims struct { - UserID uuid.UUID `json:"user_id"` - TenantID uuid.UUID `json:"tenant_id"` - Email string `json:"email"` - DisplayName string `json:"display_name"` - Roles []string `json:"roles"` - SSOConfigID uuid.UUID `json:"sso_config_id"` -} - -// ============================================================================ -// List / Filter Types -// ============================================================================ - -// SSOConfigFilters defines filters for listing SSO configurations. -type SSOConfigFilters struct { - ProviderType ProviderType - Enabled *bool - Search string - Limit int - Offset int -} - -// SSOUserFilters defines filters for listing SSO users. -type SSOUserFilters struct { - SSOConfigID *uuid.UUID - Email string - IsActive *bool - Limit int - Offset int -} - -// SSOConfigListResponse is the API response for listing SSO configurations. -type SSOConfigListResponse struct { - Configs []SSOConfig `json:"configs"` - Total int `json:"total"` -} - -// SSOUserListResponse is the API response for listing SSO users. 
-type SSOUserListResponse struct { - Users []SSOUser `json:"users"` - Total int `json:"total"` -} diff --git a/ai-compliance-sdk/internal/sso/store.go b/ai-compliance-sdk/internal/sso/store.go deleted file mode 100644 index b8083dc..0000000 --- a/ai-compliance-sdk/internal/sso/store.go +++ /dev/null @@ -1,477 +0,0 @@ -package sso - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" -) - -// Store handles SSO configuration and user data persistence. -type Store struct { - pool *pgxpool.Pool -} - -// NewStore creates a new SSO store. -func NewStore(pool *pgxpool.Pool) *Store { - return &Store{pool: pool} -} - -// ============================================================================ -// SSO Configuration CRUD Operations -// ============================================================================ - -// CreateConfig creates a new SSO configuration for a tenant. -func (s *Store) CreateConfig(ctx context.Context, tenantID uuid.UUID, req *CreateSSOConfigRequest) (*SSOConfig, error) { - now := time.Now().UTC() - - cfg := &SSOConfig{ - ID: uuid.New(), - TenantID: tenantID, - ProviderType: req.ProviderType, - Name: req.Name, - Enabled: req.Enabled, - OIDCIssuerURL: req.OIDCIssuerURL, - OIDCClientID: req.OIDCClientID, - OIDCClientSecret: req.OIDCClientSecret, - OIDCRedirectURI: req.OIDCRedirectURI, - OIDCScopes: req.OIDCScopes, - RoleMapping: req.RoleMapping, - DefaultRoleID: req.DefaultRoleID, - AutoProvision: req.AutoProvision, - CreatedAt: now, - UpdatedAt: now, - } - - // Apply defaults - if len(cfg.OIDCScopes) == 0 { - cfg.OIDCScopes = []string{"openid", "profile", "email"} - } - if cfg.RoleMapping == nil { - cfg.RoleMapping = map[string]string{} - } - - roleMappingJSON, err := json.Marshal(cfg.RoleMapping) - if err != nil { - return nil, fmt.Errorf("failed to marshal role_mapping: %w", err) - } - - _, err = s.pool.Exec(ctx, ` - INSERT INTO sso_configurations ( - 
id, tenant_id, provider_type, name, enabled, - oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes, - saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url, - role_mapping, default_role_id, auto_provision, - created_at, updated_at - ) VALUES ( - $1, $2, $3, $4, $5, - $6, $7, $8, $9, $10, - $11, $12, $13, $14, - $15, $16, $17, - $18, $19 - ) - `, - cfg.ID, cfg.TenantID, string(cfg.ProviderType), cfg.Name, cfg.Enabled, - cfg.OIDCIssuerURL, cfg.OIDCClientID, cfg.OIDCClientSecret, cfg.OIDCRedirectURI, cfg.OIDCScopes, - cfg.SAMLEntityID, cfg.SAMLSSOURL, cfg.SAMLCertificate, cfg.SAMLACS_URL, - roleMappingJSON, cfg.DefaultRoleID, cfg.AutoProvision, - cfg.CreatedAt, cfg.UpdatedAt, - ) - if err != nil { - return nil, fmt.Errorf("failed to insert sso configuration: %w", err) - } - - return cfg, nil -} - -// GetConfig retrieves an SSO configuration by ID and tenant. -func (s *Store) GetConfig(ctx context.Context, tenantID, configID uuid.UUID) (*SSOConfig, error) { - var cfg SSOConfig - var providerType string - var roleMappingJSON []byte - - err := s.pool.QueryRow(ctx, ` - SELECT - id, tenant_id, provider_type, name, enabled, - oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes, - saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url, - role_mapping, default_role_id, auto_provision, - created_at, updated_at - FROM sso_configurations - WHERE id = $1 AND tenant_id = $2 - `, configID, tenantID).Scan( - &cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled, - &cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes, - &cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL, - &roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision, - &cfg.CreatedAt, &cfg.UpdatedAt, - ) - if err == pgx.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("failed to get sso configuration: %w", err) - } - - cfg.ProviderType = 
ProviderType(providerType) - cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON) - - return &cfg, nil -} - -// GetConfigByName retrieves an SSO configuration by name and tenant. -func (s *Store) GetConfigByName(ctx context.Context, tenantID uuid.UUID, name string) (*SSOConfig, error) { - var cfg SSOConfig - var providerType string - var roleMappingJSON []byte - - err := s.pool.QueryRow(ctx, ` - SELECT - id, tenant_id, provider_type, name, enabled, - oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes, - saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url, - role_mapping, default_role_id, auto_provision, - created_at, updated_at - FROM sso_configurations - WHERE tenant_id = $1 AND name = $2 - `, tenantID, name).Scan( - &cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled, - &cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes, - &cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL, - &roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision, - &cfg.CreatedAt, &cfg.UpdatedAt, - ) - if err == pgx.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("failed to get sso configuration by name: %w", err) - } - - cfg.ProviderType = ProviderType(providerType) - cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON) - - return &cfg, nil -} - -// ListConfigs lists all SSO configurations for a tenant. 
-func (s *Store) ListConfigs(ctx context.Context, tenantID uuid.UUID) ([]SSOConfig, error) { - rows, err := s.pool.Query(ctx, ` - SELECT - id, tenant_id, provider_type, name, enabled, - oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes, - saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url, - role_mapping, default_role_id, auto_provision, - created_at, updated_at - FROM sso_configurations - WHERE tenant_id = $1 - ORDER BY name ASC - `, tenantID) - if err != nil { - return nil, fmt.Errorf("failed to list sso configurations: %w", err) - } - defer rows.Close() - - var configs []SSOConfig - for rows.Next() { - cfg, err := scanSSOConfig(rows) - if err != nil { - return nil, err - } - configs = append(configs, *cfg) - } - - return configs, nil -} - -// UpdateConfig updates an existing SSO configuration with partial updates. -func (s *Store) UpdateConfig(ctx context.Context, tenantID, configID uuid.UUID, req *UpdateSSOConfigRequest) (*SSOConfig, error) { - cfg, err := s.GetConfig(ctx, tenantID, configID) - if err != nil { - return nil, err - } - if cfg == nil { - return nil, fmt.Errorf("sso configuration not found") - } - - // Apply partial updates - if req.Name != nil { - cfg.Name = *req.Name - } - if req.Enabled != nil { - cfg.Enabled = *req.Enabled - } - if req.OIDCIssuerURL != nil { - cfg.OIDCIssuerURL = *req.OIDCIssuerURL - } - if req.OIDCClientID != nil { - cfg.OIDCClientID = *req.OIDCClientID - } - if req.OIDCClientSecret != nil { - cfg.OIDCClientSecret = *req.OIDCClientSecret - } - if req.OIDCRedirectURI != nil { - cfg.OIDCRedirectURI = *req.OIDCRedirectURI - } - if req.OIDCScopes != nil { - cfg.OIDCScopes = req.OIDCScopes - } - if req.RoleMapping != nil { - cfg.RoleMapping = req.RoleMapping - } - if req.DefaultRoleID != nil { - cfg.DefaultRoleID = req.DefaultRoleID - } - if req.AutoProvision != nil { - cfg.AutoProvision = *req.AutoProvision - } - - cfg.UpdatedAt = time.Now().UTC() - - roleMappingJSON, err := 
json.Marshal(cfg.RoleMapping) - if err != nil { - return nil, fmt.Errorf("failed to marshal role_mapping: %w", err) - } - - _, err = s.pool.Exec(ctx, ` - UPDATE sso_configurations SET - name = $3, enabled = $4, - oidc_issuer_url = $5, oidc_client_id = $6, oidc_client_secret = $7, - oidc_redirect_uri = $8, oidc_scopes = $9, - saml_entity_id = $10, saml_sso_url = $11, saml_certificate = $12, saml_acs_url = $13, - role_mapping = $14, default_role_id = $15, auto_provision = $16, - updated_at = $17 - WHERE id = $1 AND tenant_id = $2 - `, - cfg.ID, cfg.TenantID, - cfg.Name, cfg.Enabled, - cfg.OIDCIssuerURL, cfg.OIDCClientID, cfg.OIDCClientSecret, - cfg.OIDCRedirectURI, cfg.OIDCScopes, - cfg.SAMLEntityID, cfg.SAMLSSOURL, cfg.SAMLCertificate, cfg.SAMLACS_URL, - roleMappingJSON, cfg.DefaultRoleID, cfg.AutoProvision, - cfg.UpdatedAt, - ) - if err != nil { - return nil, fmt.Errorf("failed to update sso configuration: %w", err) - } - - return cfg, nil -} - -// DeleteConfig deletes an SSO configuration by ID and tenant. -func (s *Store) DeleteConfig(ctx context.Context, tenantID, configID uuid.UUID) error { - _, err := s.pool.Exec(ctx, - "DELETE FROM sso_configurations WHERE id = $1 AND tenant_id = $2", - configID, tenantID, - ) - if err != nil { - return fmt.Errorf("failed to delete sso configuration: %w", err) - } - return nil -} - -// GetEnabledConfig retrieves the active/enabled SSO configuration for a tenant. 
-func (s *Store) GetEnabledConfig(ctx context.Context, tenantID uuid.UUID) (*SSOConfig, error) { - var cfg SSOConfig - var providerType string - var roleMappingJSON []byte - - err := s.pool.QueryRow(ctx, ` - SELECT - id, tenant_id, provider_type, name, enabled, - oidc_issuer_url, oidc_client_id, oidc_client_secret, oidc_redirect_uri, oidc_scopes, - saml_entity_id, saml_sso_url, saml_certificate, saml_acs_url, - role_mapping, default_role_id, auto_provision, - created_at, updated_at - FROM sso_configurations - WHERE tenant_id = $1 AND enabled = true - LIMIT 1 - `, tenantID).Scan( - &cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled, - &cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes, - &cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL, - &roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision, - &cfg.CreatedAt, &cfg.UpdatedAt, - ) - if err == pgx.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("failed to get enabled sso configuration: %w", err) - } - - cfg.ProviderType = ProviderType(providerType) - cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON) - - return &cfg, nil -} - -// ============================================================================ -// SSO User Operations -// ============================================================================ - -// UpsertUser inserts or updates an SSO user via JIT provisioning. -// On conflict (tenant_id, sso_config_id, external_id), the user's email, -// display name, groups, and last login timestamp are updated. 
-func (s *Store) UpsertUser(ctx context.Context, tenantID, ssoConfigID uuid.UUID, externalID, email, displayName string, groups []string) (*SSOUser, error) { - now := time.Now().UTC() - id := uuid.New() - - var user SSOUser - err := s.pool.QueryRow(ctx, ` - INSERT INTO sso_users ( - id, tenant_id, sso_config_id, - external_id, email, display_name, groups, - last_login, is_active, - created_at, updated_at - ) VALUES ( - $1, $2, $3, - $4, $5, $6, $7, - $8, true, - $8, $8 - ) - ON CONFLICT (tenant_id, sso_config_id, external_id) DO UPDATE SET - email = EXCLUDED.email, - display_name = EXCLUDED.display_name, - groups = EXCLUDED.groups, - last_login = EXCLUDED.last_login, - is_active = true, - updated_at = EXCLUDED.updated_at - RETURNING - id, tenant_id, sso_config_id, - external_id, email, display_name, groups, - last_login, is_active, - created_at, updated_at - `, - id, tenantID, ssoConfigID, - externalID, email, displayName, groups, - now, - ).Scan( - &user.ID, &user.TenantID, &user.SSOConfigID, - &user.ExternalID, &user.Email, &user.DisplayName, &user.Groups, - &user.LastLogin, &user.IsActive, - &user.CreatedAt, &user.UpdatedAt, - ) - if err != nil { - return nil, fmt.Errorf("failed to upsert sso user: %w", err) - } - - return &user, nil -} - -// GetUserByExternalID looks up an SSO user by their external identity provider ID. 
-func (s *Store) GetUserByExternalID(ctx context.Context, tenantID, ssoConfigID uuid.UUID, externalID string) (*SSOUser, error) { - var user SSOUser - - err := s.pool.QueryRow(ctx, ` - SELECT - id, tenant_id, sso_config_id, - external_id, email, display_name, groups, - last_login, is_active, - created_at, updated_at - FROM sso_users - WHERE tenant_id = $1 AND sso_config_id = $2 AND external_id = $3 - `, tenantID, ssoConfigID, externalID).Scan( - &user.ID, &user.TenantID, &user.SSOConfigID, - &user.ExternalID, &user.Email, &user.DisplayName, &user.Groups, - &user.LastLogin, &user.IsActive, - &user.CreatedAt, &user.UpdatedAt, - ) - if err == pgx.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("failed to get sso user by external id: %w", err) - } - - return &user, nil -} - -// ListUsers lists all SSO-provisioned users for a tenant. -func (s *Store) ListUsers(ctx context.Context, tenantID uuid.UUID) ([]SSOUser, error) { - rows, err := s.pool.Query(ctx, ` - SELECT - id, tenant_id, sso_config_id, - external_id, email, display_name, groups, - last_login, is_active, - created_at, updated_at - FROM sso_users - WHERE tenant_id = $1 - ORDER BY display_name ASC - `, tenantID) - if err != nil { - return nil, fmt.Errorf("failed to list sso users: %w", err) - } - defer rows.Close() - - var users []SSOUser - for rows.Next() { - user, err := scanSSOUser(rows) - if err != nil { - return nil, err - } - users = append(users, *user) - } - - return users, nil -} - -// ============================================================================ -// Row Scanning Helpers -// ============================================================================ - -// scanSSOConfig scans an SSO configuration row from pgx.Rows. 
-func scanSSOConfig(rows pgx.Rows) (*SSOConfig, error) { - var cfg SSOConfig - var providerType string - var roleMappingJSON []byte - - err := rows.Scan( - &cfg.ID, &cfg.TenantID, &providerType, &cfg.Name, &cfg.Enabled, - &cfg.OIDCIssuerURL, &cfg.OIDCClientID, &cfg.OIDCClientSecret, &cfg.OIDCRedirectURI, &cfg.OIDCScopes, - &cfg.SAMLEntityID, &cfg.SAMLSSOURL, &cfg.SAMLCertificate, &cfg.SAMLACS_URL, - &roleMappingJSON, &cfg.DefaultRoleID, &cfg.AutoProvision, - &cfg.CreatedAt, &cfg.UpdatedAt, - ) - if err != nil { - return nil, fmt.Errorf("failed to scan sso configuration: %w", err) - } - - cfg.ProviderType = ProviderType(providerType) - cfg.RoleMapping = unmarshalRoleMapping(roleMappingJSON) - - return &cfg, nil -} - -// scanSSOUser scans an SSO user row from pgx.Rows. -func scanSSOUser(rows pgx.Rows) (*SSOUser, error) { - var user SSOUser - - err := rows.Scan( - &user.ID, &user.TenantID, &user.SSOConfigID, - &user.ExternalID, &user.Email, &user.DisplayName, &user.Groups, - &user.LastLogin, &user.IsActive, - &user.CreatedAt, &user.UpdatedAt, - ) - if err != nil { - return nil, fmt.Errorf("failed to scan sso user: %w", err) - } - - return &user, nil -} - -// unmarshalRoleMapping safely unmarshals JSONB role_mapping bytes into a map. 
-func unmarshalRoleMapping(data []byte) map[string]string { - if data == nil { - return map[string]string{} - } - var m map[string]string - if err := json.Unmarshal(data, &m); err != nil { - return map[string]string{} - } - return m -} diff --git a/backend-compliance/compliance/api/ai_routes.py b/backend-compliance/compliance/api/ai_routes.py index 7be91f0..875b0f8 100644 --- a/backend-compliance/compliance/api/ai_routes.py +++ b/backend-compliance/compliance/api/ai_routes.py @@ -36,7 +36,6 @@ async def list_ai_systems( db: Session = Depends(get_db), ): """List all registered AI systems.""" - import uuid as _uuid query = db.query(AISystemDB) if classification: @@ -88,7 +87,6 @@ async def create_ai_system( ): """Register a new AI system.""" import uuid as _uuid - from datetime import datetime try: cls_enum = AIClassificationEnum(data.classification) if data.classification else AIClassificationEnum.UNCLASSIFIED diff --git a/backend-compliance/compliance/api/audit_routes.py b/backend-compliance/compliance/api/audit_routes.py index b74053b..179fa9e 100644 --- a/backend-compliance/compliance/api/audit_routes.py +++ b/backend-compliance/compliance/api/audit_routes.py @@ -26,7 +26,7 @@ from ..db.models import ( ) from .schemas import ( CreateAuditSessionRequest, AuditSessionResponse, AuditSessionSummary, AuditSessionDetailResponse, - AuditSessionListResponse, SignOffRequest, SignOffResponse, + SignOffRequest, SignOffResponse, AuditChecklistItem, AuditChecklistResponse, AuditStatistics, PaginationMeta, ) @@ -164,7 +164,7 @@ async def get_audit_session( completion_percentage=session.completion_percentage, ) - return AuditSessionDetail( + return AuditSessionDetailResponse( id=session.id, name=session.name, description=session.description, diff --git a/backend-compliance/compliance/api/banner_routes.py b/backend-compliance/compliance/api/banner_routes.py index cfd456f..f57c89e 100644 --- a/backend-compliance/compliance/api/banner_routes.py +++ 
b/backend-compliance/compliance/api/banner_routes.py @@ -12,7 +12,6 @@ from typing import Optional, List from fastapi import APIRouter, Depends, HTTPException, Query, Header from pydantic import BaseModel from sqlalchemy.orm import Session -from sqlalchemy import func from classroom_engine.database import get_db from ..db.banner_models import ( @@ -317,12 +316,12 @@ async def get_site_config( categories = db.query(BannerCategoryConfigDB).filter( BannerCategoryConfigDB.site_config_id == config.id, - BannerCategoryConfigDB.is_active == True, + BannerCategoryConfigDB.is_active, ).order_by(BannerCategoryConfigDB.sort_order).all() vendors = db.query(BannerVendorConfigDB).filter( BannerVendorConfigDB.site_config_id == config.id, - BannerVendorConfigDB.is_active == True, + BannerVendorConfigDB.is_active, ).all() result = _site_config_to_dict(config) diff --git a/backend-compliance/compliance/api/change_request_engine.py b/backend-compliance/compliance/api/change_request_engine.py index 5cf399a..4faf763 100644 --- a/backend-compliance/compliance/api/change_request_engine.py +++ b/backend-compliance/compliance/api/change_request_engine.py @@ -96,8 +96,8 @@ def generate_change_requests_for_use_case( trigger_type="use_case_high_risk", target_document_type="dsfa", proposal_title=f"DSFA erstellen für '{title}' (Risiko: {risk_level})", - proposal_body=f"Ein neuer Use Case mit hohem Risiko wurde erstellt. " - f"Art. 35 DSGVO verlangt eine DSFA für Hochrisiko-Verarbeitungen.", + proposal_body="Ein neuer Use Case mit hohem Risiko wurde erstellt. " + "Art. 
35 DSGVO verlangt eine DSFA für Hochrisiko-Verarbeitungen.", proposed_changes={ "source": "use_case", "title": title, diff --git a/backend-compliance/compliance/api/change_request_routes.py b/backend-compliance/compliance/api/change_request_routes.py index be4cc2c..fce7f06 100644 --- a/backend-compliance/compliance/api/change_request_routes.py +++ b/backend-compliance/compliance/api/change_request_routes.py @@ -14,8 +14,7 @@ Endpoints: import json import logging -from datetime import datetime -from typing import Optional, List +from typing import Optional from fastapi import APIRouter, Depends, HTTPException, Query, Header from pydantic import BaseModel diff --git a/backend-compliance/compliance/api/company_profile_routes.py b/backend-compliance/compliance/api/company_profile_routes.py index ec47300..0b44a86 100644 --- a/backend-compliance/compliance/api/company_profile_routes.py +++ b/backend-compliance/compliance/api/company_profile_routes.py @@ -11,7 +11,6 @@ Endpoints: import json import logging -import uuid from typing import Optional from fastapi import APIRouter, HTTPException, Header @@ -127,16 +126,68 @@ class AuditListResponse(BaseModel): # SQL column lists — keep in sync with SELECT/INSERT # ============================================================================= -_BASE_COLUMNS = """id, tenant_id, company_name, legal_form, industry, founded_year, - business_model, offerings, company_size, employee_count, annual_revenue, - headquarters_country, headquarters_city, has_international_locations, - international_countries, target_markets, primary_jurisdiction, - is_data_controller, is_data_processor, uses_ai, ai_use_cases, - dpo_name, dpo_email, legal_contact_name, legal_contact_email, - machine_builder, is_complete, completed_at, created_at, updated_at, - repos, document_sources, processing_systems, ai_systems, technical_contacts, - subject_to_nis2, subject_to_ai_act, subject_to_iso27001, - supervisory_authority, review_cycle_months""" 
+_BASE_COLUMNS_LIST = [ + "id", "tenant_id", "company_name", "legal_form", "industry", "founded_year", + "business_model", "offerings", "company_size", "employee_count", "annual_revenue", + "headquarters_country", "headquarters_city", "has_international_locations", + "international_countries", "target_markets", "primary_jurisdiction", + "is_data_controller", "is_data_processor", "uses_ai", "ai_use_cases", + "dpo_name", "dpo_email", "legal_contact_name", "legal_contact_email", + "machine_builder", "is_complete", "completed_at", "created_at", "updated_at", + "repos", "document_sources", "processing_systems", "ai_systems", "technical_contacts", + "subject_to_nis2", "subject_to_ai_act", "subject_to_iso27001", + "supervisory_authority", "review_cycle_months", +] + +_BASE_COLUMNS = ", ".join(_BASE_COLUMNS_LIST) + +# Per-field defaults and type coercions for row_to_response. +# Each entry is (field_name, default_value, expected_type_or_None). +# - expected_type: if set, the value is checked with isinstance; if it fails, +# default_value is used instead. +# - Special sentinels: "STR" means str(value), "STR_OR_NONE" means str(v) if v else None. 
+_FIELD_DEFAULTS = { + "id": (None, "STR"), + "tenant_id": (None, None), + "company_name": ("", None), + "legal_form": ("GmbH", None), + "industry": ("", None), + "founded_year": (None, None), + "business_model": ("B2B", None), + "offerings": ([], list), + "company_size": ("small", None), + "employee_count": ("1-9", None), + "annual_revenue": ("< 2 Mio", None), + "headquarters_country": ("DE", None), + "headquarters_city": ("", None), + "has_international_locations": (False, None), + "international_countries": ([], list), + "target_markets": (["DE"], list), + "primary_jurisdiction": ("DE", None), + "is_data_controller": (True, None), + "is_data_processor": (False, None), + "uses_ai": (False, None), + "ai_use_cases": ([], list), + "dpo_name": (None, None), + "dpo_email": (None, None), + "legal_contact_name": (None, None), + "legal_contact_email": (None, None), + "machine_builder": (None, dict), + "is_complete": (False, None), + "completed_at": (None, "STR_OR_NONE"), + "created_at": (None, "STR"), + "updated_at": (None, "STR"), + "repos": ([], list), + "document_sources": ([], list), + "processing_systems": ([], list), + "ai_systems": ([], list), + "technical_contacts": ([], list), + "subject_to_nis2": (False, None), + "subject_to_ai_act": (False, None), + "subject_to_iso27001": (False, None), + "supervisory_authority": (None, None), + "review_cycle_months": (12, None), +} # ============================================================================= @@ -144,50 +195,29 @@ _BASE_COLUMNS = """id, tenant_id, company_name, legal_form, industry, founded_ye # ============================================================================= def row_to_response(row) -> CompanyProfileResponse: - """Convert a DB row to response model.""" - return CompanyProfileResponse( - id=str(row[0]), - tenant_id=row[1], - company_name=row[2] or "", - legal_form=row[3] or "GmbH", - industry=row[4] or "", - founded_year=row[5], - business_model=row[6] or "B2B", - offerings=row[7] if 
isinstance(row[7], list) else [], - company_size=row[8] or "small", - employee_count=row[9] or "1-9", - annual_revenue=row[10] or "< 2 Mio", - headquarters_country=row[11] or "DE", - headquarters_city=row[12] or "", - has_international_locations=row[13] or False, - international_countries=row[14] if isinstance(row[14], list) else [], - target_markets=row[15] if isinstance(row[15], list) else ["DE"], - primary_jurisdiction=row[16] or "DE", - is_data_controller=row[17] if row[17] is not None else True, - is_data_processor=row[18] or False, - uses_ai=row[19] or False, - ai_use_cases=row[20] if isinstance(row[20], list) else [], - dpo_name=row[21], - dpo_email=row[22], - legal_contact_name=row[23], - legal_contact_email=row[24], - machine_builder=row[25] if isinstance(row[25], dict) else None, - is_complete=row[26] or False, - completed_at=str(row[27]) if row[27] else None, - created_at=str(row[28]), - updated_at=str(row[29]), - # Phase 2 fields (indices 30-39) - repos=row[30] if isinstance(row[30], list) else [], - document_sources=row[31] if isinstance(row[31], list) else [], - processing_systems=row[32] if isinstance(row[32], list) else [], - ai_systems=row[33] if isinstance(row[33], list) else [], - technical_contacts=row[34] if isinstance(row[34], list) else [], - subject_to_nis2=row[35] or False, - subject_to_ai_act=row[36] or False, - subject_to_iso27001=row[37] or False, - supervisory_authority=row[38], - review_cycle_months=row[39] or 12, - ) + """Convert a DB row to response model using zip-based column mapping.""" + raw = dict(zip(_BASE_COLUMNS_LIST, row)) + coerced: dict = {} + + for col in _BASE_COLUMNS_LIST: + default, expected_type = _FIELD_DEFAULTS[col] + value = raw[col] + + if expected_type == "STR": + coerced[col] = str(value) + elif expected_type == "STR_OR_NONE": + coerced[col] = str(value) if value else None + elif expected_type is not None: + # Type-checked field (list / dict): use value only if it matches + coerced[col] = value if 
isinstance(value, expected_type) else default + else: + # is_data_controller needs special None-check (True when NULL) + if col == "is_data_controller": + coerced[col] = value if value is not None else default + else: + coerced[col] = value or default if default is not None else value + + return CompanyProfileResponse(**coerced) def log_audit(db, tenant_id: str, action: str, changed_fields: Optional[dict], changed_by: Optional[str]): diff --git a/backend-compliance/compliance/api/consent_template_routes.py b/backend-compliance/compliance/api/consent_template_routes.py index a30dbd9..930da4c 100644 --- a/backend-compliance/compliance/api/consent_template_routes.py +++ b/backend-compliance/compliance/api/consent_template_routes.py @@ -12,7 +12,7 @@ Endpoints: import logging from datetime import datetime -from typing import Optional, List +from typing import Optional from fastapi import APIRouter, Depends, HTTPException, Header from pydantic import BaseModel diff --git a/backend-compliance/compliance/api/crud_factory.py b/backend-compliance/compliance/api/crud_factory.py index 6099851..3e5de2d 100644 --- a/backend-compliance/compliance/api/crud_factory.py +++ b/backend-compliance/compliance/api/crud_factory.py @@ -21,7 +21,7 @@ Usage: import logging from datetime import datetime -from typing import Any, Dict, List, Optional, Callable +from typing import Any, Dict, List, Optional from fastapi import APIRouter, Depends, HTTPException, Query from sqlalchemy import text diff --git a/backend-compliance/compliance/api/document_templates/loeschfristen_template.py b/backend-compliance/compliance/api/document_templates/loeschfristen_template.py index b97a748..6e638c9 100644 --- a/backend-compliance/compliance/api/document_templates/loeschfristen_template.py +++ b/backend-compliance/compliance/api/document_templates/loeschfristen_template.py @@ -42,7 +42,7 @@ def generate_loeschfristen_drafts(ctx: dict) -> list[dict]: "responsible": ctx.get("dpo_name", "DSB"), "status": 
"draft", "review_cycle_months": ctx.get("review_cycle_months", 12), - "notes": f"Automatisch generiert aus Stammdaten. Bitte prüfen und anpassen.", + "notes": "Automatisch generiert aus Stammdaten. Bitte prüfen und anpassen.", } policies.append(policy) diff --git a/backend-compliance/compliance/api/document_templates/tom_template.py b/backend-compliance/compliance/api/document_templates/tom_template.py index 93b6b90..8b4f914 100644 --- a/backend-compliance/compliance/api/document_templates/tom_template.py +++ b/backend-compliance/compliance/api/document_templates/tom_template.py @@ -51,7 +51,6 @@ def generate_tom_drafts(ctx: dict) -> list[dict]: measures.extend(_AI_ACT_TOMS) # Enrich with metadata - company = ctx.get("company_name", "") result = [] for i, m in enumerate(measures, 1): result.append({ diff --git a/backend-compliance/compliance/api/dsfa_routes.py b/backend-compliance/compliance/api/dsfa_routes.py index 3906818..dcd9ce7 100644 --- a/backend-compliance/compliance/api/dsfa_routes.py +++ b/backend-compliance/compliance/api/dsfa_routes.py @@ -33,7 +33,6 @@ from classroom_engine.database import get_db logger = logging.getLogger(__name__) router = APIRouter(prefix="/dsfa", tags=["compliance-dsfa"]) -from .tenant_utils import get_tenant_id as _shared_get_tenant_id # Legacy compat — still used by _get_tenant_id() below; will be removed once # all call-sites switch to Depends(get_tenant_id). 
@@ -855,7 +854,7 @@ async def approve_dsfa( if request.approved: new_status = "approved" - row = db.execute( + db.execute( text(""" UPDATE compliance_dsfas SET status = 'approved', approved_by = :approved_by, approved_at = NOW(), updated_at = NOW() @@ -866,7 +865,7 @@ async def approve_dsfa( ).fetchone() else: new_status = "needs-update" - row = db.execute( + db.execute( text(""" UPDATE compliance_dsfas SET status = 'needs-update', updated_at = NOW() diff --git a/backend-compliance/compliance/api/dsr_routes.py b/backend-compliance/compliance/api/dsr_routes.py index 0a4db34..776de71 100644 --- a/backend-compliance/compliance/api/dsr_routes.py +++ b/backend-compliance/compliance/api/dsr_routes.py @@ -14,7 +14,7 @@ from fastapi import APIRouter, Depends, HTTPException, Query, Header from fastapi.responses import StreamingResponse from pydantic import BaseModel from sqlalchemy.orm import Session -from sqlalchemy import text, func, and_, or_, cast, String +from sqlalchemy import text, func, and_, or_ from classroom_engine.database import get_db from ..db.dsr_models import ( @@ -574,7 +574,7 @@ async def get_published_templates( """Gibt publizierte Vorlagen zurueck.""" query = db.query(DSRTemplateDB).filter( DSRTemplateDB.tenant_id == uuid.UUID(tenant_id), - DSRTemplateDB.is_active == True, + DSRTemplateDB.is_active, DSRTemplateDB.language == language, ) if request_type: diff --git a/backend-compliance/compliance/api/email_template_routes.py b/backend-compliance/compliance/api/email_template_routes.py index 4498bbb..2af10a5 100644 --- a/backend-compliance/compliance/api/email_template_routes.py +++ b/backend-compliance/compliance/api/email_template_routes.py @@ -6,14 +6,12 @@ Inklusive Versionierung, Approval-Workflow, Vorschau und Send-Logging. 
""" import uuid -import re from datetime import datetime -from typing import Optional, List, Dict, Any +from typing import Optional, Dict from fastapi import APIRouter, Depends, HTTPException, Query, Header from pydantic import BaseModel from sqlalchemy.orm import Session -from sqlalchemy import func from classroom_engine.database import get_db from ..db.email_template_models import ( @@ -182,7 +180,7 @@ async def get_stats( base = db.query(EmailTemplateDB).filter(EmailTemplateDB.tenant_id == tid) total = base.count() - active = base.filter(EmailTemplateDB.is_active == True).count() + active = base.filter(EmailTemplateDB.is_active).count() # Count templates with published versions published_count = 0 diff --git a/backend-compliance/compliance/api/evidence_routes.py b/backend-compliance/compliance/api/evidence_routes.py index 3706410..4c202a1 100644 --- a/backend-compliance/compliance/api/evidence_routes.py +++ b/backend-compliance/compliance/api/evidence_routes.py @@ -248,7 +248,231 @@ async def upload_evidence( # ============================================================================ -# CI/CD Evidence Collection +# CI/CD Evidence Collection — helpers +# ============================================================================ + +# Map CI source names to the corresponding control IDs +SOURCE_CONTROL_MAP = { + "sast": "SDLC-001", + "dependency_scan": "SDLC-002", + "secret_scan": "SDLC-003", + "code_review": "SDLC-004", + "sbom": "SDLC-005", + "container_scan": "SDLC-006", + "test_results": "AUD-001", +} + + +def _parse_ci_evidence(data: dict) -> dict: + """ + Parse and validate incoming CI evidence data. 
+ + Returns a dict with: + - report_json: str (serialised JSON) + - report_hash: str (SHA-256 hex digest) + - evidence_status: str ("valid" or "failed") + - findings_count: int + - critical_findings: int + """ + report_json = json.dumps(data) if data else "{}" + report_hash = hashlib.sha256(report_json.encode()).hexdigest() + + findings_count = 0 + critical_findings = 0 + + if data and isinstance(data, dict): + # Semgrep format + if "results" in data: + findings_count = len(data.get("results", [])) + critical_findings = len([ + r for r in data.get("results", []) + if r.get("extra", {}).get("severity", "").upper() in ["CRITICAL", "HIGH"] + ]) + + # Trivy format + elif "Results" in data: + for result in data.get("Results", []): + vulns = result.get("Vulnerabilities", []) + findings_count += len(vulns) + critical_findings += len([ + v for v in vulns + if v.get("Severity", "").upper() in ["CRITICAL", "HIGH"] + ]) + + # Generic findings array + elif "findings" in data: + findings_count = len(data.get("findings", [])) + + # SBOM format - just count components + elif "components" in data: + findings_count = len(data.get("components", [])) + + evidence_status = "failed" if critical_findings > 0 else "valid" + + return { + "report_json": report_json, + "report_hash": report_hash, + "evidence_status": evidence_status, + "findings_count": findings_count, + "critical_findings": critical_findings, + } + + +def _store_evidence( + db: Session, + *, + control_db_id: str, + source: str, + parsed: dict, + ci_job_id: str, + ci_job_url: str, + report_data: dict, +) -> EvidenceDB: + """ + Persist a CI evidence item to the database and write the report file. + + Returns the created EvidenceDB instance (already committed). 
+ """ + findings_count = parsed["findings_count"] + critical_findings = parsed["critical_findings"] + + # Build title and description + title = f"{source.upper()} Report - {datetime.now().strftime('%Y-%m-%d %H:%M')}" + description = "Automatically collected from CI/CD pipeline" + if findings_count > 0: + description += f"\n- Total findings: {findings_count}" + if critical_findings > 0: + description += f"\n- Critical/High findings: {critical_findings}" + if ci_job_id: + description += f"\n- CI Job ID: {ci_job_id}" + if ci_job_url: + description += f"\n- CI Job URL: {ci_job_url}" + + # Store report file + upload_dir = f"/tmp/compliance_evidence/ci/{source}" + os.makedirs(upload_dir, exist_ok=True) + file_name = f"{source}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{parsed['report_hash'][:8]}.json" + file_path = os.path.join(upload_dir, file_name) + + with open(file_path, "w") as f: + json.dump(report_data or {}, f, indent=2) + + # Create evidence record + evidence = EvidenceDB( + id=str(uuid_module.uuid4()), + control_id=control_db_id, + evidence_type=f"ci_{source}", + title=title, + description=description, + artifact_path=file_path, + artifact_hash=parsed["report_hash"], + file_size_bytes=len(parsed["report_json"]), + mime_type="application/json", + source="ci_pipeline", + ci_job_id=ci_job_id, + valid_from=datetime.utcnow(), + valid_until=datetime.utcnow() + timedelta(days=90), + status=EvidenceStatusEnum(parsed["evidence_status"]), + ) + db.add(evidence) + db.commit() + db.refresh(evidence) + + return evidence + + +def _extract_findings_detail(report_data: dict) -> dict: + """ + Extract severity-bucketed finding counts from report data. + + Returns dict with keys: critical, high, medium, low. 
+ """ + findings_detail = { + "critical": 0, + "high": 0, + "medium": 0, + "low": 0, + } + + if not report_data: + return findings_detail + + # Semgrep format + if "results" in report_data: + for r in report_data.get("results", []): + severity = r.get("extra", {}).get("severity", "").upper() + if severity == "CRITICAL": + findings_detail["critical"] += 1 + elif severity == "HIGH": + findings_detail["high"] += 1 + elif severity == "MEDIUM": + findings_detail["medium"] += 1 + elif severity in ["LOW", "INFO"]: + findings_detail["low"] += 1 + + # Trivy format + elif "Results" in report_data: + for result in report_data.get("Results", []): + for v in result.get("Vulnerabilities", []): + severity = v.get("Severity", "").upper() + if severity == "CRITICAL": + findings_detail["critical"] += 1 + elif severity == "HIGH": + findings_detail["high"] += 1 + elif severity == "MEDIUM": + findings_detail["medium"] += 1 + elif severity == "LOW": + findings_detail["low"] += 1 + + # Generic findings with severity + elif "findings" in report_data: + for f in report_data.get("findings", []): + severity = f.get("severity", "").upper() + if severity == "CRITICAL": + findings_detail["critical"] += 1 + elif severity == "HIGH": + findings_detail["high"] += 1 + elif severity == "MEDIUM": + findings_detail["medium"] += 1 + else: + findings_detail["low"] += 1 + + return findings_detail + + +def _update_risks(db: Session, *, source: str, control_id: str, ci_job_id: str, report_data: dict): + """ + Update risk status based on new evidence. + + Uses AutoRiskUpdater to update Control status and linked Risks based on + severity-bucketed findings. Returns the update result or None on error. 
+ """ + findings_detail = _extract_findings_detail(report_data) + + try: + auto_updater = AutoRiskUpdater(db) + risk_update_result = auto_updater.process_evidence_collect_request( + tool=source, + control_id=control_id, + evidence_type=f"ci_{source}", + timestamp=datetime.utcnow().isoformat(), + commit_sha=report_data.get("commit_sha", "unknown") if report_data else "unknown", + ci_job_id=ci_job_id, + findings=findings_detail, + ) + + logger.info(f"Auto-risk update completed for {control_id}: " + f"control_updated={risk_update_result.control_updated}, " + f"risks_affected={len(risk_update_result.risks_affected)}") + + return risk_update_result + except Exception as e: + logger.error(f"Auto-risk update failed for {control_id}: {str(e)}") + return None + + +# ============================================================================ +# CI/CD Evidence Collection — endpoint # ============================================================================ @router.post("/evidence/collect") @@ -274,17 +498,6 @@ async def collect_ci_evidence( - secret_scan: Secret detection (Gitleaks, TruffleHog) - code_review: Code review metrics """ - # Map source to control_id - SOURCE_CONTROL_MAP = { - "sast": "SDLC-001", - "dependency_scan": "SDLC-002", - "secret_scan": "SDLC-003", - "code_review": "SDLC-004", - "sbom": "SDLC-005", - "container_scan": "SDLC-006", - "test_results": "AUD-001", - } - if source not in SOURCE_CONTROL_MAP: raise HTTPException( status_code=400, @@ -302,173 +515,38 @@ async def collect_ci_evidence( detail=f"Control {control_id} not found. Please seed the database first." ) - # Parse and validate report data - report_json = json.dumps(report_data) if report_data else "{}" - report_hash = hashlib.sha256(report_json.encode()).hexdigest() + # --- 1. 
Parse and validate report data --- + parsed = _parse_ci_evidence(report_data) - # Determine evidence status based on report content - evidence_status = "valid" - findings_count = 0 - critical_findings = 0 - - if report_data: - # Try to extract findings from common report formats - if isinstance(report_data, dict): - # Semgrep format - if "results" in report_data: - findings_count = len(report_data.get("results", [])) - critical_findings = len([ - r for r in report_data.get("results", []) - if r.get("extra", {}).get("severity", "").upper() in ["CRITICAL", "HIGH"] - ]) - - # Trivy format - elif "Results" in report_data: - for result in report_data.get("Results", []): - vulns = result.get("Vulnerabilities", []) - findings_count += len(vulns) - critical_findings += len([ - v for v in vulns - if v.get("Severity", "").upper() in ["CRITICAL", "HIGH"] - ]) - - # Generic findings array - elif "findings" in report_data: - findings_count = len(report_data.get("findings", [])) - - # SBOM format - just count components - elif "components" in report_data: - findings_count = len(report_data.get("components", [])) - - # If critical findings exist, mark as failed - if critical_findings > 0: - evidence_status = "failed" - - # Create evidence title - title = f"{source.upper()} Report - {datetime.now().strftime('%Y-%m-%d %H:%M')}" - description = f"Automatically collected from CI/CD pipeline" - if findings_count > 0: - description += f"\n- Total findings: {findings_count}" - if critical_findings > 0: - description += f"\n- Critical/High findings: {critical_findings}" - if ci_job_id: - description += f"\n- CI Job ID: {ci_job_id}" - if ci_job_url: - description += f"\n- CI Job URL: {ci_job_url}" - - # Store report file - upload_dir = f"/tmp/compliance_evidence/ci/{source}" - os.makedirs(upload_dir, exist_ok=True) - file_name = f"{source}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{report_hash[:8]}.json" - file_path = os.path.join(upload_dir, file_name) - - with open(file_path, "w") as 
f: - json.dump(report_data or {}, f, indent=2) - - # Create evidence record directly - evidence = EvidenceDB( - id=str(uuid_module.uuid4()), - control_id=control.id, - evidence_type=f"ci_{source}", - title=title, - description=description, - artifact_path=file_path, - artifact_hash=report_hash, - file_size_bytes=len(report_json), - mime_type="application/json", - source="ci_pipeline", + # --- 2. Store evidence in DB and write report file --- + evidence = _store_evidence( + db, + control_db_id=control.id, + source=source, + parsed=parsed, ci_job_id=ci_job_id, - valid_from=datetime.utcnow(), - valid_until=datetime.utcnow() + timedelta(days=90), - status=EvidenceStatusEnum(evidence_status), + ci_job_url=ci_job_url, + report_data=report_data, ) - db.add(evidence) - db.commit() - db.refresh(evidence) - # ========================================================================= - # AUTOMATIC RISK UPDATE - # Update Control status and linked Risks based on findings - # ========================================================================= - risk_update_result = None - try: - # Extract detailed findings for risk assessment - findings_detail = { - "critical": 0, - "high": 0, - "medium": 0, - "low": 0, - } - - if report_data: - # Semgrep format - if "results" in report_data: - for r in report_data.get("results", []): - severity = r.get("extra", {}).get("severity", "").upper() - if severity == "CRITICAL": - findings_detail["critical"] += 1 - elif severity == "HIGH": - findings_detail["high"] += 1 - elif severity == "MEDIUM": - findings_detail["medium"] += 1 - elif severity in ["LOW", "INFO"]: - findings_detail["low"] += 1 - - # Trivy format - elif "Results" in report_data: - for result in report_data.get("Results", []): - for v in result.get("Vulnerabilities", []): - severity = v.get("Severity", "").upper() - if severity == "CRITICAL": - findings_detail["critical"] += 1 - elif severity == "HIGH": - findings_detail["high"] += 1 - elif severity == "MEDIUM": - 
findings_detail["medium"] += 1 - elif severity == "LOW": - findings_detail["low"] += 1 - - # Generic findings with severity - elif "findings" in report_data: - for f in report_data.get("findings", []): - severity = f.get("severity", "").upper() - if severity == "CRITICAL": - findings_detail["critical"] += 1 - elif severity == "HIGH": - findings_detail["high"] += 1 - elif severity == "MEDIUM": - findings_detail["medium"] += 1 - else: - findings_detail["low"] += 1 - - # Use AutoRiskUpdater to update Control status and Risks - auto_updater = AutoRiskUpdater(db) - risk_update_result = auto_updater.process_evidence_collect_request( - tool=source, - control_id=control_id, - evidence_type=f"ci_{source}", - timestamp=datetime.utcnow().isoformat(), - commit_sha=report_data.get("commit_sha", "unknown") if report_data else "unknown", - ci_job_id=ci_job_id, - findings=findings_detail, - ) - - logger.info(f"Auto-risk update completed for {control_id}: " - f"control_updated={risk_update_result.control_updated}, " - f"risks_affected={len(risk_update_result.risks_affected)}") - - except Exception as e: - logger.error(f"Auto-risk update failed for {control_id}: {str(e)}") + # --- 3. 
Automatic risk update --- + risk_update_result = _update_risks( + db, + source=source, + control_id=control_id, + ci_job_id=ci_job_id, + report_data=report_data, + ) return { "success": True, "evidence_id": evidence.id, "control_id": control_id, "source": source, - "status": evidence_status, - "findings_count": findings_count, - "critical_findings": critical_findings, - "artifact_path": file_path, + "status": parsed["evidence_status"], + "findings_count": parsed["findings_count"], + "critical_findings": parsed["critical_findings"], + "artifact_path": evidence.artifact_path, "message": f"Evidence collected successfully for control {control_id}", "auto_risk_update": { "enabled": True, diff --git a/backend-compliance/compliance/api/extraction_routes.py b/backend-compliance/compliance/api/extraction_routes.py index 9f6344b..fd2bcc3 100644 --- a/backend-compliance/compliance/api/extraction_routes.py +++ b/backend-compliance/compliance/api/extraction_routes.py @@ -20,13 +20,13 @@ import asyncio from typing import Optional, List, Dict from datetime import datetime -from fastapi import APIRouter, Depends, Query +from fastapi import APIRouter, Depends from pydantic import BaseModel from sqlalchemy.orm import Session from classroom_engine.database import get_db from ..db import RegulationRepository, RequirementRepository -from ..db.models import RegulationDB, RequirementDB, RegulationTypeEnum +from ..db.models import RegulationDB, RegulationTypeEnum from ..services.rag_client import get_rag_client, RAGSearchResult logger = logging.getLogger(__name__) @@ -185,6 +185,169 @@ def _build_existing_articles( return {r.article for r in existing} +# --------------------------------------------------------------------------- +# Extraction helpers — independently testable +# --------------------------------------------------------------------------- + +def _parse_rag_results( + all_results: List[RAGSearchResult], + regulation_codes: Optional[List[str]] = None, +) -> dict: + """ + 
Filter, deduplicate, and group RAG search results by regulation code. + + Returns a dict with: + - deduped_by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] + - skipped_no_article: List[RAGSearchResult] + - unique_count: int + """ + # Filter by regulation_codes if requested + if regulation_codes: + all_results = [ + r for r in all_results + if r.regulation_code in regulation_codes + ] + + # Deduplicate at result level (regulation_code + article) + seen: set[tuple[str, str]] = set() + unique_count = 0 + for r in sorted(all_results, key=lambda x: x.score, reverse=True): + article = _normalize_article(r) + if not article: + continue + key = (r.regulation_code, article) + if key not in seen: + seen.add(key) + unique_count += 1 + + # Group by regulation_code + by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] = {} + skipped_no_article: List[RAGSearchResult] = [] + + for r in all_results: + article = _normalize_article(r) + if not article: + skipped_no_article.append(r) + continue + key_r = r.regulation_code or "UNKNOWN" + if key_r not in by_reg: + by_reg[key_r] = [] + by_reg[key_r].append((article, r)) + + # Deduplicate within groups + deduped_by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] = {} + for reg_code, items in by_reg.items(): + seen_articles: set[str] = set() + deduped: List[tuple[str, RAGSearchResult]] = [] + for art, r in sorted(items, key=lambda x: x[1].score, reverse=True): + if art not in seen_articles: + seen_articles.add(art) + deduped.append((art, r)) + deduped_by_reg[reg_code] = deduped + + return { + "deduped_by_reg": deduped_by_reg, + "skipped_no_article": skipped_no_article, + "unique_count": unique_count, + } + + +def _store_requirements( + db: Session, + deduped_by_reg: Dict[str, List[tuple[str, "RAGSearchResult"]]], + dry_run: bool, +) -> dict: + """ + Persist extracted requirements to the database (or simulate in dry_run mode). 
+ + Returns a dict with: + - created_count: int + - skipped_dup_count: int + - failed_count: int + - result_items: List[ExtractedRequirement] + """ + req_repo = RequirementRepository(db) + created_count = 0 + skipped_dup_count = 0 + failed_count = 0 + result_items: List[ExtractedRequirement] = [] + + for reg_code, items in deduped_by_reg.items(): + if not items: + continue + + # Find or create regulation + try: + first_result = items[0][1] + regulation_name = first_result.regulation_name or first_result.regulation_short or reg_code + if dry_run: + # For dry_run, fake a regulation id + regulation_id = f"dry-run-{reg_code}" + existing_articles: set[str] = set() + else: + reg = _get_or_create_regulation(db, reg_code, regulation_name) + regulation_id = reg.id + existing_articles = _build_existing_articles(db, regulation_id) + except Exception as e: + logger.error("Failed to get/create regulation %s: %s", reg_code, e) + failed_count += len(items) + continue + + for article, r in items: + title = _derive_title(r.text, article) + + if article in existing_articles: + skipped_dup_count += 1 + result_items.append(ExtractedRequirement( + regulation_code=reg_code, + article=article, + title=title, + requirement_text=r.text[:1000], + source_url=r.source_url, + score=r.score, + action="skipped_duplicate", + )) + continue + + if not dry_run: + try: + req_repo.create( + regulation_id=regulation_id, + article=article, + title=title, + description=f"Extrahiert aus RAG-Korpus (Collection: {r.category or r.regulation_code}). 
Score: {r.score:.2f}", + requirement_text=r.text[:2000], + breakpilot_interpretation=None, + is_applicable=True, + priority=2, + ) + existing_articles.add(article) # prevent intra-batch duplication + created_count += 1 + except Exception as e: + logger.error("Failed to create requirement %s/%s: %s", reg_code, article, e) + failed_count += 1 + continue + else: + created_count += 1 # dry_run: count as would-create + + result_items.append(ExtractedRequirement( + regulation_code=reg_code, + article=article, + title=title, + requirement_text=r.text[:1000], + source_url=r.source_url, + score=r.score, + action="created" if not dry_run else "would_create", + )) + + return { + "created_count": created_count, + "skipped_dup_count": skipped_dup_count, + "failed_count": failed_count, + "result_items": result_items, + } + + # --------------------------------------------------------------------------- # Endpoint # --------------------------------------------------------------------------- @@ -225,126 +388,19 @@ async def extract_requirements_from_rag( logger.info("RAG extraction: %d raw results from %d collections", len(all_results), len(collections)) - # --- 2. Filter by regulation_codes if requested --- - if body.regulation_codes: - all_results = [ - r for r in all_results - if r.regulation_code in body.regulation_codes - ] + # --- 2. Parse, filter, deduplicate, and group --- + parsed = _parse_rag_results(all_results, body.regulation_codes) + deduped_by_reg = parsed["deduped_by_reg"] + skipped_no_article = parsed["skipped_no_article"] - # --- 3. 
Deduplicate at result level (regulation_code + article) --- - seen: set[tuple[str, str]] = set() - unique_results: List[RAGSearchResult] = [] - for r in sorted(all_results, key=lambda x: x.score, reverse=True): - article = _normalize_article(r) - if not article: - continue - key = (r.regulation_code, article) - if key not in seen: - seen.add(key) - unique_results.append(r) + logger.info("RAG extraction: %d unique (regulation, article) pairs", parsed["unique_count"]) - logger.info("RAG extraction: %d unique (regulation, article) pairs", len(unique_results)) - - # --- 4. Group by regulation_code and process --- - by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] = {} - skipped_no_article: List[RAGSearchResult] = [] - - for r in all_results: - article = _normalize_article(r) - if not article: - skipped_no_article.append(r) - continue - key_r = r.regulation_code or "UNKNOWN" - if key_r not in by_reg: - by_reg[key_r] = [] - by_reg[key_r].append((article, r)) - - # Deduplicate within groups - deduped_by_reg: Dict[str, List[tuple[str, RAGSearchResult]]] = {} - for reg_code, items in by_reg.items(): - seen_articles: set[str] = set() - deduped: List[tuple[str, RAGSearchResult]] = [] - for art, r in sorted(items, key=lambda x: x[1].score, reverse=True): - if art not in seen_articles: - seen_articles.add(art) - deduped.append((art, r)) - deduped_by_reg[reg_code] = deduped - - # --- 5. 
Create requirements --- - req_repo = RequirementRepository(db) - created_count = 0 - skipped_dup_count = 0 - failed_count = 0 - result_items: List[ExtractedRequirement] = [] - - for reg_code, items in deduped_by_reg.items(): - if not items: - continue - - # Find or create regulation - try: - first_result = items[0][1] - regulation_name = first_result.regulation_name or first_result.regulation_short or reg_code - if body.dry_run: - # For dry_run, fake a regulation id - regulation_id = f"dry-run-{reg_code}" - existing_articles: set[str] = set() - else: - reg = _get_or_create_regulation(db, reg_code, regulation_name) - regulation_id = reg.id - existing_articles = _build_existing_articles(db, regulation_id) - except Exception as e: - logger.error("Failed to get/create regulation %s: %s", reg_code, e) - failed_count += len(items) - continue - - for article, r in items: - title = _derive_title(r.text, article) - - if article in existing_articles: - skipped_dup_count += 1 - result_items.append(ExtractedRequirement( - regulation_code=reg_code, - article=article, - title=title, - requirement_text=r.text[:1000], - source_url=r.source_url, - score=r.score, - action="skipped_duplicate", - )) - continue - - if not body.dry_run: - try: - req_repo.create( - regulation_id=regulation_id, - article=article, - title=title, - description=f"Extrahiert aus RAG-Korpus (Collection: {r.category or r.regulation_code}). 
Score: {r.score:.2f}", - requirement_text=r.text[:2000], - breakpilot_interpretation=None, - is_applicable=True, - priority=2, - ) - existing_articles.add(article) # prevent intra-batch duplication - created_count += 1 - except Exception as e: - logger.error("Failed to create requirement %s/%s: %s", reg_code, article, e) - failed_count += 1 - continue - else: - created_count += 1 # dry_run: count as would-create - - result_items.append(ExtractedRequirement( - regulation_code=reg_code, - article=article, - title=title, - requirement_text=r.text[:1000], - source_url=r.source_url, - score=r.score, - action="created" if not body.dry_run else "would_create", - )) + # --- 3. Create requirements --- + store_result = _store_requirements(db, deduped_by_reg, body.dry_run) + created_count = store_result["created_count"] + skipped_dup_count = store_result["skipped_dup_count"] + failed_count = store_result["failed_count"] + result_items = store_result["result_items"] message = ( f"{'[DRY RUN] ' if body.dry_run else ''}" diff --git a/backend-compliance/compliance/api/incident_routes.py b/backend-compliance/compliance/api/incident_routes.py index c5dbfe0..01b44f6 100644 --- a/backend-compliance/compliance/api/incident_routes.py +++ b/backend-compliance/compliance/api/incident_routes.py @@ -24,7 +24,7 @@ Endpoints: import json import logging from datetime import datetime, timedelta, timezone -from typing import Optional, List, Any +from typing import Optional, List from uuid import UUID, uuid4 from fastapi import APIRouter, Depends, HTTPException, Query, Header diff --git a/backend-compliance/compliance/api/isms_routes.py b/backend-compliance/compliance/api/isms_routes.py index 25c93a0..96825e5 100644 --- a/backend-compliance/compliance/api/isms_routes.py +++ b/backend-compliance/compliance/api/isms_routes.py @@ -14,7 +14,7 @@ Provides endpoints for ISO 27001 certification-ready ISMS management: import uuid import hashlib from datetime import datetime, date -from typing import 
Optional, List +from typing import Optional from fastapi import APIRouter, HTTPException, Query, Depends from sqlalchemy.orm import Session @@ -53,7 +53,7 @@ from .schemas import ( # Readiness ISMSReadinessCheckResponse, ISMSReadinessCheckRequest, PotentialFinding, # Audit Trail - AuditTrailResponse, AuditTrailEntry, PaginationMeta, + AuditTrailResponse, PaginationMeta, # Overview ISO27001OverviewResponse, ISO27001ChapterStatus ) @@ -673,10 +673,6 @@ async def list_findings( ofi_count = sum(1 for f in findings if f.finding_type == FindingTypeEnum.OFI) open_count = sum(1 for f in findings if f.status != FindingStatusEnum.CLOSED) - # Add is_blocking property to each finding - for f in findings: - f.is_blocking = f.finding_type == FindingTypeEnum.MAJOR and f.status != FindingStatusEnum.CLOSED - return AuditFindingListResponse( findings=findings, total=len(findings), @@ -746,7 +742,6 @@ async def create_finding(data: AuditFindingCreate, db: Session = Depends(get_db) db.commit() db.refresh(finding) - finding.is_blocking = finding.finding_type == FindingTypeEnum.MAJOR return finding @@ -775,7 +770,6 @@ async def update_finding( db.commit() db.refresh(finding) - finding.is_blocking = finding.finding_type == FindingTypeEnum.MAJOR and finding.status != FindingStatusEnum.CLOSED return finding @@ -824,7 +818,6 @@ async def close_finding( db.commit() db.refresh(finding) - finding.is_blocking = False return finding @@ -1271,10 +1264,9 @@ async def run_readiness_check( # Chapter 6: Planning - Risk Assessment from ..db.models import RiskDB - risks = db.query(RiskDB).filter(RiskDB.status == "open").count() risks_without_treatment = db.query(RiskDB).filter( RiskDB.status == "open", - RiskDB.treatment_plan == None + RiskDB.treatment_plan.is_(None) ).count() if risks_without_treatment > 0: potential_majors.append(PotentialFinding( @@ -1299,7 +1291,7 @@ async def run_readiness_check( # SoA soa_total = db.query(StatementOfApplicabilityDB).count() soa_unapproved =
db.query(StatementOfApplicabilityDB).filter( - StatementOfApplicabilityDB.approved_at == None + StatementOfApplicabilityDB.approved_at.is_(None) ).count() if soa_total == 0: potential_majors.append(PotentialFinding( @@ -1525,7 +1517,7 @@ async def get_iso27001_overview(db: Session = Depends(get_db)): soa_total = db.query(StatementOfApplicabilityDB).count() soa_approved = db.query(StatementOfApplicabilityDB).filter( - StatementOfApplicabilityDB.approved_at != None + StatementOfApplicabilityDB.approved_at.is_not(None) ).count() soa_all_approved = soa_total > 0 and soa_approved == soa_total diff --git a/backend-compliance/compliance/api/legal_document_routes.py b/backend-compliance/compliance/api/legal_document_routes.py index 5233f5c..dd5286f 100644 --- a/backend-compliance/compliance/api/legal_document_routes.py +++ b/backend-compliance/compliance/api/legal_document_routes.py @@ -671,7 +671,7 @@ async def get_my_consents( .filter( UserConsentDB.tenant_id == tid, UserConsentDB.user_id == user_id, - UserConsentDB.withdrawn_at == None, + UserConsentDB.withdrawn_at.is_(None), ) .order_by(UserConsentDB.consented_at.desc()) .all() @@ -694,8 +694,8 @@ async def check_consent( UserConsentDB.tenant_id == tid, UserConsentDB.user_id == user_id, UserConsentDB.document_type == document_type, - UserConsentDB.consented == True, - UserConsentDB.withdrawn_at == None, + UserConsentDB.consented, + UserConsentDB.withdrawn_at.is_(None), ) .order_by(UserConsentDB.consented_at.desc()) .first() @@ -757,10 +757,10 @@ async def get_consent_stats( total = base.count() active = base.filter( - UserConsentDB.consented == True, - UserConsentDB.withdrawn_at == None, + UserConsentDB.consented, + UserConsentDB.withdrawn_at.is_(None), ).count() - withdrawn = base.filter(UserConsentDB.withdrawn_at != None).count() + withdrawn = base.filter(UserConsentDB.withdrawn_at.is_not(None)).count() # By document type by_type = {} diff --git a/backend-compliance/compliance/api/legal_template_routes.py
b/backend-compliance/compliance/api/legal_template_routes.py index 7bad5cc..9fb14b2 100644 --- a/backend-compliance/compliance/api/legal_template_routes.py +++ b/backend-compliance/compliance/api/legal_template_routes.py @@ -314,9 +314,9 @@ async def update_legal_template( raise HTTPException(status_code=400, detail="No fields to update") if "document_type" in updates and updates["document_type"] not in VALID_DOCUMENT_TYPES: - raise HTTPException(status_code=400, detail=f"Invalid document_type") + raise HTTPException(status_code=400, detail="Invalid document_type") if "status" in updates and updates["status"] not in VALID_STATUSES: - raise HTTPException(status_code=400, detail=f"Invalid status") + raise HTTPException(status_code=400, detail="Invalid status") set_clauses = ["updated_at = :updated_at"] params: Dict[str, Any] = { diff --git a/backend-compliance/compliance/api/routes.py b/backend-compliance/compliance/api/routes.py index 2721f30..4edbec9 100644 --- a/backend-compliance/compliance/api/routes.py +++ b/backend-compliance/compliance/api/routes.py @@ -16,11 +16,10 @@ import logging logger = logging.getLogger(__name__) import os -from datetime import datetime, timedelta -from typing import Optional, List +from datetime import datetime +from typing import Optional -from pydantic import BaseModel -from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Query, BackgroundTasks +from fastapi import APIRouter, Depends, HTTPException, Query, BackgroundTasks from fastapi.responses import FileResponse from sqlalchemy.orm import Session @@ -31,22 +30,16 @@ from ..db import ( RequirementRepository, ControlRepository, EvidenceRepository, - RiskRepository, - AuditExportRepository, ControlStatusEnum, ControlDomainEnum, - RiskLevelEnum, - EvidenceStatusEnum, ) from ..db.models import EvidenceDB, ControlDB from ..services.seeder import ComplianceSeeder from ..services.export_generator import AuditExportGenerator -from ..services.auto_risk_updater import 
AutoRiskUpdater, ScanType from .schemas import ( - RegulationCreate, RegulationResponse, RegulationListResponse, + RegulationResponse, RegulationListResponse, RequirementCreate, RequirementResponse, RequirementListResponse, - ControlCreate, ControlUpdate, ControlResponse, ControlListResponse, ControlReviewRequest, - MappingCreate, MappingResponse, MappingListResponse, + ControlUpdate, ControlResponse, ControlListResponse, ControlReviewRequest, ExportRequest, ExportResponse, ExportListResponse, SeedRequest, SeedResponse, # Pagination schemas @@ -381,7 +374,6 @@ async def delete_requirement(requirement_id: str, db: Session = Depends(get_db)) async def update_requirement(requirement_id: str, updates: dict, db: Session = Depends(get_db)): """Update a requirement with implementation/audit details.""" from ..db.models import RequirementDB - from datetime import datetime requirement = db.query(RequirementDB).filter(RequirementDB.id == requirement_id).first() if not requirement: @@ -870,8 +862,8 @@ async def init_tables(db: Session = Depends(get_db)): """Create compliance tables if they don't exist.""" from classroom_engine.database import engine from ..db.models import ( - RegulationDB, RequirementDB, ControlDB, ControlMappingDB, - EvidenceDB, RiskDB, AuditExportDB, AISystemDB + RegulationDB, RequirementDB, ControlMappingDB, + RiskDB, AuditExportDB, AISystemDB ) try: @@ -971,8 +963,8 @@ async def seed_database( """Seed the compliance database with initial data.""" from classroom_engine.database import engine from ..db.models import ( - RegulationDB, RequirementDB, ControlDB, ControlMappingDB, - EvidenceDB, RiskDB, AuditExportDB + RegulationDB, RequirementDB, ControlMappingDB, + RiskDB, AuditExportDB ) try: diff --git a/backend-compliance/compliance/api/schemas.py b/backend-compliance/compliance/api/schemas.py index c132e10..19854f5 100644 --- a/backend-compliance/compliance/api/schemas.py +++ b/backend-compliance/compliance/api/schemas.py @@ -496,57 +496,6 @@ class 
SeedResponse(BaseModel): counts: Dict[str, int] -# ============================================================================ -# PDF Extraction Schemas -# ============================================================================ - -class BSIAspectResponse(BaseModel): - """Response schema for an extracted BSI-TR Pruefaspekt.""" - aspect_id: str - title: str - full_text: str - category: str - page_number: int - section: str - requirement_level: str - source_document: str - keywords: List[str] = [] - related_aspects: List[str] = [] - - -class PDFExtractionResponse(BaseModel): - """Response for PDF extraction operation.""" - success: bool - source_document: str - total_aspects: int - aspects: List[BSIAspectResponse] - statistics: Dict[str, Any] - requirements_created: int = 0 - - -class PDFExtractionRequest(BaseModel): - """Request to extract requirements from a PDF.""" - document_code: str # e.g., "BSI-TR-03161-2" - save_to_db: bool = True - force: bool = False - - -# ============================================================================ -# Paginated Response Schemas (after all Response classes are defined) -# ============================================================================ - -class PaginatedRequirementResponse(BaseModel): - """Paginated response for requirements.""" - data: List[RequirementResponse] - pagination: PaginationMeta - - -class PaginatedControlResponse(BaseModel): - """Paginated response for controls.""" - data: List[ControlResponse] - pagination: PaginationMeta - - class PaginatedEvidenceResponse(BaseModel): """Paginated response for evidence.""" data: List[EvidenceResponse] diff --git a/backend-compliance/compliance/api/screening_routes.py b/backend-compliance/compliance/api/screening_routes.py index 8e16b54..307ca67 100644 --- a/backend-compliance/compliance/api/screening_routes.py +++ b/backend-compliance/compliance/api/screening_routes.py @@ -257,18 +257,6 @@ def map_osv_severity(vuln: dict) -> tuple[str, float]: severity = 
"MEDIUM" cvss = 5.0 - # Check severity array - for sev in vuln.get("severity", []): - if sev.get("type") == "CVSS_V3": - score_str = sev.get("score", "") - # Extract base score from CVSS vector - try: - import re as _re - # CVSS vectors don't contain the score directly, try database_specific - pass - except Exception: - pass - # Check database_specific for severity db_specific = vuln.get("database_specific", {}) if "severity" in db_specific: diff --git a/backend-compliance/compliance/api/source_policy_router.py b/backend-compliance/compliance/api/source_policy_router.py index f991fe3..7cdb2e9 100644 --- a/backend-compliance/compliance/api/source_policy_router.py +++ b/backend-compliance/compliance/api/source_policy_router.py @@ -21,9 +21,8 @@ Endpoints: GET /api/v1/admin/compliance-report — Compliance report """ -import uuid from datetime import datetime -from typing import Optional, List +from typing import Optional from fastapi import APIRouter, HTTPException, Depends, Query from pydantic import BaseModel, Field @@ -155,7 +154,7 @@ async def list_sources( """List all allowed sources with optional filters.""" query = db.query(AllowedSourceDB) if active_only: - query = query.filter(AllowedSourceDB.active == True) + query = query.filter(AllowedSourceDB.active) if source_type: query = query.filter(AllowedSourceDB.source_type == source_type) if license: @@ -527,8 +526,8 @@ async def get_policy_audit( async def get_policy_stats(db: Session = Depends(get_db)): """Get dashboard statistics for source policy.""" total_sources = db.query(AllowedSourceDB).count() - active_sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active == True).count() - pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active == True).count() + active_sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active).count() + pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active).count() # Count blocked content entries from today today_start = datetime.utcnow().replace(hour=0, minute=0, 
second=0, microsecond=0) @@ -550,8 +549,8 @@ async def get_policy_stats(db: Session = Depends(get_db)): @router.get("/compliance-report") async def get_compliance_report(db: Session = Depends(get_db)): """Generate a compliance report for source policies.""" - sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active == True).all() - pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active == True).all() + sources = db.query(AllowedSourceDB).filter(AllowedSourceDB.active).all() + pii_rules = db.query(PIIRuleDB).filter(PIIRuleDB.active).all() return { "report_date": datetime.utcnow().isoformat(), diff --git a/backend-compliance/compliance/api/tom_routes.py b/backend-compliance/compliance/api/tom_routes.py index fbed0d9..4752c62 100644 --- a/backend-compliance/compliance/api/tom_routes.py +++ b/backend-compliance/compliance/api/tom_routes.py @@ -19,11 +19,11 @@ import json import logging from datetime import datetime, timezone from typing import Optional, List, Any, Dict -from uuid import UUID, uuid4 +from uuid import UUID from fastapi import APIRouter, Depends, HTTPException, Query from fastapi.responses import StreamingResponse -from pydantic import BaseModel, Field +from pydantic import BaseModel from sqlalchemy import func from sqlalchemy.orm import Session diff --git a/backend-compliance/compliance/api/vendor_compliance_routes.py b/backend-compliance/compliance/api/vendor_compliance_routes.py index 917d35d..7a1dd40 100644 --- a/backend-compliance/compliance/api/vendor_compliance_routes.py +++ b/backend-compliance/compliance/api/vendor_compliance_routes.py @@ -50,10 +50,9 @@ import json import logging import uuid from datetime import datetime -from typing import Optional, List +from typing import Optional from fastapi import APIRouter, Depends, HTTPException, Query -from pydantic import BaseModel from sqlalchemy import text from sqlalchemy.orm import Session diff --git a/backend-compliance/compliance/api/versioning_utils.py 
b/backend-compliance/compliance/api/versioning_utils.py index 86feaec..6aa4560 100644 --- a/backend-compliance/compliance/api/versioning_utils.py +++ b/backend-compliance/compliance/api/versioning_utils.py @@ -7,7 +7,6 @@ with all 5 version tables (DSFA, VVT, TOM, Loeschfristen, Obligations). import json import logging -from datetime import datetime from typing import Optional, List from fastapi import APIRouter, Depends, HTTPException, Request diff --git a/backend-compliance/compliance/api/vvt_routes.py b/backend-compliance/compliance/api/vvt_routes.py index 53d081d..2eb04ae 100644 --- a/backend-compliance/compliance/api/vvt_routes.py +++ b/backend-compliance/compliance/api/vvt_routes.py @@ -19,7 +19,6 @@ import io import logging from datetime import datetime, timezone from typing import Optional, List -from uuid import uuid4 from fastapi import APIRouter, Depends, HTTPException, Query, Request from fastapi.responses import StreamingResponse diff --git a/backend-compliance/compliance/db/banner_models.py b/backend-compliance/compliance/db/banner_models.py index 2588d8a..f795229 100644 --- a/backend-compliance/compliance/db/banner_models.py +++ b/backend-compliance/compliance/db/banner_models.py @@ -13,7 +13,7 @@ import uuid from datetime import datetime from sqlalchemy import ( - Column, String, Text, Boolean, Integer, DateTime, Index, JSON + Column, Text, Boolean, Integer, DateTime, Index, JSON ) from sqlalchemy.dialects.postgresql import UUID diff --git a/backend-compliance/compliance/db/dsr_models.py b/backend-compliance/compliance/db/dsr_models.py index c89dc2f..79111af 100644 --- a/backend-compliance/compliance/db/dsr_models.py +++ b/backend-compliance/compliance/db/dsr_models.py @@ -14,7 +14,7 @@ import uuid from datetime import datetime from sqlalchemy import ( - Column, String, Text, Boolean, DateTime, JSON, Index + Column, Text, Boolean, DateTime, JSON, Index ) from sqlalchemy.dialects.postgresql import UUID diff --git 
a/backend-compliance/compliance/db/einwilligungen_models.py b/backend-compliance/compliance/db/einwilligungen_models.py index 3495575..0b974db 100644 --- a/backend-compliance/compliance/db/einwilligungen_models.py +++ b/backend-compliance/compliance/db/einwilligungen_models.py @@ -13,7 +13,7 @@ import uuid from datetime import datetime from sqlalchemy import ( - Column, String, Text, Boolean, DateTime, JSON, Index, Integer + Column, String, Text, Boolean, DateTime, JSON, Index ) from sqlalchemy.dialects.postgresql import UUID diff --git a/backend-compliance/compliance/db/email_template_models.py b/backend-compliance/compliance/db/email_template_models.py index ff2b571..32b3cdb 100644 --- a/backend-compliance/compliance/db/email_template_models.py +++ b/backend-compliance/compliance/db/email_template_models.py @@ -13,7 +13,7 @@ import uuid from datetime import datetime from sqlalchemy import ( - Column, String, Text, Boolean, Integer, DateTime, JSON, Index + Column, Text, Boolean, Integer, DateTime, JSON, Index ) from sqlalchemy.dialects.postgresql import UUID diff --git a/backend-compliance/compliance/db/isms_repository.py b/backend-compliance/compliance/db/isms_repository.py index 81bb676..188b090 100644 --- a/backend-compliance/compliance/db/isms_repository.py +++ b/backend-compliance/compliance/db/isms_repository.py @@ -14,10 +14,9 @@ from datetime import datetime, date from typing import List, Optional, Dict, Any, Tuple from sqlalchemy.orm import Session as DBSession -from sqlalchemy import func, and_, or_ from .models import ( - ISMSScopeDB, ISMSContextDB, ISMSPolicyDB, SecurityObjectiveDB, + ISMSScopeDB, ISMSPolicyDB, SecurityObjectiveDB, StatementOfApplicabilityDB, AuditFindingDB, CorrectiveActionDB, ManagementReviewDB, InternalAuditDB, AuditTrailDB, ISMSReadinessCheckDB, ApprovalStatusEnum, FindingTypeEnum, FindingStatusEnum, CAPATypeEnum diff --git a/backend-compliance/compliance/db/legal_document_extend_models.py 
b/backend-compliance/compliance/db/legal_document_extend_models.py index 5f06bcd..a42b8a8 100644 --- a/backend-compliance/compliance/db/legal_document_extend_models.py +++ b/backend-compliance/compliance/db/legal_document_extend_models.py @@ -11,7 +11,7 @@ import uuid from datetime import datetime from sqlalchemy import ( - Column, String, Text, Boolean, Integer, DateTime, Index, JSON + Column, Text, Boolean, Integer, DateTime, Index, JSON ) from sqlalchemy.dialects.postgresql import UUID diff --git a/backend-compliance/compliance/db/models.py b/backend-compliance/compliance/db/models.py index 5b02d94..84aa79d 100644 --- a/backend-compliance/compliance/db/models.py +++ b/backend-compliance/compliance/db/models.py @@ -14,7 +14,6 @@ Tables: import enum import uuid from datetime import datetime, date -from typing import Optional, List from sqlalchemy import ( Column, String, Text, Integer, Boolean, DateTime, Date, diff --git a/backend-compliance/compliance/db/repository.py b/backend-compliance/compliance/db/repository.py index dc3c6d9..6fc66d7 100644 --- a/backend-compliance/compliance/db/repository.py +++ b/backend-compliance/compliance/db/repository.py @@ -3,6 +3,7 @@ Repository layer for Compliance module. Provides CRUD operations and business logic queries for all compliance entities. 
""" +from __future__ import annotations import uuid from datetime import datetime, date @@ -17,7 +18,8 @@ from .models import ( EvidenceDB, RiskDB, AuditExportDB, AuditSessionDB, AuditSignOffDB, AuditResultEnum, AuditSessionStatusEnum, RegulationTypeEnum, ControlDomainEnum, ControlStatusEnum, - RiskLevelEnum, EvidenceStatusEnum, ExportStatusEnum + RiskLevelEnum, EvidenceStatusEnum, ExportStatusEnum, + ServiceModuleDB, ModuleRegulationMappingDB, ) @@ -447,7 +449,7 @@ class ControlRepository: self.db.query(ControlDB) .filter( or_( - ControlDB.next_review_at == None, + ControlDB.next_review_at is None, ControlDB.next_review_at <= datetime.utcnow() ) ) @@ -936,7 +938,7 @@ class ServiceModuleRepository: """Get all modules with filters.""" from .models import ServiceModuleDB, ServiceTypeEnum - query = self.db.query(ServiceModuleDB).filter(ServiceModuleDB.is_active == True) + query = self.db.query(ServiceModuleDB).filter(ServiceModuleDB.is_active) if service_type: query = query.filter(ServiceModuleDB.service_type == ServiceTypeEnum(service_type)) @@ -990,8 +992,7 @@ class ServiceModuleRepository: def get_overview(self) -> Dict[str, Any]: """Get overview statistics for all modules.""" - from .models import ServiceModuleDB, ModuleRegulationMappingDB - from sqlalchemy import func + from .models import ModuleRegulationMappingDB modules = self.get_all() total = len(modules) @@ -1035,7 +1036,6 @@ class ServiceModuleRepository: def seed_from_data(self, services_data: List[Dict[str, Any]], force: bool = False) -> Dict[str, int]: """Seed modules from service_modules.py data.""" - from .models import ServiceModuleDB modules_created = 0 mappings_created = 0 diff --git a/backend-compliance/compliance/scripts/validate_service_modules.py b/backend-compliance/compliance/scripts/validate_service_modules.py index 9e32f54..e6c7fd4 100644 --- a/backend-compliance/compliance/scripts/validate_service_modules.py +++ b/backend-compliance/compliance/scripts/validate_service_modules.py @@ -12,7 
+12,7 @@ Checks: import sys from pathlib import Path from collections import defaultdict -from typing import Dict, List, Set +from typing import Dict, List sys.path.insert(0, str(Path(__file__).parent.parent.parent)) @@ -182,7 +182,7 @@ def validate_data_categories(): def main(): """Run all validations.""" print(f"{GREEN}{'='*60}") - print(f" Breakpilot Service Module Validation") + print(" Breakpilot Service Module Validation") print(f"{'='*60}{RESET}") all_passed = True diff --git a/backend-compliance/compliance/services/ai_compliance_assistant.py b/backend-compliance/compliance/services/ai_compliance_assistant.py index 6889086..3b0085f 100644 --- a/backend-compliance/compliance/services/ai_compliance_assistant.py +++ b/backend-compliance/compliance/services/ai_compliance_assistant.py @@ -11,11 +11,11 @@ Provides AI-powered features for: import json import logging import re -from dataclasses import dataclass, field +from dataclasses import dataclass from typing import List, Optional, Dict, Any from enum import Enum -from .llm_provider import LLMProvider, get_shared_provider, LLMResponse +from .llm_provider import LLMProvider, get_shared_provider from .rag_client import get_rag_client logger = logging.getLogger(__name__) diff --git a/backend-compliance/compliance/services/audit_pdf_generator.py b/backend-compliance/compliance/services/audit_pdf_generator.py index 479af71..d35e553 100644 --- a/backend-compliance/compliance/services/audit_pdf_generator.py +++ b/backend-compliance/compliance/services/audit_pdf_generator.py @@ -18,27 +18,23 @@ import io import logging from datetime import datetime from typing import Dict, List, Any, Optional, Tuple -from uuid import uuid4 -import hashlib -from sqlalchemy.orm import Session, selectinload +from sqlalchemy.orm import Session from reportlab.lib import colors from reportlab.lib.pagesizes import A4 from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle -from reportlab.lib.units import mm, cm -from 
reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT, TA_JUSTIFY +from reportlab.lib.units import mm +from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY from reportlab.platypus import ( SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, - PageBreak, Image, ListFlowable, ListItem, KeepTogether, - HRFlowable + PageBreak, HRFlowable ) -from reportlab.graphics.shapes import Drawing, Rect, String +from reportlab.graphics.shapes import Drawing from reportlab.graphics.charts.piecharts import Pie from ..db.models import ( - AuditSessionDB, AuditSignOffDB, AuditResultEnum, AuditSessionStatusEnum, - RequirementDB, RegulationDB + AuditSessionDB, AuditSignOffDB, AuditResultEnum, RequirementDB, RegulationDB ) logger = logging.getLogger(__name__) diff --git a/backend-compliance/compliance/services/auto_risk_updater.py b/backend-compliance/compliance/services/auto_risk_updater.py index 307c394..077b6bd 100644 --- a/backend-compliance/compliance/services/auto_risk_updater.py +++ b/backend-compliance/compliance/services/auto_risk_updater.py @@ -12,7 +12,7 @@ Sprint 6: CI/CD Evidence Collection (2026-01-18) import logging from datetime import datetime -from typing import Dict, List, Optional, Any +from typing import Dict, List, Optional from dataclasses import dataclass from enum import Enum @@ -21,7 +21,7 @@ from sqlalchemy.orm import Session from ..db.models import ( ControlDB, ControlStatusEnum, EvidenceDB, EvidenceStatusEnum, - RiskDB, RiskLevelEnum, + RiskDB, ) from ..db.repository import ControlRepository, EvidenceRepository, RiskRepository diff --git a/backend-compliance/compliance/services/export_generator.py b/backend-compliance/compliance/services/export_generator.py index 805cad9..eeeec1f 100644 --- a/backend-compliance/compliance/services/export_generator.py +++ b/backend-compliance/compliance/services/export_generator.py @@ -189,7 +189,7 @@ class AuditExportGenerator: self, output_dir: Path, included_regulations: Optional[List[str]] ) -> None: """Export 
regulations to JSON files.""" - query = self.db.query(RegulationDB).filter(RegulationDB.is_active == True) + query = self.db.query(RegulationDB).filter(RegulationDB.is_active) if included_regulations: query = query.filter(RegulationDB.code.in_(included_regulations)) @@ -557,7 +557,7 @@ Generiert am: """ + datetime.now().strftime("%Y-%m-%d %H:%M:%S") ) -> Dict[str, Any]: """Calculate compliance statistics.""" # Count regulations - reg_query = self.db.query(RegulationDB).filter(RegulationDB.is_active == True) + reg_query = self.db.query(RegulationDB).filter(RegulationDB.is_active) if included_regulations: reg_query = reg_query.filter(RegulationDB.code.in_(included_regulations)) total_regulations = reg_query.count() diff --git a/backend-compliance/compliance/services/llm_provider.py b/backend-compliance/compliance/services/llm_provider.py index eaf4611..294e230 100644 --- a/backend-compliance/compliance/services/llm_provider.py +++ b/backend-compliance/compliance/services/llm_provider.py @@ -26,7 +26,7 @@ import asyncio import logging from abc import ABC, abstractmethod from typing import List, Optional, Dict, Any -from dataclasses import dataclass, field +from dataclasses import dataclass from enum import Enum import httpx diff --git a/backend-compliance/compliance/services/regulation_scraper.py b/backend-compliance/compliance/services/regulation_scraper.py index a207e59..83d06e1 100644 --- a/backend-compliance/compliance/services/regulation_scraper.py +++ b/backend-compliance/compliance/services/regulation_scraper.py @@ -11,11 +11,9 @@ Similar pattern to edu-search and zeugnisse-crawler. 
import logging import re -import asyncio from datetime import datetime from typing import Dict, List, Any, Optional from enum import Enum -import hashlib import httpx from bs4 import BeautifulSoup diff --git a/backend-compliance/compliance/services/report_generator.py b/backend-compliance/compliance/services/report_generator.py index 4ed03f3..2765b98 100644 --- a/backend-compliance/compliance/services/report_generator.py +++ b/backend-compliance/compliance/services/report_generator.py @@ -19,16 +19,11 @@ from sqlalchemy.orm import Session from sqlalchemy import func from ..db.models import ( - RegulationDB, RequirementDB, ControlDB, ControlMappingDB, - EvidenceDB, - RiskDB, - AuditExportDB, ControlStatusEnum, RiskLevelEnum, - EvidenceStatusEnum, ) from ..db.repository import ( RegulationRepository, @@ -171,7 +166,6 @@ class ComplianceReportGenerator: # Control status findings by_status = ctrl_stats.get("by_status", {}) - passed = by_status.get("pass", 0) failed = by_status.get("fail", 0) planned = by_status.get("planned", 0) @@ -200,10 +194,8 @@ class ComplianceReportGenerator: """Generate compliance score section with breakdown.""" stats = self.ctrl_repo.get_statistics() - by_domain = stats.get("by_domain", {}) - domain_scores = {} - controls = self.ctrl_repo.get_all() + domain_scores = {} domain_stats = {} for ctrl in controls: diff --git a/backend-compliance/compliance/services/seeder.py b/backend-compliance/compliance/services/seeder.py index 2a1650b..898e66a 100644 --- a/backend-compliance/compliance/services/seeder.py +++ b/backend-compliance/compliance/services/seeder.py @@ -5,8 +5,7 @@ Seeds the database with initial regulations, controls, and requirements. 
""" import logging -from typing import Dict, List, Optional -from datetime import datetime +from typing import Dict from sqlalchemy.orm import Session @@ -23,7 +22,6 @@ from ..db.models import ( ControlTypeEnum, ControlDomainEnum, ControlStatusEnum, - RiskLevelEnum, ServiceTypeEnum, RelevanceLevelEnum, ) diff --git a/backend-compliance/compliance/tests/test_audit_routes.py b/backend-compliance/compliance/tests/test_audit_routes.py index b181d26..5800b5c 100644 --- a/backend-compliance/compliance/tests/test_audit_routes.py +++ b/backend-compliance/compliance/tests/test_audit_routes.py @@ -9,10 +9,9 @@ Run with: pytest backend/compliance/tests/test_audit_routes.py -v import pytest import hashlib from datetime import datetime -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock from uuid import uuid4 -from fastapi.testclient import TestClient from sqlalchemy.orm import Session # Import the app and dependencies diff --git a/backend-compliance/compliance/tests/test_auto_risk_updater.py b/backend-compliance/compliance/tests/test_auto_risk_updater.py index 679bc1f..7e7c43e 100644 --- a/backend-compliance/compliance/tests/test_auto_risk_updater.py +++ b/backend-compliance/compliance/tests/test_auto_risk_updater.py @@ -4,10 +4,8 @@ Tests for the AutoRiskUpdater Service. 
Sprint 6: CI/CD Evidence Collection & Automatic Risk Updates (2026-01-18) """ -import pytest from datetime import datetime -from unittest.mock import MagicMock, patch -from uuid import uuid4 +from unittest.mock import MagicMock from ..services.auto_risk_updater import ( AutoRiskUpdater, @@ -18,9 +16,7 @@ from ..services.auto_risk_updater import ( CONTROL_SCAN_MAPPING, ) from ..db.models import ( - ControlDB, ControlStatusEnum, - EvidenceDB, EvidenceStatusEnum, - RiskDB, RiskLevelEnum, + ControlStatusEnum, ) diff --git a/backend-compliance/compliance/tests/test_compliance_routes.py b/backend-compliance/compliance/tests/test_compliance_routes.py index e332214..7195101 100644 --- a/backend-compliance/compliance/tests/test_compliance_routes.py +++ b/backend-compliance/compliance/tests/test_compliance_routes.py @@ -16,7 +16,6 @@ from compliance.db.models import ( RequirementDB, RegulationDB, AISystemDB, AIClassificationEnum, AISystemStatusEnum, RiskDB, RiskLevelEnum, - EvidenceDB, EvidenceStatusEnum, ) from compliance.db.repository import RequirementRepository diff --git a/backend-compliance/compliance/tests/test_isms_routes.py b/backend-compliance/compliance/tests/test_isms_routes.py index d316cc6..01a063b 100644 --- a/backend-compliance/compliance/tests/test_isms_routes.py +++ b/backend-compliance/compliance/tests/test_isms_routes.py @@ -16,7 +16,7 @@ Run with: pytest backend/compliance/tests/test_isms_routes.py -v import pytest from datetime import datetime, date -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock from uuid import uuid4 from sqlalchemy.orm import Session @@ -25,7 +25,7 @@ import sys sys.path.insert(0, '/Users/benjaminadmin/Projekte/breakpilot-pwa/backend') from compliance.db.models import ( - ISMSScopeDB, ISMSContextDB, ISMSPolicyDB, SecurityObjectiveDB, + ISMSScopeDB, ISMSPolicyDB, SecurityObjectiveDB, StatementOfApplicabilityDB, AuditFindingDB, CorrectiveActionDB, ManagementReviewDB, InternalAuditDB, AuditTrailDB, 
ISMSReadinessCheckDB, ApprovalStatusEnum, FindingTypeEnum, FindingStatusEnum, CAPATypeEnum @@ -393,7 +393,7 @@ class TestAuditFinding: # is_blocking is a property, so we check the type is_blocking = (sample_major_finding.finding_type == FindingTypeEnum.MAJOR and sample_major_finding.status != FindingStatusEnum.CLOSED) - assert is_blocking == True + assert is_blocking def test_finding_has_objective_evidence(self, sample_finding): """Findings should have objective evidence.""" @@ -524,7 +524,7 @@ class TestISMSReadinessCheck: readiness_score=30.0, ) - assert check.certification_possible == False + assert not check.certification_possible assert len(check.potential_majors) >= 1 assert check.readiness_score < 100 @@ -551,7 +551,7 @@ class TestISMSReadinessCheck: assert check.chapter_4_status == "pass" assert check.chapter_5_status == "pass" assert check.chapter_9_status == "pass" - assert check.certification_possible == True + assert check.certification_possible # ============================================================================ @@ -660,7 +660,7 @@ class TestCertificationBlockers: is_blocking = (finding.finding_type == FindingTypeEnum.MAJOR and finding.status != FindingStatusEnum.CLOSED) - assert is_blocking == True + assert is_blocking def test_closed_major_allows_certification(self): """Closed major findings should not block certification.""" @@ -677,7 +677,7 @@ class TestCertificationBlockers: is_blocking = (finding.finding_type == FindingTypeEnum.MAJOR and finding.status != FindingStatusEnum.CLOSED) - assert is_blocking == False + assert not is_blocking def test_minor_findings_dont_block_certification(self): """Minor findings should not block certification.""" @@ -693,4 +693,4 @@ class TestCertificationBlockers: is_blocking = (finding.finding_type == FindingTypeEnum.MAJOR and finding.status != FindingStatusEnum.CLOSED) - assert is_blocking == False + assert not is_blocking diff --git a/backend-compliance/dsr_admin_api.py 
b/backend-compliance/dsr_admin_api.py deleted file mode 100644 index 7904bd4..0000000 --- a/backend-compliance/dsr_admin_api.py +++ /dev/null @@ -1,415 +0,0 @@ -""" -Data Subject Request (DSR) Admin API - Betroffenenanfragen-Verwaltung -Admin-Endpunkte für die Verwaltung von Betroffenenanfragen nach DSGVO -""" - -from fastapi import APIRouter, HTTPException, Header, Query -from typing import Optional, List, Dict, Any -from pydantic import BaseModel -import httpx -import os - -from consent_client import generate_jwt_token, JWT_SECRET - -# Consent Service URL -CONSENT_SERVICE_URL = os.getenv("CONSENT_SERVICE_URL", "http://localhost:8081") - -router = APIRouter(prefix="/v1/admin/dsr", tags=["dsr-admin"]) - -# Admin User UUID (muss in der DB existieren!) -ADMIN_USER_UUID = "a0000000-0000-0000-0000-000000000001" - - -# Request Models -class CreateDSRRequest(BaseModel): - """Admin-Anfrage zum manuellen Erstellen einer Betroffenenanfrage""" - request_type: str - requester_email: str - requester_name: Optional[str] = None - requester_phone: Optional[str] = None - request_details: Optional[Dict[str, Any]] = None - priority: Optional[str] = None # normal, high, expedited - source: Optional[str] = "admin_panel" - - -class UpdateDSRRequest(BaseModel): - """Anfrage zum Aktualisieren einer Betroffenenanfrage""" - status: Optional[str] = None - priority: Optional[str] = None - processing_notes: Optional[str] = None - - -class UpdateStatusRequest(BaseModel): - """Anfrage zum Ändern des Status""" - status: str - comment: Optional[str] = None - - -class VerifyIdentityRequest(BaseModel): - """Anfrage zur Identitätsverifizierung""" - method: str # id_card, passport, video_call, email, phone, other - - -class AssignRequest(BaseModel): - """Anfrage zur Zuweisung""" - assignee_id: str - - -class ExtendDeadlineRequest(BaseModel): - """Anfrage zur Fristverlängerung""" - reason: str - days: Optional[int] = 60 - - -class CompleteDSRRequest(BaseModel): - """Anfrage zum Abschließen einer 
Betroffenenanfrage""" - summary: str - result_data: Optional[Dict[str, Any]] = None - - -class RejectDSRRequest(BaseModel): - """Anfrage zum Ablehnen einer Betroffenenanfrage""" - reason: str - legal_basis: str # Art. 17(3)a, Art. 17(3)b, Art. 17(3)c, Art. 17(3)d, Art. 17(3)e, Art. 12(5) - - -class SendCommunicationRequest(BaseModel): - """Anfrage zum Senden einer Kommunikation""" - communication_type: str - template_version_id: Optional[str] = None - custom_subject: Optional[str] = None - custom_body: Optional[str] = None - variables: Optional[Dict[str, str]] = None - - -class UpdateExceptionCheckRequest(BaseModel): - """Anfrage zum Aktualisieren einer Ausnahmeprüfung""" - applies: bool - notes: Optional[str] = None - - -class CreateTemplateVersionRequest(BaseModel): - """Anfrage zum Erstellen einer Vorlagen-Version""" - version: str - language: Optional[str] = "de" - subject: str - body_html: str - body_text: Optional[str] = None - - -# Helper für Admin Token -def get_admin_token(authorization: Optional[str]) -> str: - if authorization: - parts = authorization.split(" ") - if len(parts) == 2 and parts[0] == "Bearer": - return parts[1] - - # Für Entwicklung: Generiere einen Admin-Token - return generate_jwt_token( - user_id=ADMIN_USER_UUID, - email="admin@breakpilot.app", - role="admin" - ) - - -async def proxy_request(method: str, path: str, token: str, json_data=None, query_params=None): - """Proxied Anfragen an den Go Consent Service""" - url = f"{CONSENT_SERVICE_URL}/api/v1/admin{path}" - headers = { - "Authorization": f"Bearer {token}", - "Content-Type": "application/json" - } - - async with httpx.AsyncClient() as client: - try: - if method == "GET": - response = await client.get(url, headers=headers, params=query_params, timeout=30.0) - elif method == "POST": - response = await client.post(url, headers=headers, json=json_data, timeout=30.0) - elif method == "PUT": - response = await client.put(url, headers=headers, json=json_data, timeout=30.0) - elif method 
== "DELETE": - response = await client.delete(url, headers=headers, timeout=30.0) - else: - raise HTTPException(status_code=400, detail="Invalid method") - - if response.status_code >= 400: - error_detail = response.json() if response.content else {"error": "Unknown error"} - raise HTTPException(status_code=response.status_code, detail=error_detail) - - return response.json() if response.content else {"success": True} - - except httpx.RequestError as e: - raise HTTPException(status_code=503, detail=f"Consent Service unavailable: {str(e)}") - - -# ========================================== -# DSR List & Statistics -# ========================================== - -@router.get("") -async def admin_list_dsr( - status: Optional[str] = Query(None, description="Filter by status"), - request_type: Optional[str] = Query(None, description="Filter by request type"), - assigned_to: Optional[str] = Query(None, description="Filter by assignee"), - priority: Optional[str] = Query(None, description="Filter by priority"), - overdue_only: bool = Query(False, description="Only overdue requests"), - search: Optional[str] = Query(None, description="Search term"), - from_date: Optional[str] = Query(None, description="From date (YYYY-MM-DD)"), - to_date: Optional[str] = Query(None, description="To date (YYYY-MM-DD)"), - limit: int = Query(20, ge=1, le=100), - offset: int = Query(0, ge=0), - authorization: Optional[str] = Header(None) -): - """Gibt alle Betroffenenanfragen mit Filtern zurück""" - token = get_admin_token(authorization) - params = {"limit": limit, "offset": offset} - if status: - params["status"] = status - if request_type: - params["request_type"] = request_type - if assigned_to: - params["assigned_to"] = assigned_to - if priority: - params["priority"] = priority - if overdue_only: - params["overdue_only"] = "true" - if search: - params["search"] = search - if from_date: - params["from_date"] = from_date - if to_date: - params["to_date"] = to_date - return await 
proxy_request("GET", "/dsr", token, query_params=params) - - -@router.get("/stats") -async def admin_get_dsr_stats(authorization: Optional[str] = Header(None)): - """Gibt Dashboard-Statistiken für Betroffenenanfragen zurück""" - token = get_admin_token(authorization) - return await proxy_request("GET", "/dsr/stats", token) - - -# ========================================== -# Single DSR Management -# ========================================== - -@router.get("/{dsr_id}") -async def admin_get_dsr(dsr_id: str, authorization: Optional[str] = Header(None)): - """Gibt Details einer Betroffenenanfrage zurück""" - token = get_admin_token(authorization) - return await proxy_request("GET", f"/dsr/{dsr_id}", token) - - -@router.post("") -async def admin_create_dsr( - request: CreateDSRRequest, - authorization: Optional[str] = Header(None) -): - """Erstellt eine Betroffenenanfrage manuell""" - token = get_admin_token(authorization) - return await proxy_request("POST", "/dsr", token, request.dict(exclude_none=True)) - - -@router.put("/{dsr_id}") -async def admin_update_dsr( - dsr_id: str, - request: UpdateDSRRequest, - authorization: Optional[str] = Header(None) -): - """Aktualisiert eine Betroffenenanfrage""" - token = get_admin_token(authorization) - return await proxy_request("PUT", f"/dsr/{dsr_id}", token, request.dict(exclude_none=True)) - - -@router.post("/{dsr_id}/status") -async def admin_update_dsr_status( - dsr_id: str, - request: UpdateStatusRequest, - authorization: Optional[str] = Header(None) -): - """Ändert den Status einer Betroffenenanfrage""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/status", token, request.dict(exclude_none=True)) - - -# ========================================== -# DSR Workflow Actions -# ========================================== - -@router.post("/{dsr_id}/verify-identity") -async def admin_verify_identity( - dsr_id: str, - request: VerifyIdentityRequest, - authorization: Optional[str] = 
Header(None) -): - """Verifiziert die Identität des Antragstellers""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/verify-identity", token, request.dict()) - - -@router.post("/{dsr_id}/assign") -async def admin_assign_dsr( - dsr_id: str, - request: AssignRequest, - authorization: Optional[str] = Header(None) -): - """Weist eine Betroffenenanfrage einem Bearbeiter zu""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/assign", token, request.dict()) - - -@router.post("/{dsr_id}/extend") -async def admin_extend_deadline( - dsr_id: str, - request: ExtendDeadlineRequest, - authorization: Optional[str] = Header(None) -): - """Verlängert die Bearbeitungsfrist (max. 2 weitere Monate nach Art. 12(3))""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/extend", token, request.dict()) - - -@router.post("/{dsr_id}/complete") -async def admin_complete_dsr( - dsr_id: str, - request: CompleteDSRRequest, - authorization: Optional[str] = Header(None) -): - """Schließt eine Betroffenenanfrage erfolgreich ab""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/complete", token, request.dict(exclude_none=True)) - - -@router.post("/{dsr_id}/reject") -async def admin_reject_dsr( - dsr_id: str, - request: RejectDSRRequest, - authorization: Optional[str] = Header(None) -): - """Lehnt eine Betroffenenanfrage mit Rechtsgrundlage ab""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/reject", token, request.dict()) - - -# ========================================== -# DSR History & Communications -# ========================================== - -@router.get("/{dsr_id}/history") -async def admin_get_dsr_history(dsr_id: str, authorization: Optional[str] = Header(None)): - """Gibt die Status-Historie einer Betroffenenanfrage zurück""" - token = 
get_admin_token(authorization) - return await proxy_request("GET", f"/dsr/{dsr_id}/history", token) - - -@router.get("/{dsr_id}/communications") -async def admin_get_dsr_communications(dsr_id: str, authorization: Optional[str] = Header(None)): - """Gibt die Kommunikationshistorie einer Betroffenenanfrage zurück""" - token = get_admin_token(authorization) - return await proxy_request("GET", f"/dsr/{dsr_id}/communications", token) - - -@router.post("/{dsr_id}/communicate") -async def admin_send_communication( - dsr_id: str, - request: SendCommunicationRequest, - authorization: Optional[str] = Header(None) -): - """Sendet eine Kommunikation zum Antragsteller""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/communicate", token, request.dict(exclude_none=True)) - - -# ========================================== -# Exception Checks (Art. 17) -# ========================================== - -@router.get("/{dsr_id}/exception-checks") -async def admin_get_exception_checks(dsr_id: str, authorization: Optional[str] = Header(None)): - """Gibt die Ausnahmeprüfungen für Löschanfragen (Art. 
17) zurück""" - token = get_admin_token(authorization) - return await proxy_request("GET", f"/dsr/{dsr_id}/exception-checks", token) - - -@router.post("/{dsr_id}/exception-checks/init") -async def admin_init_exception_checks(dsr_id: str, authorization: Optional[str] = Header(None)): - """Initialisiert die Ausnahmeprüfungen für eine Löschanfrage""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/exception-checks/init", token) - - -@router.put("/{dsr_id}/exception-checks/{check_id}") -async def admin_update_exception_check( - dsr_id: str, - check_id: str, - request: UpdateExceptionCheckRequest, - authorization: Optional[str] = Header(None) -): - """Aktualisiert eine einzelne Ausnahmeprüfung""" - token = get_admin_token(authorization) - return await proxy_request("PUT", f"/dsr/{dsr_id}/exception-checks/{check_id}", token, request.dict(exclude_none=True)) - - -# ========================================== -# Deadline Processing -# ========================================== - -@router.post("/deadlines/process") -async def admin_process_deadlines(authorization: Optional[str] = Header(None)): - """Verarbeitet Fristen und sendet Warnungen (für Cronjob)""" - token = get_admin_token(authorization) - return await proxy_request("POST", "/dsr/deadlines/process", token) - - -# ========================================== -# DSR Templates Router -# ========================================== - -templates_router = APIRouter(prefix="/v1/admin/dsr-templates", tags=["dsr-templates"]) - - -@templates_router.get("") -async def admin_get_templates(authorization: Optional[str] = Header(None)): - """Gibt alle DSR-Vorlagen zurück""" - token = get_admin_token(authorization) - return await proxy_request("GET", "/dsr-templates", token) - - -@templates_router.get("/published") -async def admin_get_published_templates( - request_type: Optional[str] = Query(None, description="Filter by request type"), - language: str = Query("de", 
description="Language"), - authorization: Optional[str] = Header(None) -): - """Gibt alle veröffentlichten Vorlagen für die Auswahl zurück""" - token = get_admin_token(authorization) - params = {"language": language} - if request_type: - params["request_type"] = request_type - return await proxy_request("GET", "/dsr-templates/published", token, query_params=params) - - -@templates_router.get("/{template_id}/versions") -async def admin_get_template_versions(template_id: str, authorization: Optional[str] = Header(None)): - """Gibt alle Versionen einer Vorlage zurück""" - token = get_admin_token(authorization) - return await proxy_request("GET", f"/dsr-templates/{template_id}/versions", token) - - -@templates_router.post("/{template_id}/versions") -async def admin_create_template_version( - template_id: str, - request: CreateTemplateVersionRequest, - authorization: Optional[str] = Header(None) -): - """Erstellt eine neue Version einer Vorlage""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr-templates/{template_id}/versions", token, request.dict(exclude_none=True)) - - -@templates_router.post("/versions/{version_id}/publish") -async def admin_publish_template_version(version_id: str, authorization: Optional[str] = Header(None)): - """Veröffentlicht eine Vorlagen-Version""" - token = get_admin_token(authorization) - return await proxy_request("POST", f"/dsr-template-versions/{version_id}/publish", token) diff --git a/backend-compliance/dsr_api.py b/backend-compliance/dsr_api.py deleted file mode 100644 index 0eb801b..0000000 --- a/backend-compliance/dsr_api.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -Data Subject Request (DSR) API - Betroffenenanfragen nach DSGVO -Benutzer-Endpunkte zum Erstellen und Verwalten eigener Betroffenenanfragen -""" - -from fastapi import APIRouter, HTTPException, Header, Query -from typing import Optional, List, Dict, Any -from pydantic import BaseModel, EmailStr -import httpx -import os - -from 
consent_client import generate_jwt_token, JWT_SECRET - -# Consent Service URL -CONSENT_SERVICE_URL = os.getenv("CONSENT_SERVICE_URL", "http://localhost:8081") - -router = APIRouter(prefix="/v1/dsr", tags=["dsr"]) - - -# Request Models -class CreateDSRRequest(BaseModel): - """Anfrage zum Erstellen einer Betroffenenanfrage""" - request_type: str # access, rectification, erasure, restriction, portability - requester_email: Optional[str] = None - requester_name: Optional[str] = None - requester_phone: Optional[str] = None - request_details: Optional[Dict[str, Any]] = None - - -# Helper to extract token -def get_token(authorization: Optional[str]) -> str: - if authorization: - parts = authorization.split(" ") - if len(parts) == 2 and parts[0] == "Bearer": - return parts[1] - raise HTTPException(status_code=401, detail="Authorization required") - - -async def proxy_request(method: str, path: str, token: str, json_data=None, query_params=None): - """Proxied Anfragen an den Go Consent Service""" - url = f"{CONSENT_SERVICE_URL}/api/v1{path}" - headers = { - "Authorization": f"Bearer {token}", - "Content-Type": "application/json" - } - - async with httpx.AsyncClient() as client: - try: - if method == "GET": - response = await client.get(url, headers=headers, params=query_params, timeout=10.0) - elif method == "POST": - response = await client.post(url, headers=headers, json=json_data, timeout=10.0) - elif method == "PUT": - response = await client.put(url, headers=headers, json=json_data, timeout=10.0) - elif method == "DELETE": - response = await client.delete(url, headers=headers, timeout=10.0) - else: - raise HTTPException(status_code=400, detail="Invalid method") - - if response.status_code >= 400: - error_detail = response.json() if response.content else {"error": "Unknown error"} - raise HTTPException(status_code=response.status_code, detail=error_detail) - - return response.json() if response.content else {"success": True} - - except httpx.RequestError as e: - raise 
HTTPException(status_code=503, detail=f"Consent Service unavailable: {str(e)}") - - -# ========================================== -# User DSR Endpoints -# ========================================== - -@router.post("") -async def create_dsr( - request: CreateDSRRequest, - authorization: str = Header(...) -): - """ - Erstellt eine neue Betroffenenanfrage. - - request_type muss einer der folgenden Werte sein: - - access: Auskunftsrecht (Art. 15 DSGVO) - - rectification: Recht auf Berichtigung (Art. 16 DSGVO) - - erasure: Recht auf Löschung (Art. 17 DSGVO) - - restriction: Recht auf Einschränkung (Art. 18 DSGVO) - - portability: Recht auf Datenübertragbarkeit (Art. 20 DSGVO) - """ - token = get_token(authorization) - return await proxy_request("POST", "/dsr", token, request.dict(exclude_none=True)) - - -@router.get("") -async def get_my_dsrs(authorization: str = Header(...)): - """Gibt alle eigenen Betroffenenanfragen zurück""" - token = get_token(authorization) - return await proxy_request("GET", "/dsr", token) - - -@router.get("/{dsr_id}") -async def get_my_dsr(dsr_id: str, authorization: str = Header(...)): - """Gibt Details einer eigenen Betroffenenanfrage zurück""" - token = get_token(authorization) - return await proxy_request("GET", f"/dsr/{dsr_id}", token) - - -@router.post("/{dsr_id}/cancel") -async def cancel_my_dsr(dsr_id: str, authorization: str = Header(...)): - """Storniert eine eigene Betroffenenanfrage""" - token = get_token(authorization) - return await proxy_request("POST", f"/dsr/{dsr_id}/cancel", token) diff --git a/backend-compliance/middleware/__init__.py b/backend-compliance/middleware/__init__.py index 1497144..0756ef1 100644 --- a/backend-compliance/middleware/__init__.py +++ b/backend-compliance/middleware/__init__.py @@ -4,23 +4,13 @@ BreakPilot Middleware Stack This module provides middleware components for the FastAPI backend: - Request-ID: Adds unique request identifiers for tracing - Security Headers: Adds security headers to all 
responses -- Rate Limiter: Protects against abuse (Valkey-based) -- PII Redactor: Redacts sensitive data from logs -- Input Gate: Validates request body size and content types """ from .request_id import RequestIDMiddleware, get_request_id from .security_headers import SecurityHeadersMiddleware -from .rate_limiter import RateLimiterMiddleware -from .pii_redactor import PIIRedactor, redact_pii -from .input_gate import InputGateMiddleware __all__ = [ "RequestIDMiddleware", "get_request_id", "SecurityHeadersMiddleware", - "RateLimiterMiddleware", - "PIIRedactor", - "redact_pii", - "InputGateMiddleware", ] diff --git a/backend-compliance/requirements.txt b/backend-compliance/requirements.txt index 99aa4ff..3de31a2 100644 --- a/backend-compliance/requirements.txt +++ b/backend-compliance/requirements.txt @@ -17,13 +17,13 @@ annotated-types==0.7.0 # Authentication PyJWT==2.10.1 -python-multipart==0.0.20 +python-multipart>=0.0.22 # AI / Anthropic (compliance AI assistant) anthropic==0.75.0 # PDF Generation (GDPR export, audit reports) -weasyprint==66.0 +weasyprint>=68.0 reportlab==4.2.5 Jinja2==3.1.6 @@ -48,3 +48,4 @@ redis==5.2.1 # Security: Pin transitive dependencies to patched versions idna>=3.7 cryptography>=42.0.0 +pillow>=12.1.1 diff --git a/backend-compliance/tests/test_company_profile_routes.py b/backend-compliance/tests/test_company_profile_routes.py index c9c88ab..3c75add 100644 --- a/backend-compliance/tests/test_company_profile_routes.py +++ b/backend-compliance/tests/test_company_profile_routes.py @@ -50,7 +50,7 @@ class TestRowToResponse: """Tests for DB row to response conversion.""" def _make_row(self, **overrides): - """Create a mock DB row with 30 fields.""" + """Create a mock DB row with 40 fields (matching row_to_response indices).""" defaults = [ "uuid-123", # 0: id "default", # 1: tenant_id @@ -82,6 +82,17 @@ class TestRowToResponse: "2026-01-01", # 27: completed_at "2026-01-01", # 28: created_at "2026-01-01", # 29: updated_at + # Phase 2 fields 
(indices 30-39) + [], # 30: repos + [], # 31: document_sources + [], # 32: processing_systems + [], # 33: ai_systems + [], # 34: technical_contacts + False, # 35: subject_to_nis2 + False, # 36: subject_to_ai_act + False, # 37: subject_to_iso27001 + None, # 38: supervisory_authority + 12, # 39: review_cycle_months ] return tuple(defaults) diff --git a/backend-compliance/tests/test_dsfa_routes.py b/backend-compliance/tests/test_dsfa_routes.py index 535c997..6b78d78 100644 --- a/backend-compliance/tests/test_dsfa_routes.py +++ b/backend-compliance/tests/test_dsfa_routes.py @@ -429,7 +429,7 @@ class TestGetTenantId: assert _get_tenant_id("my-tenant") == "my-tenant" def test_default_constant_value(self): - assert DEFAULT_TENANT_ID == "default" + assert DEFAULT_TENANT_ID == "9282a473-5c95-4b3a-bf78-0ecc0ec71d3e" # ============================================================================= diff --git a/backend-compliance/tests/test_evidence_routes.py b/backend-compliance/tests/test_evidence_routes.py index 1bcefb0..39d0236 100644 --- a/backend-compliance/tests/test_evidence_routes.py +++ b/backend-compliance/tests/test_evidence_routes.py @@ -252,3 +252,268 @@ class TestEvidenceCIStatus: MockRepo.return_value.get_all.return_value = [] response = client.get("/evidence/ci-status", params={"control_id": CONTROL_UUID}) assert response.status_code == 200 + + def test_ci_status_without_control_id(self): + """GET /evidence/ci-status without control_id returns all CI evidence.""" + mock_query = MagicMock() + mock_query.filter.return_value = mock_query + mock_query.order_by.return_value = mock_query + mock_query.limit.return_value = mock_query + mock_query.all.return_value = [] + mock_db.query.return_value = mock_query + response = client.get("/evidence/ci-status") + assert response.status_code == 200 + data = response.json() + assert data["period_days"] == 30 + assert data["total_evidence"] == 0 + assert data["controls"] == [] + + def test_ci_status_custom_days_param(self): + 
"""GET /evidence/ci-status with custom days lookback.""" + mock_query = MagicMock() + mock_query.filter.return_value = mock_query + mock_query.order_by.return_value = mock_query + mock_query.limit.return_value = mock_query + mock_query.all.return_value = [] + mock_db.query.return_value = mock_query + response = client.get("/evidence/ci-status", params={"days": 7}) + assert response.status_code == 200 + data = response.json() + assert data["period_days"] == 7 + + +class TestCollectCIEvidence: + """Tests for POST /evidence/collect.""" + + def test_collect_sast_evidence_success(self): + """Collect SAST evidence with Semgrep-format report data.""" + ctrl = make_control({"control_id": "SDLC-001"}) + evidence = make_evidence({ + "evidence_type": "ci_sast", + "source": "ci_pipeline", + "ci_job_id": "job-456", + }) + with patch("compliance.api.evidence_routes.ControlRepository") as MockCtrlRepo, \ + patch("compliance.api.evidence_routes._store_evidence", return_value=evidence), \ + patch("compliance.api.evidence_routes._update_risks", return_value=None): + MockCtrlRepo.return_value.get_by_control_id.return_value = ctrl + response = client.post( + "/evidence/collect", + params={"source": "sast", "ci_job_id": "job-456"}, + json={"results": [ + {"check_id": "python.lang.security", "extra": {"severity": "MEDIUM"}}, + ]}, + ) + assert response.status_code == 200 + data = response.json() + assert data["success"] is True + assert data["source"] == "sast" + assert data["control_id"] == "SDLC-001" + + def test_collect_unknown_source_returns_400(self): + """Unknown source should return 400.""" + response = client.post( + "/evidence/collect", + params={"source": "unknown_tool"}, + json={}, + ) + assert response.status_code == 400 + assert "Unknown source" in response.json()["detail"] + + def test_collect_control_not_found_returns_404(self): + """If the mapped control does not exist in DB, return 404.""" + with patch("compliance.api.evidence_routes.ControlRepository") as MockCtrlRepo: 
+ MockCtrlRepo.return_value.get_by_control_id.return_value = None + response = client.post( + "/evidence/collect", + params={"source": "sast"}, + json={"results": []}, + ) + assert response.status_code == 404 + assert "SDLC-001" in response.json()["detail"] + + def test_collect_with_null_report_data(self): + """Collect with no report data body (None).""" + ctrl = make_control({"control_id": "SDLC-002"}) + evidence = make_evidence({ + "evidence_type": "ci_dependency_scan", + "source": "ci_pipeline", + }) + with patch("compliance.api.evidence_routes.ControlRepository") as MockCtrlRepo, \ + patch("compliance.api.evidence_routes._store_evidence", return_value=evidence), \ + patch("compliance.api.evidence_routes._update_risks", return_value=None): + MockCtrlRepo.return_value.get_by_control_id.return_value = ctrl + response = client.post( + "/evidence/collect", + params={"source": "dependency_scan"}, + ) + assert response.status_code == 200 + data = response.json() + assert data["success"] is True + + def test_collect_sbom_source(self): + """Collect SBOM evidence with components list.""" + ctrl = make_control({"control_id": "SDLC-005"}) + evidence = make_evidence({ + "evidence_type": "ci_sbom", + "source": "ci_pipeline", + }) + with patch("compliance.api.evidence_routes.ControlRepository") as MockCtrlRepo, \ + patch("compliance.api.evidence_routes._store_evidence", return_value=evidence), \ + patch("compliance.api.evidence_routes._update_risks", return_value=None): + MockCtrlRepo.return_value.get_by_control_id.return_value = ctrl + response = client.post( + "/evidence/collect", + params={"source": "sbom"}, + json={"components": [ + {"name": "fastapi", "version": "0.100.0"}, + {"name": "pydantic", "version": "2.0.0"}, + ]}, + ) + assert response.status_code == 200 + data = response.json() + assert data["success"] is True + assert data["source"] == "sbom" + + +class TestParseCIEvidence: + """Unit tests for _parse_ci_evidence helper.""" + + def test_parse_empty_data(self): 
+ from compliance.api.evidence_routes import _parse_ci_evidence + result = _parse_ci_evidence({}) + assert result["findings_count"] == 0 + assert result["critical_findings"] == 0 + assert result["evidence_status"] == "valid" + + def test_parse_none_data(self): + from compliance.api.evidence_routes import _parse_ci_evidence + result = _parse_ci_evidence(None) + assert result["evidence_status"] == "valid" + assert result["report_json"] == "{}" + + def test_parse_semgrep_with_critical(self): + """Semgrep results with CRITICAL severity → status=failed.""" + from compliance.api.evidence_routes import _parse_ci_evidence + data = { + "results": [ + {"check_id": "sql-injection", "extra": {"severity": "CRITICAL"}}, + {"check_id": "xss", "extra": {"severity": "MEDIUM"}}, + ] + } + result = _parse_ci_evidence(data) + assert result["findings_count"] == 2 + assert result["critical_findings"] == 1 + assert result["evidence_status"] == "failed" + + def test_parse_trivy_format(self): + """Trivy Results format with Vulnerabilities.""" + from compliance.api.evidence_routes import _parse_ci_evidence + data = { + "Results": [ + { + "Target": "python:3.11", + "Vulnerabilities": [ + {"VulnerabilityID": "CVE-2024-001", "Severity": "HIGH"}, + {"VulnerabilityID": "CVE-2024-002", "Severity": "LOW"}, + ], + } + ] + } + result = _parse_ci_evidence(data) + assert result["findings_count"] == 2 + assert result["critical_findings"] == 1 + assert result["evidence_status"] == "failed" + + def test_parse_generic_findings(self): + """Generic findings array format.""" + from compliance.api.evidence_routes import _parse_ci_evidence + data = {"findings": [{"id": "f1"}, {"id": "f2"}, {"id": "f3"}]} + result = _parse_ci_evidence(data) + assert result["findings_count"] == 3 + assert result["critical_findings"] == 0 + assert result["evidence_status"] == "valid" + + def test_parse_sbom_components(self): + """SBOM components → findings_count = number of components.""" + from compliance.api.evidence_routes 
import _parse_ci_evidence + data = {"components": [{"name": "a"}, {"name": "b"}]} + result = _parse_ci_evidence(data) + assert result["findings_count"] == 2 + assert result["evidence_status"] == "valid" + + +class TestExtractFindingsDetail: + """Unit tests for _extract_findings_detail helper.""" + + def test_extract_empty(self): + from compliance.api.evidence_routes import _extract_findings_detail + result = _extract_findings_detail({}) + assert result == {"critical": 0, "high": 0, "medium": 0, "low": 0} + + def test_extract_none(self): + from compliance.api.evidence_routes import _extract_findings_detail + result = _extract_findings_detail(None) + assert result == {"critical": 0, "high": 0, "medium": 0, "low": 0} + + def test_extract_semgrep_severities(self): + from compliance.api.evidence_routes import _extract_findings_detail + data = { + "results": [ + {"extra": {"severity": "CRITICAL"}}, + {"extra": {"severity": "HIGH"}}, + {"extra": {"severity": "MEDIUM"}}, + {"extra": {"severity": "LOW"}}, + {"extra": {"severity": "INFO"}}, + ] + } + result = _extract_findings_detail(data) + assert result["critical"] == 1 + assert result["high"] == 1 + assert result["medium"] == 1 + assert result["low"] == 2 # LOW + INFO both count as low + + +class TestListEvidenceEdgeCases: + """Additional edge-case tests for GET /evidence.""" + + def test_list_filter_by_status(self): + """Filter by status parameter.""" + ev_valid = make_evidence({"status": MagicMock(value="valid")}) + ev_failed = make_evidence({"status": MagicMock(value="failed")}) + with patch("compliance.api.evidence_routes.EvidenceRepository") as MockRepo: + MockRepo.return_value.get_all.return_value = [ev_valid, ev_failed] + response = client.get("/evidence", params={"status": "valid"}) + assert response.status_code == 200 + # The route filters in-memory by status enum + data = response.json() + # At least it returns without error (status enum matching may differ with mocks) + assert "evidence" in data + + def 
test_list_filter_invalid_status(self): + """Invalid status value should be ignored (no crash).""" + with patch("compliance.api.evidence_routes.EvidenceRepository") as MockRepo: + MockRepo.return_value.get_all.return_value = [make_evidence()] + response = client.get("/evidence", params={"status": "nonexistent_status"}) + assert response.status_code == 200 + # Invalid status is silently ignored per the try/except ValueError in the route + assert response.json()["total"] == 1 + + def test_list_control_not_found(self): + """GET /evidence with nonexistent control_id returns 404.""" + with patch("compliance.api.evidence_routes.EvidenceRepository"), \ + patch("compliance.api.evidence_routes.ControlRepository") as MockCtrlRepo: + MockCtrlRepo.return_value.get_by_control_id.return_value = None + response = client.get("/evidence", params={"control_id": "NONEXISTENT-001"}) + assert response.status_code == 404 + + def test_list_pagination_slices_correctly(self): + """Pagination returns correct slice while total reflects full count.""" + items = [make_evidence({"id": f"e{i}-" + "0" * 32}) for i in range(5)] + with patch("compliance.api.evidence_routes.EvidenceRepository") as MockRepo: + MockRepo.return_value.get_all.return_value = items + response = client.get("/evidence", params={"page": 2, "limit": 2}) + assert response.status_code == 200 + data = response.json() + assert data["total"] == 5 + assert len(data["evidence"]) == 2 diff --git a/backend-compliance/tests/test_generation_routes.py b/backend-compliance/tests/test_generation_routes.py index 59cad8c..b0cb071 100644 --- a/backend-compliance/tests/test_generation_routes.py +++ b/backend-compliance/tests/test_generation_routes.py @@ -231,3 +231,161 @@ class TestGenerationRouteRegistration: paths = [r.path for r in router.routes] assert any("preview" in p for p in paths) assert any("apply" in p for p in paths) + + +# ============================================================================= +# _generate_for_type 
dispatcher +# ============================================================================= + +class TestGenerateForType: + """Tests for the _generate_for_type dispatcher function.""" + + def test_dsfa_returns_single_item_list(self): + from compliance.api.generation_routes import _generate_for_type + ctx = _make_ctx() + result = _generate_for_type("dsfa", ctx) + assert isinstance(result, list) + assert len(result) == 1 + assert "DSFA" in result[0]["title"] + + def test_vvt_dispatches_correctly(self): + from compliance.api.generation_routes import _generate_for_type + ctx = _make_ctx(processing_systems=[ + {"name": "HR System", "vendor": "SAP", "hosting": "cloud", "personal_data_categories": ["Mitarbeiter"]}, + ]) + result = _generate_for_type("vvt", ctx) + assert isinstance(result, list) + assert len(result) == 1 + assert "HR System" in result[0]["name"] + + def test_tom_dispatches_correctly(self): + from compliance.api.generation_routes import _generate_for_type + ctx = _make_ctx() + result = _generate_for_type("tom", ctx) + assert isinstance(result, list) + assert len(result) == 8 # Base TOMs + + def test_loeschfristen_dispatches_correctly(self): + from compliance.api.generation_routes import _generate_for_type + ctx = _make_ctx(processing_systems=[ + {"name": "Payroll", "personal_data_categories": ["Bankdaten"]}, + ]) + result = _generate_for_type("loeschfristen", ctx) + assert isinstance(result, list) + assert len(result) == 1 + + def test_obligation_dispatches_correctly(self): + from compliance.api.generation_routes import _generate_for_type + ctx = _make_ctx() + result = _generate_for_type("obligation", ctx) + assert isinstance(result, list) + assert len(result) == 8 # Base DSGVO obligations + + def test_invalid_doc_type_raises_value_error(self): + from compliance.api.generation_routes import _generate_for_type + ctx = _make_ctx() + with pytest.raises(ValueError, match="Unknown doc_type"): + _generate_for_type("nonexistent", ctx) + + +# 
============================================================================= +# VALID_DOC_TYPES validation +# ============================================================================= + +class TestValidDocTypes: + """Tests for doc_type validation constants.""" + + def test_valid_doc_types_contains_all_expected(self): + from compliance.api.generation_routes import VALID_DOC_TYPES + expected = {"dsfa", "vvt", "tom", "loeschfristen", "obligation"} + assert VALID_DOC_TYPES == expected + + def test_invalid_types_not_accepted(self): + from compliance.api.generation_routes import VALID_DOC_TYPES + invalid_types = ["dsgvo", "audit", "risk", "consent", "privacy", ""] + for t in invalid_types: + assert t not in VALID_DOC_TYPES, f"{t} should not be in VALID_DOC_TYPES" + + +# ============================================================================= +# Template Context edge cases +# ============================================================================= + +class TestTemplateContextEdgeCases: + """Tests for template context building and edge cases.""" + + def test_empty_company_name_still_generates(self): + """Templates should work even with empty company name.""" + ctx = _make_ctx(company_name="") + draft = generate_dsfa_draft(ctx) + assert draft["status"] == "draft" + assert "DSFA" in draft["title"] + + def test_minimal_context_generates_all_types(self): + """All generators should handle a minimal context without crashing.""" + from compliance.api.generation_routes import _generate_for_type + ctx = _make_ctx() + for doc_type in ["dsfa", "vvt", "tom", "loeschfristen", "obligation"]: + result = _generate_for_type(doc_type, ctx) + assert isinstance(result, list), f"{doc_type} should return a list" + + def test_context_with_many_processing_systems(self): + """Verify generators handle multiple processing systems correctly.""" + systems = [ + {"name": f"System-{i}", "vendor": f"Vendor-{i}", "hosting": "cloud", + "personal_data_categories": [f"Kategorie-{i}"]} + for i 
in range(5) + ] + ctx = _make_ctx(processing_systems=systems) + vvt_drafts = generate_vvt_drafts(ctx) + assert len(vvt_drafts) == 5 + # Verify sequential VVT IDs + for i, draft in enumerate(vvt_drafts): + assert draft["vvt_id"] == f"VVT-AUTO-{i+1:03d}" + + def test_context_with_multiple_ai_systems(self): + """DSFA should list all AI systems in summary.""" + ctx = _make_ctx( + has_ai_systems=True, + subject_to_ai_act=True, + ai_systems=[ + {"name": "Chatbot", "purpose": "Support", "risk_category": "limited", "has_human_oversight": True}, + {"name": "Scoring", "purpose": "Credit", "risk_category": "high", "has_human_oversight": False}, + {"name": "OCR", "purpose": "Documents", "risk_category": "minimal", "has_human_oversight": True}, + ], + ) + draft = generate_dsfa_draft(ctx) + assert len(draft["ai_systems_summary"]) == 3 + assert draft["risk_level"] == "high" + + def test_context_without_dpo_uses_empty_string(self): + """When dpo_name is empty, templates should still work.""" + ctx = _make_ctx(dpo_name="", dpo_email="") + draft = generate_dsfa_draft(ctx) + assert draft["dpo_name"] == "" + # Should still generate valid sections + assert "section_1" in draft["sections"] + + def test_all_regulatory_flags_affect_all_generators(self): + """When all regulatory flags are set, all generators should produce more output.""" + from compliance.api.generation_routes import _generate_for_type + ctx_minimal = _make_ctx() + ctx_full = _make_ctx( + subject_to_nis2=True, + subject_to_ai_act=True, + subject_to_iso27001=True, + ) + tom_minimal = _generate_for_type("tom", ctx_minimal) + tom_full = _generate_for_type("tom", ctx_full) + assert len(tom_full) > len(tom_minimal) + + obligation_minimal = _generate_for_type("obligation", ctx_minimal) + obligation_full = _generate_for_type("obligation", ctx_full) + assert len(obligation_full) > len(obligation_minimal) + + def test_dsfa_without_ai_has_empty_ai_summary(self): + """DSFA without AI systems should have empty ai_systems_summary.""" 
+ ctx = _make_ctx(has_ai_systems=False, ai_systems=[]) + draft = generate_dsfa_draft(ctx) + assert draft["ai_systems_summary"] == [] + assert draft["involves_ai"] is False diff --git a/backend-compliance/tests/test_isms_routes.py b/backend-compliance/tests/test_isms_routes.py new file mode 100644 index 0000000..da2a7a4 --- /dev/null +++ b/backend-compliance/tests/test_isms_routes.py @@ -0,0 +1,886 @@ +"""Integration tests for ISMS routes (isms_routes.py). + +Tests the ISO 27001 ISMS API endpoints using TestClient + SQLite + ORM: +- Scope CRUD + Approval +- Policy CRUD + Approval + Duplicate check +- Overview / Dashboard endpoint +- Readiness check +- Edge cases (not found, invalid data, etc.) + +Run with: cd backend-compliance && python3 -m pytest tests/test_isms_routes.py -v +""" + +import os +import sys +import pytest +from datetime import date, datetime + +from fastapi import FastAPI +from fastapi.testclient import TestClient +from sqlalchemy import create_engine, event +from sqlalchemy.orm import sessionmaker + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from classroom_engine.database import Base, get_db +from compliance.api.isms_routes import router as isms_router + +# ============================================================================= +# Test App + SQLite Setup +# ============================================================================= + +SQLALCHEMY_DATABASE_URL = "sqlite:///./test_isms.db" +engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}) +TestSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) + + +@event.listens_for(engine, "connect") +def _set_sqlite_pragma(dbapi_conn, connection_record): + """Enable foreign keys and register NOW() for SQLite.""" + cursor = dbapi_conn.cursor() + cursor.execute("PRAGMA foreign_keys=ON") + cursor.close() + dbapi_conn.create_function("NOW", 0, lambda: datetime.utcnow().isoformat()) + + +app = FastAPI() 
+app.include_router(isms_router) + + +def override_get_db(): + db = TestSessionLocal() + try: + yield db + finally: + db.close() + + +app.dependency_overrides[get_db] = override_get_db +client = TestClient(app) + + +# ============================================================================= +# Fixtures +# ============================================================================= + +@pytest.fixture(autouse=True) +def setup_db(): + """Create all tables before each test module, drop after.""" + # Import all models so Base.metadata knows about them + import compliance.db.models # noqa: F401 + Base.metadata.create_all(bind=engine) + yield + Base.metadata.drop_all(bind=engine) + + +# ============================================================================= +# Helper data builders +# ============================================================================= + +def _scope_payload(**overrides): + data = { + "scope_statement": "ISMS covers all BreakPilot digital learning operations", + "included_locations": ["Frankfurt Office", "AWS eu-central-1"], + "included_processes": ["Software Development", "Data Processing"], + "included_services": ["BreakPilot PWA", "AI Assistant"], + "excluded_items": ["Marketing Website"], + "exclusion_justification": "Static site, no user data", + } + data.update(overrides) + return data + + +def _policy_payload(policy_id="POL-ISMS-001", **overrides): + data = { + "policy_id": policy_id, + "title": "Information Security Policy", + "policy_type": "master", + "description": "Master ISMS policy", + "policy_text": "This policy establishes the framework for information security...", + "applies_to": ["All Employees"], + "review_frequency_months": 12, + "related_controls": ["GOV-001"], + "authored_by": "iso@breakpilot.de", + } + data.update(overrides) + return data + + +def _objective_payload(objective_id="OBJ-2026-001", **overrides): + data = { + "objective_id": objective_id, + "title": "Reduce Security Incidents", + "description": "Reduce 
incidents by 30%", + "category": "operational", + "specific": "Reduce from 10 to 7 per year", + "measurable": "Incident count in ticketing system", + "achievable": "Based on trend analysis", + "relevant": "Supports info sec goals", + "time_bound": "By Q4 2026", + "kpi_name": "Security Incident Count", + "kpi_target": "7", + "kpi_unit": "incidents/year", + "measurement_frequency": "monthly", + "owner": "security@breakpilot.de", + "target_date": "2026-12-31", + "related_controls": ["OPS-003"], + } + data.update(overrides) + return data + + +def _soa_payload(annex_a_control="A.5.1", **overrides): + data = { + "annex_a_control": annex_a_control, + "annex_a_title": "Policies for information security", + "annex_a_category": "organizational", + "is_applicable": True, + "applicability_justification": "Required for ISMS governance", + "implementation_status": "implemented", + "implementation_notes": "Covered by GOV-001", + "breakpilot_control_ids": ["GOV-001"], + "coverage_level": "full", + "evidence_description": "ISMS Policy v2.0", + } + data.update(overrides) + return data + + +def _finding_payload(**overrides): + data = { + "finding_type": "minor", + "iso_chapter": "9.2", + "annex_a_control": "A.5.35", + "title": "Audit schedule not documented", + "description": "No formal internal audit schedule found", + "objective_evidence": "No document in DMS", + "impact_description": "Cannot demonstrate planned approach", + "owner": "iso@breakpilot.de", + "auditor": "external.auditor@cert.de", + "due_date": "2026-03-31", + } + data.update(overrides) + return data + + +def _mgmt_review_payload(**overrides): + data = { + "title": "Q1 2026 Management Review", + "review_date": "2026-01-15", + "review_period_start": "2025-10-01", + "review_period_end": "2025-12-31", + "chairperson": "ceo@breakpilot.de", + "attendees": [ + {"name": "CEO", "role": "Chairperson"}, + {"name": "CTO", "role": "Technical Lead"}, + ], + } + data.update(overrides) + return data + + +def 
_internal_audit_payload(**overrides): + data = { + "title": "ISMS Internal Audit 2026", + "audit_type": "scheduled", + "scope_description": "Complete ISMS audit covering all chapters", + "iso_chapters_covered": ["4", "5", "6", "7", "8", "9", "10"], + "annex_a_controls_covered": ["A.5", "A.6"], + "criteria": "ISO 27001:2022", + "planned_date": "2026-03-01", + "lead_auditor": "internal.auditor@breakpilot.de", + "audit_team": ["internal.auditor@breakpilot.de", "qa@breakpilot.de"], + } + data.update(overrides) + return data + + +# ============================================================================= +# Test: ISMS Scope CRUD +# ============================================================================= + +class TestISMSScopeCRUD: + """Tests for ISMS Scope CRUD endpoints.""" + + def test_create_scope(self): + """POST /isms/scope should create a new scope.""" + r = client.post("/isms/scope", json=_scope_payload(), params={"created_by": "admin@bp.de"}) + assert r.status_code == 200 + body = r.json() + assert body["scope_statement"] == "ISMS covers all BreakPilot digital learning operations" + assert body["status"] == "draft" + assert body["version"] == "1.0" + assert "id" in body + + def test_get_scope(self): + """GET /isms/scope should return the current scope.""" + client.post("/isms/scope", json=_scope_payload(), params={"created_by": "admin@bp.de"}) + r = client.get("/isms/scope") + assert r.status_code == 200 + assert r.json()["scope_statement"] is not None + + def test_get_scope_not_found(self): + """GET /isms/scope should return 404 when no scope exists.""" + r = client.get("/isms/scope") + assert r.status_code == 404 + + def test_update_scope(self): + """PUT /isms/scope/{id} should update draft scope.""" + create = client.post("/isms/scope", json=_scope_payload(), params={"created_by": "admin@bp.de"}) + scope_id = create.json()["id"] + r = client.put( + f"/isms/scope/{scope_id}", + json={"scope_statement": "Updated scope statement"}, + 
params={"updated_by": "admin@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["scope_statement"] == "Updated scope statement" + assert r.json()["version"] == "1.1" + + def test_update_scope_not_found(self): + """PUT /isms/scope/{id} should return 404 for unknown id.""" + r = client.put( + "/isms/scope/nonexistent-id", + json={"scope_statement": "x"}, + params={"updated_by": "admin@bp.de"}, + ) + assert r.status_code == 404 + + def test_create_scope_supersedes_existing(self): + """Creating a new scope should supersede the old one.""" + client.post("/isms/scope", json=_scope_payload(), params={"created_by": "admin@bp.de"}) + client.post( + "/isms/scope", + json=_scope_payload(scope_statement="New scope v2"), + params={"created_by": "admin@bp.de"}, + ) + r = client.get("/isms/scope") + assert r.status_code == 200 + assert r.json()["scope_statement"] == "New scope v2" + + def test_approve_scope(self): + """POST /isms/scope/{id}/approve should approve scope.""" + create = client.post("/isms/scope", json=_scope_payload(), params={"created_by": "admin@bp.de"}) + scope_id = create.json()["id"] + r = client.post( + f"/isms/scope/{scope_id}/approve", + json={ + "approved_by": "ceo@breakpilot.de", + "effective_date": "2026-03-01", + "review_date": "2027-03-01", + }, + ) + assert r.status_code == 200 + assert r.json()["status"] == "approved" + assert r.json()["approved_by"] == "ceo@breakpilot.de" + + def test_approve_scope_not_found(self): + """POST /isms/scope/{id}/approve should return 404 for unknown scope.""" + r = client.post( + "/isms/scope/fake-id/approve", + json={ + "approved_by": "ceo@breakpilot.de", + "effective_date": "2026-03-01", + "review_date": "2027-03-01", + }, + ) + assert r.status_code == 404 + + def test_update_approved_scope_rejected(self): + """PUT on approved scope should return 400.""" + create = client.post("/isms/scope", json=_scope_payload(), params={"created_by": "admin@bp.de"}) + scope_id = create.json()["id"] + client.post( + 
f"/isms/scope/{scope_id}/approve", + json={ + "approved_by": "ceo@breakpilot.de", + "effective_date": "2026-03-01", + "review_date": "2027-03-01", + }, + ) + r = client.put( + f"/isms/scope/{scope_id}", + json={"scope_statement": "changed"}, + params={"updated_by": "admin@bp.de"}, + ) + assert r.status_code == 400 + assert "approved" in r.json()["detail"].lower() + + +# ============================================================================= +# Test: ISMS Policy CRUD +# ============================================================================= + +class TestISMSPolicyCRUD: + """Tests for ISMS Policy CRUD endpoints.""" + + def test_create_policy(self): + """POST /isms/policies should create a new policy.""" + r = client.post("/isms/policies", json=_policy_payload()) + assert r.status_code == 200 + body = r.json() + assert body["policy_id"] == "POL-ISMS-001" + assert body["status"] == "draft" + assert body["version"] == "1.0" + + def test_list_policies(self): + """GET /isms/policies should list all policies.""" + client.post("/isms/policies", json=_policy_payload("POL-ISMS-001")) + client.post("/isms/policies", json=_policy_payload("POL-ISMS-002", title="Access Control Policy")) + r = client.get("/isms/policies") + assert r.status_code == 200 + assert r.json()["total"] == 2 + assert len(r.json()["policies"]) == 2 + + def test_list_policies_filter_by_type(self): + """GET /isms/policies?policy_type=master should filter.""" + client.post("/isms/policies", json=_policy_payload("POL-001")) + client.post("/isms/policies", json=_policy_payload("POL-002", policy_type="operational")) + r = client.get("/isms/policies", params={"policy_type": "master"}) + assert r.status_code == 200 + assert r.json()["total"] == 1 + + def test_get_policy_by_id(self): + """GET /isms/policies/{id} should return a policy by its UUID.""" + create = client.post("/isms/policies", json=_policy_payload()) + policy_uuid = create.json()["id"] + r = client.get(f"/isms/policies/{policy_uuid}") + 
assert r.status_code == 200 + assert r.json()["policy_id"] == "POL-ISMS-001" + + def test_get_policy_by_policy_id(self): + """GET /isms/policies/{policy_id} should also match the human-readable id.""" + client.post("/isms/policies", json=_policy_payload()) + r = client.get("/isms/policies/POL-ISMS-001") + assert r.status_code == 200 + assert r.json()["title"] == "Information Security Policy" + + def test_get_policy_not_found(self): + """GET /isms/policies/{id} should return 404 for unknown policy.""" + r = client.get("/isms/policies/nonexistent") + assert r.status_code == 404 + + def test_update_policy(self): + """PUT /isms/policies/{id} should update a draft policy.""" + create = client.post("/isms/policies", json=_policy_payload()) + pid = create.json()["id"] + r = client.put( + f"/isms/policies/{pid}", + json={"title": "Updated Title"}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["title"] == "Updated Title" + + def test_update_policy_not_found(self): + """PUT /isms/policies/{id} should return 404 for unknown policy.""" + r = client.put( + "/isms/policies/fake-id", + json={"title": "x"}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 404 + + def test_duplicate_policy_id_rejected(self): + """POST /isms/policies with duplicate policy_id should return 400.""" + client.post("/isms/policies", json=_policy_payload("POL-DUP")) + r = client.post("/isms/policies", json=_policy_payload("POL-DUP")) + assert r.status_code == 400 + assert "already exists" in r.json()["detail"] + + def test_approve_policy(self): + """POST /isms/policies/{id}/approve should approve a policy.""" + create = client.post("/isms/policies", json=_policy_payload()) + pid = create.json()["id"] + r = client.post( + f"/isms/policies/{pid}/approve", + json={ + "reviewed_by": "cto@breakpilot.de", + "approved_by": "ceo@breakpilot.de", + "effective_date": "2026-03-01", + }, + ) + assert r.status_code == 200 + assert r.json()["status"] == 
"approved" + assert r.json()["approved_by"] == "ceo@breakpilot.de" + assert r.json()["next_review_date"] is not None + + def test_approve_policy_not_found(self): + """POST /isms/policies/{id}/approve should 404 for unknown policy.""" + r = client.post( + "/isms/policies/fake/approve", + json={ + "reviewed_by": "x", + "approved_by": "y", + "effective_date": "2026-03-01", + }, + ) + assert r.status_code == 404 + + def test_update_approved_policy_bumps_version(self): + """Updating an approved policy should increment major version and reset to draft.""" + create = client.post("/isms/policies", json=_policy_payload()) + pid = create.json()["id"] + client.post( + f"/isms/policies/{pid}/approve", + json={ + "reviewed_by": "cto@bp.de", + "approved_by": "ceo@bp.de", + "effective_date": "2026-03-01", + }, + ) + r = client.put( + f"/isms/policies/{pid}", + json={"title": "Updated after approval"}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["version"] == "2.0" + assert r.json()["status"] == "draft" + + +# ============================================================================= +# Test: Security Objectives +# ============================================================================= + +class TestSecurityObjectivesCRUD: + """Tests for Security Objectives endpoints.""" + + def test_create_objective(self): + """POST /isms/objectives should create a new objective.""" + r = client.post("/isms/objectives", json=_objective_payload(), params={"created_by": "iso@bp.de"}) + assert r.status_code == 200 + body = r.json() + assert body["objective_id"] == "OBJ-2026-001" + assert body["status"] == "active" + + def test_list_objectives(self): + """GET /isms/objectives should list all objectives.""" + client.post("/isms/objectives", json=_objective_payload("OBJ-001"), params={"created_by": "a"}) + client.post("/isms/objectives", json=_objective_payload("OBJ-002", title="Uptime"), params={"created_by": "a"}) + r = 
client.get("/isms/objectives") + assert r.status_code == 200 + assert r.json()["total"] == 2 + + def test_update_objective_progress(self): + """PUT /isms/objectives/{id} should update progress.""" + create = client.post("/isms/objectives", json=_objective_payload(), params={"created_by": "a"}) + oid = create.json()["id"] + r = client.put( + f"/isms/objectives/{oid}", + json={"progress_percentage": 50}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["progress_percentage"] == 50 + + def test_update_objective_auto_achieved(self): + """Setting progress to 100% should auto-set status to 'achieved'.""" + create = client.post("/isms/objectives", json=_objective_payload(), params={"created_by": "a"}) + oid = create.json()["id"] + r = client.put( + f"/isms/objectives/{oid}", + json={"progress_percentage": 100}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["status"] == "achieved" + + def test_update_objective_not_found(self): + """PUT /isms/objectives/{id} should 404 for unknown objective.""" + r = client.put( + "/isms/objectives/fake", + json={"progress_percentage": 10}, + params={"updated_by": "a"}, + ) + assert r.status_code == 404 + + +# ============================================================================= +# Test: Statement of Applicability (SoA) +# ============================================================================= + +class TestSoACRUD: + """Tests for SoA endpoints.""" + + def test_create_soa_entry(self): + """POST /isms/soa should create an SoA entry.""" + r = client.post("/isms/soa", json=_soa_payload(), params={"created_by": "iso@bp.de"}) + assert r.status_code == 200 + body = r.json() + assert body["annex_a_control"] == "A.5.1" + assert body["is_applicable"] is True + + def test_list_soa_entries(self): + """GET /isms/soa should list all SoA entries.""" + client.post("/isms/soa", json=_soa_payload("A.5.1"), params={"created_by": "a"}) + client.post("/isms/soa", 
json=_soa_payload("A.6.1", is_applicable=False, applicability_justification="N/A"), params={"created_by": "a"}) + r = client.get("/isms/soa") + assert r.status_code == 200 + assert r.json()["total"] == 2 + assert r.json()["applicable_count"] == 1 + assert r.json()["not_applicable_count"] == 1 + + def test_duplicate_soa_control_rejected(self): + """POST /isms/soa with duplicate annex_a_control should return 400.""" + client.post("/isms/soa", json=_soa_payload("A.5.1"), params={"created_by": "a"}) + r = client.post("/isms/soa", json=_soa_payload("A.5.1"), params={"created_by": "a"}) + assert r.status_code == 400 + assert "already exists" in r.json()["detail"] + + def test_update_soa_entry(self): + """PUT /isms/soa/{id} should update an SoA entry.""" + create = client.post("/isms/soa", json=_soa_payload(), params={"created_by": "a"}) + eid = create.json()["id"] + r = client.put( + f"/isms/soa/{eid}", + json={"implementation_status": "in_progress"}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["implementation_status"] == "in_progress" + assert r.json()["version"] == "1.1" + + def test_update_soa_not_found(self): + """PUT /isms/soa/{id} should 404 for unknown entry.""" + r = client.put( + "/isms/soa/fake", + json={"implementation_status": "implemented"}, + params={"updated_by": "a"}, + ) + assert r.status_code == 404 + + def test_approve_soa_entry(self): + """POST /isms/soa/{id}/approve should approve an SoA entry.""" + create = client.post("/isms/soa", json=_soa_payload(), params={"created_by": "a"}) + eid = create.json()["id"] + r = client.post( + f"/isms/soa/{eid}/approve", + json={"reviewed_by": "cto@bp.de", "approved_by": "ceo@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["approved_by"] == "ceo@bp.de" + + +# ============================================================================= +# Test: Audit Findings +# ============================================================================= + +class 
TestAuditFindingsCRUD: + """Tests for Audit Finding endpoints.""" + + def test_create_finding(self): + """POST /isms/findings should create a finding with auto-generated ID.""" + r = client.post("/isms/findings", json=_finding_payload()) + assert r.status_code == 200 + body = r.json() + assert body["finding_id"].startswith("FIND-") + assert body["status"] == "open" + + def test_list_findings(self): + """GET /isms/findings should list all findings.""" + client.post("/isms/findings", json=_finding_payload()) + client.post("/isms/findings", json=_finding_payload(finding_type="major", title="Major finding")) + r = client.get("/isms/findings") + assert r.status_code == 200 + assert r.json()["total"] == 2 + assert r.json()["major_count"] == 1 + assert r.json()["minor_count"] == 1 + + def test_update_finding(self): + """PUT /isms/findings/{id} should update a finding.""" + create = client.post("/isms/findings", json=_finding_payload()) + fid = create.json()["id"] + r = client.put( + f"/isms/findings/{fid}", + json={"root_cause": "Missing documentation process"}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["root_cause"] == "Missing documentation process" + + def test_update_finding_not_found(self): + """PUT /isms/findings/{id} should 404 for unknown finding.""" + r = client.put( + "/isms/findings/fake", + json={"root_cause": "x"}, + params={"updated_by": "a"}, + ) + assert r.status_code == 404 + + def test_close_finding_no_capas(self): + """POST /isms/findings/{id}/close should succeed if no CAPAs exist.""" + create = client.post("/isms/findings", json=_finding_payload()) + fid = create.json()["id"] + r = client.post( + f"/isms/findings/{fid}/close", + json={ + "closure_notes": "Verified corrected", + "closed_by": "auditor@cert.de", + "verification_method": "Document review", + "verification_evidence": "Updated schedule approved", + }, + ) + assert r.status_code == 200 + assert r.json()["status"] == "closed" + + def 
test_close_finding_not_found(self): + """POST /isms/findings/{id}/close should 404 for unknown finding.""" + r = client.post( + "/isms/findings/fake/close", + json={ + "closure_notes": "x", + "closed_by": "a", + "verification_method": "x", + "verification_evidence": "x", + }, + ) + assert r.status_code == 404 + + +# ============================================================================= +# Test: Management Reviews +# ============================================================================= + +class TestManagementReviewCRUD: + """Tests for Management Review endpoints.""" + + def test_create_management_review(self): + """POST /isms/management-reviews should create a review.""" + r = client.post( + "/isms/management-reviews", + json=_mgmt_review_payload(), + params={"created_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + body = r.json() + assert body["review_id"].startswith("MR-") + assert body["status"] == "draft" + + def test_list_management_reviews(self): + """GET /isms/management-reviews should list reviews.""" + client.post("/isms/management-reviews", json=_mgmt_review_payload(), params={"created_by": "a"}) + r = client.get("/isms/management-reviews") + assert r.status_code == 200 + assert r.json()["total"] == 1 + + def test_get_management_review(self): + """GET /isms/management-reviews/{id} should return a review.""" + create = client.post("/isms/management-reviews", json=_mgmt_review_payload(), params={"created_by": "a"}) + rid = create.json()["id"] + r = client.get(f"/isms/management-reviews/{rid}") + assert r.status_code == 200 + assert r.json()["chairperson"] == "ceo@breakpilot.de" + + def test_get_management_review_not_found(self): + """GET /isms/management-reviews/{id} should 404 for unknown review.""" + r = client.get("/isms/management-reviews/fake") + assert r.status_code == 404 + + def test_update_management_review(self): + """PUT /isms/management-reviews/{id} should update a review.""" + create = 
client.post("/isms/management-reviews", json=_mgmt_review_payload(), params={"created_by": "a"}) + rid = create.json()["id"] + r = client.put( + f"/isms/management-reviews/{rid}", + json={"input_previous_actions": "All actions completed", "status": "conducted"}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["input_previous_actions"] == "All actions completed" + + def test_approve_management_review(self): + """POST /isms/management-reviews/{id}/approve should approve.""" + create = client.post("/isms/management-reviews", json=_mgmt_review_payload(), params={"created_by": "a"}) + rid = create.json()["id"] + r = client.post( + f"/isms/management-reviews/{rid}/approve", + json={ + "approved_by": "ceo@bp.de", + "next_review_date": "2026-07-01", + }, + ) + assert r.status_code == 200 + assert r.json()["status"] == "approved" + assert r.json()["approved_by"] == "ceo@bp.de" + + +# ============================================================================= +# Test: Internal Audits +# ============================================================================= + +class TestInternalAuditCRUD: + """Tests for Internal Audit endpoints.""" + + def test_create_internal_audit(self): + """POST /isms/internal-audits should create an audit.""" + r = client.post( + "/isms/internal-audits", + json=_internal_audit_payload(), + params={"created_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + body = r.json() + assert body["audit_id"].startswith("IA-") + assert body["status"] == "planned" + + def test_list_internal_audits(self): + """GET /isms/internal-audits should list audits.""" + client.post("/isms/internal-audits", json=_internal_audit_payload(), params={"created_by": "a"}) + r = client.get("/isms/internal-audits") + assert r.status_code == 200 + assert r.json()["total"] == 1 + + def test_update_internal_audit(self): + """PUT /isms/internal-audits/{id} should update an audit.""" + create = client.post("/isms/internal-audits", 
json=_internal_audit_payload(), params={"created_by": "a"}) + aid = create.json()["id"] + r = client.put( + f"/isms/internal-audits/{aid}", + json={"status": "in_progress", "actual_start_date": "2026-03-01"}, + params={"updated_by": "iso@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["status"] == "in_progress" + + def test_update_internal_audit_not_found(self): + """PUT /isms/internal-audits/{id} should 404 for unknown audit.""" + r = client.put( + "/isms/internal-audits/fake", + json={"status": "in_progress"}, + params={"updated_by": "a"}, + ) + assert r.status_code == 404 + + def test_complete_internal_audit(self): + """POST /isms/internal-audits/{id}/complete should complete an audit.""" + create = client.post("/isms/internal-audits", json=_internal_audit_payload(), params={"created_by": "a"}) + aid = create.json()["id"] + r = client.post( + f"/isms/internal-audits/{aid}/complete", + json={ + "audit_conclusion": "Overall conforming with minor observations", + "overall_assessment": "conforming", + "follow_up_audit_required": False, + }, + params={"completed_by": "auditor@bp.de"}, + ) + assert r.status_code == 200 + assert r.json()["status"] == "completed" + assert r.json()["follow_up_audit_required"] is False + + def test_complete_internal_audit_not_found(self): + """POST /isms/internal-audits/{id}/complete should 404 for unknown audit.""" + r = client.post( + "/isms/internal-audits/fake/complete", + json={ + "audit_conclusion": "x", + "overall_assessment": "conforming", + "follow_up_audit_required": False, + }, + params={"completed_by": "a"}, + ) + assert r.status_code == 404 + + +# ============================================================================= +# Test: Readiness Check +# ============================================================================= + +class TestReadinessCheck: + """Tests for the ISMS Readiness Check endpoint.""" + + def test_readiness_check_empty_isms(self): + """POST /isms/readiness-check on empty DB should show 
not_ready.""" + r = client.post("/isms/readiness-check", json={"triggered_by": "test"}) + assert r.status_code == 200 + body = r.json() + assert body["certification_possible"] is False + assert body["overall_status"] == "not_ready" + assert len(body["potential_majors"]) > 0 + + def test_readiness_check_latest_not_found(self): + """GET /isms/readiness-check/latest should 404 when no check has run.""" + r = client.get("/isms/readiness-check/latest") + assert r.status_code == 404 + + def test_readiness_check_latest_returns_most_recent(self): + """GET /isms/readiness-check/latest should return last check.""" + client.post("/isms/readiness-check", json={"triggered_by": "first"}) + client.post("/isms/readiness-check", json={"triggered_by": "second"}) + r = client.get("/isms/readiness-check/latest") + assert r.status_code == 200 + assert r.json()["triggered_by"] == "second" + + +# ============================================================================= +# Test: Overview / Dashboard +# ============================================================================= + +class TestOverviewDashboard: + """Tests for the ISO 27001 overview endpoint.""" + + def test_overview_empty_isms(self): + """GET /isms/overview on empty DB should return not_ready.""" + r = client.get("/isms/overview") + assert r.status_code == 200 + body = r.json() + assert body["overall_status"] in ("not_ready", "at_risk") + assert body["scope_approved"] is False + assert body["open_major_findings"] == 0 + assert body["policies_count"] == 0 + + def test_overview_with_data(self): + """GET /isms/overview should reflect created data.""" + # Create and approve a scope + scope = client.post("/isms/scope", json=_scope_payload(), params={"created_by": "a"}) + client.post( + f"/isms/scope/{scope.json()['id']}/approve", + json={"approved_by": "ceo@bp.de", "effective_date": "2026-01-01", "review_date": "2027-01-01"}, + ) + # Create a policy + client.post("/isms/policies", json=_policy_payload()) + # Create an 
objective + client.post("/isms/objectives", json=_objective_payload(), params={"created_by": "a"}) + + r = client.get("/isms/overview") + assert r.status_code == 200 + body = r.json() + assert body["scope_approved"] is True + assert body["policies_count"] == 1 + assert body["objectives_count"] == 1 + + +# ============================================================================= +# Test: Audit Trail +# ============================================================================= + +class TestAuditTrail: + """Tests for the Audit Trail endpoint.""" + + def test_audit_trail_records_actions(self): + """Creating entities should generate audit trail entries.""" + client.post("/isms/scope", json=_scope_payload(), params={"created_by": "admin@bp.de"}) + client.post("/isms/policies", json=_policy_payload()) + r = client.get("/isms/audit-trail") + assert r.status_code == 200 + assert r.json()["total"] >= 2 + + def test_audit_trail_filter_by_entity_type(self): + """GET /isms/audit-trail?entity_type=isms_policy should filter.""" + client.post("/isms/scope", json=_scope_payload(), params={"created_by": "a"}) + client.post("/isms/policies", json=_policy_payload()) + r = client.get("/isms/audit-trail", params={"entity_type": "isms_policy"}) + assert r.status_code == 200 + for entry in r.json()["entries"]: + assert entry["entity_type"] == "isms_policy" + + def test_audit_trail_pagination(self): + """GET /isms/audit-trail should support pagination.""" + # Create several entries + for i in range(5): + client.post("/isms/policies", json=_policy_payload(f"POL-PAGI-{i:03d}")) + r = client.get("/isms/audit-trail", params={"page": 1, "page_size": 2}) + assert r.status_code == 200 + assert len(r.json()["entries"]) == 2 + assert r.json()["pagination"]["has_next"] is True diff --git a/backend-compliance/tests/test_vvt_routes.py b/backend-compliance/tests/test_vvt_routes.py index 1fe0afc..c9461b3 100644 --- a/backend-compliance/tests/test_vvt_routes.py +++ 
b/backend-compliance/tests/test_vvt_routes.py @@ -432,3 +432,337 @@ class TestVVTCsvExport: text = self._collect_csv_body(response) lines = text.strip().split('\n') assert len(lines) == 1 + + +# ============================================================================= +# API Endpoint Tests (TestClient + mock DB) +# ============================================================================= + +from fastapi.testclient import TestClient +from fastapi import FastAPI +from compliance.api.vvt_routes import router + +_app = FastAPI() +_app.include_router(router) +_client = TestClient(_app) + +DEFAULT_TENANT = "9282a473-5c95-4b3a-bf78-0ecc0ec71d3e" + + +def _make_db_activity(**kwargs): + """Create a mock VVTActivityDB object for query results.""" + act = VVTActivityDB() + act.id = kwargs.get("id", uuid.uuid4()) + act.tenant_id = kwargs.get("tenant_id", DEFAULT_TENANT) + act.vvt_id = kwargs.get("vvt_id", "VVT-001") + act.name = kwargs.get("name", "Test Verarbeitung") + act.description = kwargs.get("description", None) + act.purposes = kwargs.get("purposes", ["Vertragserfuellung"]) + act.legal_bases = kwargs.get("legal_bases", ["Art. 6 Abs. 
1b"]) + act.data_subject_categories = kwargs.get("data_subject_categories", ["Kunden"]) + act.personal_data_categories = kwargs.get("personal_data_categories", ["Email"]) + act.recipient_categories = kwargs.get("recipient_categories", []) + act.third_country_transfers = kwargs.get("third_country_transfers", []) + act.retention_period = kwargs.get("retention_period", {"duration": "3 Jahre"}) + act.tom_description = kwargs.get("tom_description", None) + act.business_function = kwargs.get("business_function", "IT") + act.systems = kwargs.get("systems", []) + act.deployment_model = kwargs.get("deployment_model", None) + act.data_sources = kwargs.get("data_sources", []) + act.data_flows = kwargs.get("data_flows", []) + act.protection_level = kwargs.get("protection_level", "MEDIUM") + act.dpia_required = kwargs.get("dpia_required", False) + act.structured_toms = kwargs.get("structured_toms", {}) + act.status = kwargs.get("status", "DRAFT") + act.responsible = kwargs.get("responsible", None) + act.owner = kwargs.get("owner", None) + act.last_reviewed_at = kwargs.get("last_reviewed_at", None) + act.next_review_at = kwargs.get("next_review_at", None) + act.created_by = kwargs.get("created_by", "system") + act.dsfa_id = kwargs.get("dsfa_id", None) + act.created_at = kwargs.get("created_at", datetime(2026, 1, 15, 10, 0)) + act.updated_at = kwargs.get("updated_at", None) + return act + + +def _make_db_org(**kwargs): + """Create a mock VVTOrganizationDB object.""" + org = VVTOrganizationDB() + org.id = kwargs.get("id", uuid.uuid4()) + org.tenant_id = kwargs.get("tenant_id", DEFAULT_TENANT) + org.organization_name = kwargs.get("organization_name", "BreakPilot GmbH") + org.industry = kwargs.get("industry", "IT") + org.locations = kwargs.get("locations", ["Berlin"]) + org.employee_count = kwargs.get("employee_count", 50) + org.dpo_name = kwargs.get("dpo_name", "Max DSB") + org.dpo_contact = kwargs.get("dpo_contact", "dsb@example.com") + org.vvt_version = kwargs.get("vvt_version", 
"1.0") + org.last_review_date = kwargs.get("last_review_date", None) + org.next_review_date = kwargs.get("next_review_date", None) + org.review_interval = kwargs.get("review_interval", "annual") + org.created_at = kwargs.get("created_at", datetime(2026, 1, 1)) + org.updated_at = kwargs.get("updated_at", None) + return org + + +def _make_audit_entry(**kwargs): + """Create a mock VVTAuditLogDB object.""" + entry = VVTAuditLogDB() + entry.id = kwargs.get("id", uuid.uuid4()) + entry.tenant_id = kwargs.get("tenant_id", DEFAULT_TENANT) + entry.action = kwargs.get("action", "CREATE") + entry.entity_type = kwargs.get("entity_type", "activity") + entry.entity_id = kwargs.get("entity_id", uuid.uuid4()) + entry.changed_by = kwargs.get("changed_by", "system") + entry.old_values = kwargs.get("old_values", None) + entry.new_values = kwargs.get("new_values", {"name": "Test"}) + entry.created_at = kwargs.get("created_at", datetime(2026, 1, 15, 10, 0)) + return entry + + +@pytest.fixture +def mock_db(): + from classroom_engine.database import get_db + from compliance.api.tenant_utils import get_tenant_id + db = MagicMock() + _app.dependency_overrides[get_db] = lambda: db + _app.dependency_overrides[get_tenant_id] = lambda: DEFAULT_TENANT + yield db + _app.dependency_overrides.clear() + + +class TestExportEndpoint: + """Tests for GET /vvt/export (JSON and CSV).""" + + def test_export_json_with_activities(self, mock_db): + act = _make_db_activity(vvt_id="VVT-EXP-001", name="Export Test") + org = _make_db_org() + # mock chained query for org + mock_db.query.return_value.filter.return_value.order_by.return_value.first.return_value = org + # mock chained query for activities + mock_db.query.return_value.filter.return_value.order_by.return_value.all.return_value = [act] + + resp = _client.get("/vvt/export?format=json") + assert resp.status_code == 200 + data = resp.json() + assert "exported_at" in data + assert "organization" in data + assert data["organization"]["name"] == "BreakPilot 
GmbH" + assert len(data["activities"]) == 1 + assert data["activities"][0]["vvt_id"] == "VVT-EXP-001" + + def test_export_json_empty_dataset(self, mock_db): + mock_db.query.return_value.filter.return_value.order_by.return_value.first.return_value = None + mock_db.query.return_value.filter.return_value.order_by.return_value.all.return_value = [] + + resp = _client.get("/vvt/export?format=json") + assert resp.status_code == 200 + data = resp.json() + assert data["organization"] is None + assert data["activities"] == [] + + def test_export_csv_returns_streaming_response(self, mock_db): + act = _make_db_activity(vvt_id="VVT-CSV-E01", name="CSV Endpoint Test") + mock_db.query.return_value.filter.return_value.order_by.return_value.first.return_value = None + mock_db.query.return_value.filter.return_value.order_by.return_value.all.return_value = [act] + + resp = _client.get("/vvt/export?format=csv") + assert resp.status_code == 200 + assert "text/csv" in resp.headers.get("content-type", "") + assert "attachment" in resp.headers.get("content-disposition", "") + body = resp.text + assert "VVT-CSV-E01" in body + assert "CSV Endpoint Test" in body + + def test_export_invalid_format_rejected(self, mock_db): + resp = _client.get("/vvt/export?format=xml") + assert resp.status_code == 422 # validation error + + +class TestStatsEndpoint: + """Tests for GET /vvt/stats.""" + + def test_stats_empty_tenant(self, mock_db): + mock_db.query.return_value.filter.return_value.all.return_value = [] + + resp = _client.get("/vvt/stats") + assert resp.status_code == 200 + data = resp.json() + assert data["total"] == 0 + assert data["by_status"] == {} + assert data["dpia_required_count"] == 0 + assert data["overdue_review_count"] == 0 + + def test_stats_with_activities(self, mock_db): + past = datetime(2025, 1, 1, tzinfo=timezone.utc) + acts = [ + _make_db_activity(status="DRAFT", business_function="HR", dpia_required=True, next_review_at=past), + _make_db_activity(status="APPROVED", 
business_function="IT", dpia_required=False), + _make_db_activity(status="DRAFT", business_function="HR", dpia_required=False, third_country_transfers=["USA"]), + ] + mock_db.query.return_value.filter.return_value.all.return_value = acts + + resp = _client.get("/vvt/stats") + assert resp.status_code == 200 + data = resp.json() + assert data["total"] == 3 + assert data["by_status"]["DRAFT"] == 2 + assert data["by_status"]["APPROVED"] == 1 + assert data["by_business_function"]["HR"] == 2 + assert data["by_business_function"]["IT"] == 1 + assert data["dpia_required_count"] == 1 + assert data["third_country_count"] == 1 + assert data["draft_count"] == 2 + assert data["approved_count"] == 1 + assert data["overdue_review_count"] == 1 + + +class TestAuditLogEndpoint: + """Tests for GET /vvt/audit-log.""" + + def test_audit_log_returns_entries(self, mock_db): + entry = _make_audit_entry(action="CREATE", entity_type="activity") + mock_db.query.return_value.filter.return_value.order_by.return_value.offset.return_value.limit.return_value.all.return_value = [entry] + + resp = _client.get("/vvt/audit-log") + assert resp.status_code == 200 + data = resp.json() + assert len(data) == 1 + assert data[0]["action"] == "CREATE" + assert data[0]["entity_type"] == "activity" + + def test_audit_log_empty(self, mock_db): + mock_db.query.return_value.filter.return_value.order_by.return_value.offset.return_value.limit.return_value.all.return_value = [] + + resp = _client.get("/vvt/audit-log") + assert resp.status_code == 200 + assert resp.json() == [] + + def test_audit_log_pagination_params(self, mock_db): + mock_db.query.return_value.filter.return_value.order_by.return_value.offset.return_value.limit.return_value.all.return_value = [] + + resp = _client.get("/vvt/audit-log?limit=10&offset=20") + assert resp.status_code == 200 + + +class TestVersioningEndpoints: + """Tests for GET /vvt/activities/{id}/versions and /versions/{v}.""" + + 
@patch("compliance.api.versioning_utils.list_versions") + def test_list_versions_returns_list(self, mock_list_versions, mock_db): + act_id = str(uuid.uuid4()) + mock_list_versions.return_value = [ + {"id": str(uuid.uuid4()), "version_number": 2, "status": "draft", + "change_summary": "Updated name", "changed_sections": [], + "created_by": "admin", "approved_by": None, "approved_at": None, + "created_at": "2026-01-15T10:00:00"}, + {"id": str(uuid.uuid4()), "version_number": 1, "status": "draft", + "change_summary": "Initial", "changed_sections": [], + "created_by": "system", "approved_by": None, "approved_at": None, + "created_at": "2026-01-14T09:00:00"}, + ] + + resp = _client.get(f"/vvt/activities/{act_id}/versions") + assert resp.status_code == 200 + data = resp.json() + assert len(data) == 2 + assert data[0]["version_number"] == 2 + assert data[1]["version_number"] == 1 + + @patch("compliance.api.versioning_utils.list_versions") + def test_list_versions_empty(self, mock_list_versions, mock_db): + act_id = str(uuid.uuid4()) + mock_list_versions.return_value = [] + + resp = _client.get(f"/vvt/activities/{act_id}/versions") + assert resp.status_code == 200 + assert resp.json() == [] + + @patch("compliance.api.versioning_utils.get_version") + def test_get_specific_version(self, mock_get_version, mock_db): + act_id = str(uuid.uuid4()) + mock_get_version.return_value = { + "id": str(uuid.uuid4()), + "version_number": 1, + "status": "approved", + "snapshot": {"name": "Test", "status": "APPROVED"}, + "change_summary": "Initial version", + "changed_sections": ["name", "status"], + "created_by": "admin", + "approved_by": "dpo", + "approved_at": "2026-01-16T12:00:00", + "created_at": "2026-01-15T10:00:00", + } + + resp = _client.get(f"/vvt/activities/{act_id}/versions/1") + assert resp.status_code == 200 + data = resp.json() + assert data["version_number"] == 1 + assert data["snapshot"]["name"] == "Test" + assert data["approved_by"] == "dpo" + + 
@patch("compliance.api.versioning_utils.get_version") + def test_get_version_not_found(self, mock_get_version, mock_db): + act_id = str(uuid.uuid4()) + mock_get_version.return_value = None + + resp = _client.get(f"/vvt/activities/{act_id}/versions/999") + assert resp.status_code == 404 + assert "not found" in resp.json()["detail"].lower() + + +class TestExportCsvEdgeCases: + """Additional edge cases for CSV export helper.""" + + def _collect_csv_body(self, response) -> str: + import asyncio + async def _read(): + chunks = [] + async for chunk in response.body_iterator: + chunks.append(chunk) + return ''.join(chunks) + return asyncio.get_event_loop().run_until_complete(_read()) + + def test_export_csv_with_third_country_transfers(self): + from compliance.api.vvt_routes import _export_csv + act = _make_db_activity( + third_country_transfers=["USA", "China"], + vvt_id="VVT-TC-001", + name="Third Country Test", + ) + response = _export_csv([act]) + text = self._collect_csv_body(response) + assert "Ja" in text # third_country_transfers truthy -> "Ja" + + def test_export_csv_no_third_country_transfers(self): + from compliance.api.vvt_routes import _export_csv + act = _make_db_activity( + third_country_transfers=[], + vvt_id="VVT-NTC-001", + name="No Third Country", + ) + response = _export_csv([act]) + text = self._collect_csv_body(response) + assert "Nein" in text # empty list -> "Nein" + + def test_export_csv_multiple_activities(self): + from compliance.api.vvt_routes import _export_csv + acts = [ + _make_db_activity(vvt_id="VVT-M-001", name="First"), + _make_db_activity(vvt_id="VVT-M-002", name="Second"), + _make_db_activity(vvt_id="VVT-M-003", name="Third"), + ] + response = _export_csv(acts) + text = self._collect_csv_body(response) + lines = text.strip().split('\n') + # 1 header + 3 data rows + assert len(lines) == 4 + assert "VVT-M-001" in lines[1] + assert "VVT-M-002" in lines[2] + assert "VVT-M-003" in lines[3] + + def 
test_export_csv_content_disposition_filename(self): + from compliance.api.vvt_routes import _export_csv + response = _export_csv([]) + assert "vvt_export_" in response.headers.get("content-disposition", "") + assert ".csv" in response.headers.get("content-disposition", "") diff --git a/pca-platform/README.md b/pca-platform/README.md deleted file mode 100644 index 38375c4..0000000 --- a/pca-platform/README.md +++ /dev/null @@ -1,243 +0,0 @@ -# PCA Platform - Person-Corporate-Agent - -Plattform zur Monetarisierung von KI-Crawler-Zugriffen und Human-vs-Bot-Erkennung. - -## Übersicht - -Die PCA Platform ermöglicht Website-Betreibern: -1. **Bot-Erkennung**: Unterscheidung zwischen Menschen und Bots durch Verhaltensheuristiken -2. **Step-Up-Verification**: WebAuthn oder Proof-of-Work für verdächtige Besucher -3. **Monetarisierung**: KI-Crawler können gegen Micropayment Zugriff erhalten (HTTP 402) - -## Architektur - -``` -┌────────────────────┐ ┌────────────────────┐ ┌──────────────────┐ -│ Website │────▶│ PCA Heuristic │────▶│ Redis │ -│ + PCA SDK │ │ Service │ │ Session Store │ -└────────────────────┘ └────────────────────┘ └──────────────────┘ - │ │ - │ ▼ - │ ┌────────────────────┐ - │ │ Payment Gateway │ (Future) - │ │ HTTP 402 │ - │ └────────────────────┘ - │ - ▼ -┌────────────────────┐ -│ ai-access.json │ -│ Policy Config │ -└────────────────────┘ -``` - -## Komponenten - -### 1. Heuristic Service (Go) -- Port: 8085 -- Berechnet Human-Score basierend auf Verhaltensmetriken -- Verwaltet Step-Up-Verifikation (WebAuthn, PoW) - -### 2. JavaScript SDK -- Sammelt Verhaltensmetriken (Scroll, Mouse, Clicks) -- Sendet Ticks an Backend -- Führt Step-Up bei Bedarf durch - -### 3. 
ai-access.json -- Policy-Datei für Zugriffsregeln -- Definiert Preise pro Rolle/Bot -- Konfiguriert Schwellenwerte - -## Quick Start - -```bash -cd pca-platform -docker compose up -d -``` - -Services: -- Heuristic Service: http://localhost:8085 -- Demo Site: http://localhost:8087 -- Redis: localhost:6380 - -## API Endpoints - -### Heuristic Service - -| Method | Endpoint | Beschreibung | -|--------|----------|--------------| -| GET | `/health` | Health Check | -| GET | `/pca/v1/config` | Client Config | -| POST | `/pca/v1/tick` | Metrics empfangen | -| GET | `/pca/v1/evaluate` | Score auswerten | -| GET | `/pca/v1/webauthn-challenge` | WebAuthn Challenge | -| POST | `/pca/v1/webauthn-verify` | WebAuthn verifizieren | -| GET | `/pca/v1/pow-challenge` | PoW Challenge | -| POST | `/pca/v1/pow-verify` | PoW verifizieren | - -### Tick Request - -```json -{ - "session_id": "pca_xxx", - "dwell_ratio": 0.85, - "scroll_depth": 45.0, - "clicks": 5, - "mouse_moves": 120, - "ts": 1702828800000 -} -``` - -### Tick Response - -```json -{ - "session_id": "pca_xxx", - "score": 0.72, - "action": "allow", - "message": "Human behavior detected" -} -``` - -## ai-access.json Konfiguration - -```json -{ - "thresholds": { - "score_pass": 0.7, - "score_challenge": 0.4 - }, - "weights": { - "dwell_ratio": 0.30, - "scroll_score": 0.25, - "pointer_variance": 0.20, - "click_rate": 0.25 - }, - "step_up": { - "methods": ["webauthn", "pow"], - "primary": "webauthn" - }, - "pca_roles": { - "Person": { "access": "allow", "price": null }, - "Agent": { "access": "charge", "price": "0.001 EUR" } - } -} -``` - -## SDK Integration - -### Vanilla JavaScript - -```html - - -``` - -### React - -```jsx -import { useEffect, useState } from 'react'; - -function ProtectedContent() { - const [verified, setVerified] = useState(false); - - useEffect(() => { - PCA.init(config); - PCA.onScoreUpdate(async (score, action) => { - if (score >= 0.7) { - setVerified(true); - } else if (action === 'challenge') { - const 
success = await PCA.triggerStepUp(); - if (success) setVerified(true); - } - }); - }, []); - - if (!verified) return

Verifying...

; - return
Protected Content
; -} -``` - -## Heuristiken - -| Metrik | Gewicht | Beschreibung | -|--------|---------|--------------| -| `dwell_ratio` | 30% | Sichtbare Verweildauer / Gesamtzeit | -| `scroll_score` | 25% | Maximale Scrolltiefe (0-100%) | -| `pointer_variance` | 20% | Mausbewegungsmuster (Varianz) | -| `click_rate` | 25% | Klicks pro Sekunde + Intervall-Varianz | - -### Score-Interpretation - -| Score | Bedeutung | Aktion | -|-------|-----------|--------| -| ≥0.7 | Wahrscheinlich Mensch | Allow | -| 0.4-0.7 | Unsicher | Optional Challenge | -| <0.4 | Wahrscheinlich Bot | Challenge erforderlich | - -## Step-Up Methoden - -### WebAuthn -- Biometrische Authentifizierung (FaceID, TouchID) -- Hardware Security Keys -- Höchste Sicherheit - -### Proof-of-Work -- Client löst SHA-256 Puzzle -- Kein User-Input nötig -- Bots werden gebremst - -## GDPR Compliance - -Die Plattform ist GDPR-konform: -- ✅ Keine personenbezogenen Daten -- ✅ Keine Cookies -- ✅ IP-Anonymisierung möglich -- ✅ Nur aggregierte Metriken - -## Entwicklung - -### Tests ausführen - -```bash -cd heuristic-service -go test -v ./... -``` - -### Service lokal starten - -```bash -cd heuristic-service -go run ./cmd/server -``` - -## Roadmap - -- [ ] Payment Gateway (HTTP 402) -- [ ] Stablecoin Integration (USDC, EURC) -- [ ] Lightning Network Support -- [ ] Publisher Dashboard -- [ ] Agent SDK für KI-Crawler -- [ ] WordPress Plugin -- [ ] Nginx Module - -## Integration mit BreakPilot - -Die PCA Platform kann in BreakPilot integriert werden: - -1. **Admin-Bereich schützen**: Bot-Schutz für Consent-Management -2. **API monetarisieren**: EduSearch-Daten gegen Zahlung verfügbar machen -3. 
**Legal Crawler**: Als zahlender Agent auf andere Seiten zugreifen - -## Lizenz - -MIT License - Kommerziell nutzbar diff --git a/pca-platform/ai-access.json b/pca-platform/ai-access.json deleted file mode 100644 index 145cfc2..0000000 --- a/pca-platform/ai-access.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "thresholds": { - "score_pass": 0.7, - "score_challenge": 0.4 - }, - "weights": { - "dwell_ratio": 0.30, - "scroll_score": 0.25, - "pointer_variance": 0.20, - "click_rate": 0.25 - }, - "step_up": { - "methods": ["webauthn", "pow"], - "primary": "webauthn", - "webauthn": { - "enabled": true, - "userVerification": "preferred", - "timeout_ms": 60000, - "challenge_endpoint": "/pca/v1/webauthn-challenge" - }, - "pow": { - "enabled": true, - "difficulty": 4, - "max_duration_ms": 5000 - } - }, - "tick": { - "endpoint": "/pca/v1/tick", - "interval_ms": 5000 - }, - "paths": { - "/api/*": { - "min_score": 0.7, - "step_up_method": "webauthn" - }, - "/admin/*": { - "min_score": 0.8, - "step_up_method": "webauthn" - }, - "/public/*": { - "min_score": 0.0, - "step_up_method": null - }, - "default": { - "min_score": 0.4, - "step_up_method": "pow" - } - }, - "pca_roles": { - "Person": { - "description": "Verified human visitor", - "access": "allow", - "price": null - }, - "Corporate": { - "description": "Verified business entity", - "access": "allow", - "price": null - }, - "Agent": { - "description": "AI/Bot agent", - "access": "charge", - "price": { - "amount": "0.001", - "currency": "EUR", - "per": "request" - } - } - }, - "payment": { - "enabled": true, - "methods": ["EURC", "USDC", "Lightning"], - "wallet_address": null, - "min_balance": "0.01" - }, - "compliance": { - "gdpr": true, - "anonymize_ip": true, - "no_cookies": true, - "no_pii": true - } -} diff --git a/pca-platform/demo/index.html b/pca-platform/demo/index.html deleted file mode 100644 index 0c3b349..0000000 --- a/pca-platform/demo/index.html +++ /dev/null @@ -1,444 +0,0 @@ - - - - - - PCA Platform Demo - Human vs 
Bot Detection - - - -
-
-

PCA Platform Demo

-

Person - Corporate - Agent | Human vs Bot Detection

-
- -
-
-
- 0.00 - Human Score -
-
-

Status: Initializing...

-

Collecting behavioral data...

-
-
-
0%
-
Dwell Time
-
-
-
0%
-
Scroll Depth
-
-
-
0
-
Clicks
-
-
-
0
-
Mouse Moves
-
-
-
-
-
- -
-

How It Works

-

- The PCA SDK analyzes your browsing behavior to distinguish humans from bots. - It tracks metrics like scroll depth, mouse movements, click patterns, and dwell time - - all without collecting personal information. -

-

- Scroll down, move your mouse, and click around - to increase your human score. Once you reach a score of 0.7+, you'll be recognized as human. -

-
- -
-

Test the SDK

-
- - - -
-
- -
-

Protected Content

-

This content is protected and requires a human score of 0.7 or higher to access:

-
-

Content locked. Increase your score to unlock.

-
-
- -
-

More Content (Scroll Test)

-

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.

-

Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.

-

Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.

-

Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

-
- -
-

Event Log

-
-
[--:--:--] SDK initializing...
-
-
- -
-

PCA Platform v0.1.0 | GDPR Compliant | No PII Collected

-
-
- - - - - - diff --git a/pca-platform/docker-compose.yml b/pca-platform/docker-compose.yml deleted file mode 100644 index 25f1cab..0000000 --- a/pca-platform/docker-compose.yml +++ /dev/null @@ -1,81 +0,0 @@ -version: '3.8' - -services: - # Heuristic Service - Human vs Bot detection - heuristic-service: - build: - context: ./heuristic-service - dockerfile: Dockerfile - container_name: pca-heuristic-service - ports: - - "8085:8085" - environment: - - PORT=8085 - - GIN_MODE=release - - CONFIG_PATH=/app/ai-access.json - - REDIS_URL=redis://redis:6379 - volumes: - - ./ai-access.json:/app/ai-access.json:ro - depends_on: - - redis - networks: - - pca-network - healthcheck: - test: ["CMD", "wget", "-q", "--spider", "http://localhost:8085/health"] - interval: 30s - timeout: 10s - retries: 3 - - # Payment Gateway - HTTP 402 Handler (future) - # payment-gateway: - # build: - # context: ./payment-gateway - # dockerfile: Dockerfile - # container_name: pca-payment-gateway - # ports: - # - "8086:8086" - # environment: - # - PORT=8086 - # - HEURISTIC_SERVICE_URL=http://heuristic-service:8085 - # depends_on: - # - heuristic-service - # networks: - # - pca-network - - # Redis for session storage - redis: - image: redis:7-alpine - container_name: pca-redis - ports: - - "6380:6379" - volumes: - - pca-redis-data:/data - networks: - - pca-network - healthcheck: - test: ["CMD", "redis-cli", "ping"] - interval: 10s - timeout: 5s - retries: 3 - - # Demo website to test the SDK - demo-site: - image: nginx:alpine - container_name: pca-demo-site - ports: - - "8087:80" - volumes: - - ./demo:/usr/share/nginx/html:ro - - ./sdk/js/src:/usr/share/nginx/html/sdk:ro - - ./ai-access.json:/usr/share/nginx/html/ai-access.json:ro - depends_on: - - heuristic-service - networks: - - pca-network - -networks: - pca-network: - driver: bridge - -volumes: - pca-redis-data: diff --git a/pca-platform/heuristic-service/Dockerfile b/pca-platform/heuristic-service/Dockerfile deleted file mode 100644 index 
9f53698..0000000 --- a/pca-platform/heuristic-service/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -# Build stage -FROM golang:1.21-alpine AS builder - -WORKDIR /app - -# Install dependencies -RUN apk add --no-cache git - -# Copy go mod files -COPY go.mod ./ - -# Initialize module and download dependencies -RUN go mod tidy || true - -# Copy source code -COPY . . - -# Build the binary -RUN CGO_ENABLED=0 GOOS=linux go build -o /heuristic-service ./cmd/server - -# Runtime stage -FROM alpine:3.19 - -WORKDIR /app - -# Install ca-certificates for HTTPS -RUN apk add --no-cache ca-certificates wget - -# Copy binary from builder -COPY --from=builder /heuristic-service /app/heuristic-service - -# Expose port -EXPOSE 8085 - -# Set environment variables -ENV PORT=8085 -ENV GIN_MODE=release -ENV CONFIG_PATH=/app/ai-access.json - -# Run the service -CMD ["/app/heuristic-service"] diff --git a/pca-platform/heuristic-service/cmd/server/main.go b/pca-platform/heuristic-service/cmd/server/main.go deleted file mode 100644 index 35221fb..0000000 --- a/pca-platform/heuristic-service/cmd/server/main.go +++ /dev/null @@ -1,84 +0,0 @@ -package main - -import ( - "log" - "os" - - "github.com/gin-gonic/gin" - - "github.com/breakpilot/pca-platform/heuristic-service/internal/api" - "github.com/breakpilot/pca-platform/heuristic-service/internal/config" -) - -func main() { - // Load configuration - configPath := os.Getenv("CONFIG_PATH") - if configPath == "" { - configPath = "ai-access.json" - } - - cfg, err := config.LoadFromFile(configPath) - if err != nil { - log.Printf("Warning: Could not load config from %s, using defaults: %v", configPath, err) - cfg = config.DefaultConfig() - } - - // Create handler - handler := api.NewHandler(cfg) - - // Start cleanup routine - handler.StartCleanupRoutine() - - // Setup Gin router - if os.Getenv("GIN_MODE") == "" { - gin.SetMode(gin.ReleaseMode) - } - - r := gin.Default() - - // Enable CORS - r.Use(func(c *gin.Context) { - 
c.Header("Access-Control-Allow-Origin", "*") - c.Header("Access-Control-Allow-Methods", "GET, POST, OPTIONS") - c.Header("Access-Control-Allow-Headers", "Content-Type, Authorization, X-PCA-Session") - if c.Request.Method == "OPTIONS" { - c.AbortWithStatus(204) - return - } - c.Next() - }) - - // Health endpoint - r.GET("/health", handler.HandleHealth) - - // PCA API v1 - v1 := r.Group("/pca/v1") - { - // Configuration endpoint (for client SDK) - v1.GET("/config", handler.HandleGetConfig) - - // Tick endpoint (receives behavioral metrics) - v1.POST("/tick", handler.HandleTick) - - // Evaluation endpoint - v1.GET("/evaluate", handler.HandleEvaluate) - - // WebAuthn step-up - v1.GET("/webauthn-challenge", handler.HandleWebAuthnChallenge) - v1.POST("/webauthn-verify", handler.HandleWebAuthnVerify) - - // Proof-of-Work step-up - v1.GET("/pow-challenge", handler.HandlePoWChallenge) - v1.POST("/pow-verify", handler.HandlePoWVerify) - } - - // Start server - port := cfg.Port - log.Printf("PCA Heuristic Service starting on port %s", port) - log.Printf("Thresholds: pass=%.2f, challenge=%.2f", cfg.Thresholds.ScorePass, cfg.Thresholds.ScoreChallenge) - log.Printf("Step-up methods: %v (primary: %s)", cfg.StepUp.Methods, cfg.StepUp.Primary) - - if err := r.Run(":" + port); err != nil { - log.Fatalf("Failed to start server: %v", err) - } -} diff --git a/pca-platform/heuristic-service/go.mod b/pca-platform/heuristic-service/go.mod deleted file mode 100644 index b3a96ca..0000000 --- a/pca-platform/heuristic-service/go.mod +++ /dev/null @@ -1,36 +0,0 @@ -module github.com/breakpilot/pca-platform/heuristic-service - -go 1.21 - -require ( - github.com/gin-gonic/gin v1.9.1 - github.com/google/uuid v1.5.0 -) - -require ( - github.com/bytedance/sonic v1.9.1 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect - github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-playground/locales 
v0.14.1 // indirect - github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.14.0 // indirect - github.com/goccy/go-json v0.10.2 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect - github.com/leodido/go-urn v1.2.4 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect - github.com/stretchr/testify v1.8.4 // indirect - github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/ugorji/go/codec v1.2.11 // indirect - golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/pca-platform/heuristic-service/go.sum b/pca-platform/heuristic-service/go.sum deleted file mode 100644 index 391bbee..0000000 --- a/pca-platform/heuristic-service/go.sum +++ /dev/null @@ -1,89 +0,0 @@ -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= -github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= -github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= -github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= -github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= -github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= -github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= -github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= -github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/pca-platform/heuristic-service/internal/api/handlers.go b/pca-platform/heuristic-service/internal/api/handlers.go deleted file mode 100644 index 0582231..0000000 --- a/pca-platform/heuristic-service/internal/api/handlers.go +++ /dev/null @@ -1,285 +0,0 @@ -package api - -import ( - "net/http" - "time" - - "github.com/gin-gonic/gin" - "github.com/google/uuid" - - "github.com/breakpilot/pca-platform/heuristic-service/internal/config" - "github.com/breakpilot/pca-platform/heuristic-service/internal/heuristics" 
- "github.com/breakpilot/pca-platform/heuristic-service/internal/stepup" -) - -// Handler holds all API handlers -type Handler struct { - config *config.Config - scorer *heuristics.Scorer - webauthn *stepup.WebAuthnService - pow *stepup.PoWService -} - -// NewHandler creates a new API handler -func NewHandler(cfg *config.Config) *Handler { - return &Handler{ - config: cfg, - scorer: heuristics.NewScorer(cfg), - webauthn: stepup.NewWebAuthnService(&cfg.StepUp.WebAuthn), - pow: stepup.NewPoWService(&cfg.StepUp.PoW), - } -} - -// TickRequest represents metrics sent from client SDK -type TickRequest struct { - SessionID string `json:"session_id"` - Score float64 `json:"score,omitempty"` - DwellRatio float64 `json:"dwell_ratio"` - ScrollDepth float64 `json:"scroll_depth"` - Clicks int `json:"clicks"` - MouseMoves int `json:"mouse_moves"` - KeyStrokes int `json:"key_strokes,omitempty"` - TouchEvents int `json:"touch_events,omitempty"` - MouseVelocities []float64 `json:"mouse_velocities,omitempty"` - ScrollVelocities []float64 `json:"scroll_velocities,omitempty"` - ClickIntervals []float64 `json:"click_intervals,omitempty"` - Timestamp int64 `json:"ts"` -} - -// TickResponse returns the computed score and action -type TickResponse struct { - SessionID string `json:"session_id"` - Score float64 `json:"score"` - Action string `json:"action"` - StepUpMethod string `json:"step_up_method,omitempty"` - Message string `json:"message,omitempty"` -} - -// HandleTick receives tick data from client SDK -func (h *Handler) HandleTick(c *gin.Context) { - var req TickRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"}) - return - } - - // Generate session ID if not provided - if req.SessionID == "" { - req.SessionID = uuid.New().String() - } - - // Get or create session - session := h.scorer.GetOrCreateSession(req.SessionID) - - // Update metrics - totalTime := time.Since(session.StartTime).Seconds() - 
session.VisibleTime = req.DwellRatio * totalTime - session.MaxScrollPercent = req.ScrollDepth / 100.0 // Convert from percentage - session.ClickCount = req.Clicks - session.MouseMoves = req.MouseMoves - session.KeyStrokes = req.KeyStrokes - session.TouchEvents = req.TouchEvents - - if len(req.MouseVelocities) > 0 { - session.MouseVelocities = append(session.MouseVelocities, req.MouseVelocities...) - } - if len(req.ScrollVelocities) > 0 { - session.ScrollVelocities = append(session.ScrollVelocities, req.ScrollVelocities...) - } - if len(req.ClickIntervals) > 0 { - session.ClickIntervals = append(session.ClickIntervals, req.ClickIntervals...) - } - - // Calculate score - score := h.scorer.CalculateScore(req.SessionID) - - // Determine action - var action, stepUpMethod, message string - if score >= h.config.Thresholds.ScorePass { - action = "allow" - message = "Human behavior detected" - } else if score >= h.config.Thresholds.ScoreChallenge { - action = "allow" - message = "Acceptable behavior" - } else { - action = "challenge" - stepUpMethod = h.config.StepUp.Primary - message = "Additional verification required" - } - - c.JSON(http.StatusOK, TickResponse{ - SessionID: req.SessionID, - Score: score, - Action: action, - StepUpMethod: stepUpMethod, - Message: message, - }) -} - -// HandleEvaluate evaluates a session for a specific path -func (h *Handler) HandleEvaluate(c *gin.Context) { - sessionID := c.Query("session_id") - path := c.Query("path") - - if sessionID == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "session_id required"}) - return - } - - if path == "" { - path = "default" - } - - // TODO: Load path configs from ai-access.json - pathConfigs := map[string]config.PathConfig{} - - result := h.scorer.EvaluateRequest(sessionID, path, pathConfigs) - - c.JSON(http.StatusOK, result) -} - -// HandleWebAuthnChallenge creates a WebAuthn challenge -func (h *Handler) HandleWebAuthnChallenge(c *gin.Context) { - if !h.webauthn.IsEnabled() { - 
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "WebAuthn not enabled"}) - return - } - - sessionID := c.Query("session_id") - if sessionID == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "session_id required"}) - return - } - - challenge, err := h.webauthn.CreateChallenge(sessionID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create challenge"}) - return - } - - c.JSON(http.StatusOK, challenge) -} - -// HandleWebAuthnVerify verifies a WebAuthn assertion -func (h *Handler) HandleWebAuthnVerify(c *gin.Context) { - if !h.webauthn.IsEnabled() { - c.JSON(http.StatusServiceUnavailable, gin.H{"error": "WebAuthn not enabled"}) - return - } - - var req stepup.VerifyRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"}) - return - } - - verified, err := h.webauthn.VerifyChallenge(&req) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Verification failed"}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "verified": verified, - "session_id": req.SessionID, - }) -} - -// HandlePoWChallenge creates a Proof-of-Work challenge -func (h *Handler) HandlePoWChallenge(c *gin.Context) { - if !h.pow.IsEnabled() { - c.JSON(http.StatusServiceUnavailable, gin.H{"error": "PoW not enabled"}) - return - } - - sessionID := c.Query("session_id") - if sessionID == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "session_id required"}) - return - } - - challenge, err := h.pow.CreateChallenge(sessionID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create challenge"}) - return - } - - c.JSON(http.StatusOK, challenge) -} - -// HandlePoWVerify verifies a Proof-of-Work solution -func (h *Handler) HandlePoWVerify(c *gin.Context) { - if !h.pow.IsEnabled() { - c.JSON(http.StatusServiceUnavailable, gin.H{"error": "PoW not enabled"}) - return - } - - var req stepup.PoWVerifyRequest - if err := 
c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"}) - return - } - - verified, err := h.pow.VerifyChallenge(&req) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Verification failed"}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "verified": verified, - "session_id": req.SessionID, - }) -} - -// HandleGetConfig returns client-safe configuration -func (h *Handler) HandleGetConfig(c *gin.Context) { - // Return only non-sensitive config for client SDK - clientConfig := gin.H{ - "thresholds": h.config.Thresholds, - "weights": h.config.Weights, - "tick": gin.H{ - "endpoint": h.config.Tick.Endpoint, - "interval_ms": h.config.Tick.IntervalMs, - }, - "step_up": gin.H{ - "methods": h.config.StepUp.Methods, - "primary": h.config.StepUp.Primary, - "webauthn": gin.H{ - "enabled": h.config.StepUp.WebAuthn.Enabled, - "userVerification": h.config.StepUp.WebAuthn.UserVerification, - "timeout_ms": h.config.StepUp.WebAuthn.TimeoutMs, - "challenge_endpoint": h.config.StepUp.WebAuthn.ChallengeEndpoint, - }, - "pow": gin.H{ - "enabled": h.config.StepUp.PoW.Enabled, - "difficulty": h.config.StepUp.PoW.Difficulty, - "max_duration_ms": h.config.StepUp.PoW.MaxDurationMs, - }, - }, - "compliance": h.config.Compliance, - } - - c.JSON(http.StatusOK, clientConfig) -} - -// HandleHealth returns service health -func (h *Handler) HandleHealth(c *gin.Context) { - c.JSON(http.StatusOK, gin.H{ - "status": "healthy", - "service": "pca-heuristic-service", - "version": "0.1.0", - }) -} - -// StartCleanupRoutine starts background cleanup -func (h *Handler) StartCleanupRoutine() { - go func() { - ticker := time.NewTicker(5 * time.Minute) - for range ticker.C { - h.scorer.CleanupOldSessions(30 * time.Minute) - h.webauthn.CleanupExpiredChallenges() - h.pow.CleanupExpiredChallenges() - } - }() -} diff --git a/pca-platform/heuristic-service/internal/config/config.go 
b/pca-platform/heuristic-service/internal/config/config.go deleted file mode 100644 index 27e4f44..0000000 --- a/pca-platform/heuristic-service/internal/config/config.go +++ /dev/null @@ -1,151 +0,0 @@ -package config - -import ( - "encoding/json" - "os" -) - -// Config holds the heuristic service configuration -type Config struct { - Port string `json:"port"` - RedisURL string `json:"redis_url"` - JWTSecret string `json:"jwt_secret"` - - // Heuristic thresholds - Thresholds ThresholdConfig `json:"thresholds"` - - // Heuristic weights - Weights WeightConfig `json:"weights"` - - // Step-up configuration - StepUp StepUpConfig `json:"step_up"` - - // Tick configuration - Tick TickConfig `json:"tick"` - - // Compliance settings - Compliance ComplianceConfig `json:"compliance"` -} - -// ThresholdConfig defines score thresholds -type ThresholdConfig struct { - ScorePass float64 `json:"score_pass"` // Score to pass without step-up (e.g., 0.7) - ScoreChallenge float64 `json:"score_challenge"` // Score below which step-up is required (e.g., 0.4) -} - -// WeightConfig defines weights for each heuristic -type WeightConfig struct { - DwellRatio float64 `json:"dwell_ratio"` // Weight for dwell time ratio - ScrollScore float64 `json:"scroll_score"` // Weight for scroll depth - PointerVariance float64 `json:"pointer_variance"` // Weight for mouse movement patterns - ClickRate float64 `json:"click_rate"` // Weight for click interactions -} - -// StepUpConfig defines step-up verification methods -type StepUpConfig struct { - Methods []string `json:"methods"` // ["webauthn", "pow"] - Primary string `json:"primary"` // Preferred method - WebAuthn WebAuthnConfig `json:"webauthn"` - PoW PoWConfig `json:"pow"` -} - -// WebAuthnConfig for WebAuthn step-up -type WebAuthnConfig struct { - Enabled bool `json:"enabled"` - UserVerification string `json:"userVerification"` // "preferred", "required", "discouraged" - TimeoutMs int `json:"timeout_ms"` - ChallengeEndpoint string 
`json:"challenge_endpoint"` -} - -// PoWConfig for Proof-of-Work step-up -type PoWConfig struct { - Enabled bool `json:"enabled"` - Difficulty int `json:"difficulty"` // Number of leading zero bits required - MaxDurationMs int `json:"max_duration_ms"` // Max time for PoW computation -} - -// TickConfig for periodic tick submissions -type TickConfig struct { - Endpoint string `json:"endpoint"` - IntervalMs int `json:"interval_ms"` -} - -// ComplianceConfig for privacy compliance -type ComplianceConfig struct { - GDPR bool `json:"gdpr"` - AnonymizeIP bool `json:"anonymize_ip"` - NoCookies bool `json:"no_cookies"` - NoPII bool `json:"no_pii"` -} - -// PathConfig for path-specific rules -type PathConfig struct { - MinScore float64 `json:"min_score"` - StepUpMethod *string `json:"step_up_method"` // nil means no step-up -} - -// DefaultConfig returns a default configuration -func DefaultConfig() *Config { - return &Config{ - Port: getEnv("PORT", "8085"), - RedisURL: getEnv("REDIS_URL", "redis://localhost:6379"), - JWTSecret: getEnv("JWT_SECRET", "pca-secret-change-me"), - Thresholds: ThresholdConfig{ - ScorePass: 0.7, - ScoreChallenge: 0.4, - }, - Weights: WeightConfig{ - DwellRatio: 0.30, - ScrollScore: 0.25, - PointerVariance: 0.20, - ClickRate: 0.25, - }, - StepUp: StepUpConfig{ - Methods: []string{"webauthn", "pow"}, - Primary: "webauthn", - WebAuthn: WebAuthnConfig{ - Enabled: true, - UserVerification: "preferred", - TimeoutMs: 60000, - ChallengeEndpoint: "/pca/v1/webauthn-challenge", - }, - PoW: PoWConfig{ - Enabled: true, - Difficulty: 4, - MaxDurationMs: 5000, - }, - }, - Tick: TickConfig{ - Endpoint: "/pca/v1/tick", - IntervalMs: 5000, - }, - Compliance: ComplianceConfig{ - GDPR: true, - AnonymizeIP: true, - NoCookies: true, - NoPII: true, - }, - } -} - -// LoadFromFile loads configuration from a JSON file -func LoadFromFile(path string) (*Config, error) { - data, err := os.ReadFile(path) - if err != nil { - return DefaultConfig(), nil // Return default if file 
not found - } - - config := DefaultConfig() - if err := json.Unmarshal(data, config); err != nil { - return nil, err - } - - return config, nil -} - -func getEnv(key, defaultValue string) string { - if value := os.Getenv(key); value != "" { - return value - } - return defaultValue -} diff --git a/pca-platform/heuristic-service/internal/heuristics/scorer.go b/pca-platform/heuristic-service/internal/heuristics/scorer.go deleted file mode 100644 index 894759c..0000000 --- a/pca-platform/heuristic-service/internal/heuristics/scorer.go +++ /dev/null @@ -1,340 +0,0 @@ -package heuristics - -import ( - "math" - "sync" - "time" - - "github.com/breakpilot/pca-platform/heuristic-service/internal/config" -) - -// SessionMetrics holds behavioral metrics for a session -type SessionMetrics struct { - SessionID string `json:"session_id"` - StartTime time.Time `json:"start_time"` - VisibleTime float64 `json:"visible_time"` // Seconds visible - LastVisibleTS time.Time `json:"last_visible_ts"` // Last visibility timestamp - MaxScrollPercent float64 `json:"max_scroll_percent"` // 0-1 scroll depth - ClickCount int `json:"click_count"` - MouseMoves int `json:"mouse_moves"` - KeyStrokes int `json:"key_strokes"` - TouchEvents int `json:"touch_events"` - - // Advanced metrics - MouseVelocities []float64 `json:"mouse_velocities,omitempty"` // For variance calculation - ScrollVelocities []float64 `json:"scroll_velocities,omitempty"` // Scroll speed patterns - ClickIntervals []float64 `json:"click_intervals,omitempty"` // Time between clicks - - // Computed score - LastScore float64 `json:"last_score"` - LastScoreTime time.Time `json:"last_score_time"` -} - -// Scorer calculates human-likelihood scores based on behavioral heuristics -type Scorer struct { - config *config.Config - mu sync.RWMutex - sessions map[string]*SessionMetrics -} - -// NewScorer creates a new heuristic scorer -func NewScorer(cfg *config.Config) *Scorer { - return &Scorer{ - config: cfg, - sessions: 
make(map[string]*SessionMetrics), - } -} - -// GetOrCreateSession retrieves or creates a session -func (s *Scorer) GetOrCreateSession(sessionID string) *SessionMetrics { - s.mu.Lock() - defer s.mu.Unlock() - - if session, exists := s.sessions[sessionID]; exists { - return session - } - - session := &SessionMetrics{ - SessionID: sessionID, - StartTime: time.Now(), - LastVisibleTS: time.Now(), - } - s.sessions[sessionID] = session - return session -} - -// UpdateMetrics updates session metrics from a tick -func (s *Scorer) UpdateMetrics(sessionID string, metrics *SessionMetrics) { - s.mu.Lock() - defer s.mu.Unlock() - - if existing, exists := s.sessions[sessionID]; exists { - // Merge metrics - existing.VisibleTime = metrics.VisibleTime - existing.MaxScrollPercent = metrics.MaxScrollPercent - existing.ClickCount = metrics.ClickCount - existing.MouseMoves = metrics.MouseMoves - existing.KeyStrokes = metrics.KeyStrokes - existing.TouchEvents = metrics.TouchEvents - - if len(metrics.MouseVelocities) > 0 { - existing.MouseVelocities = append(existing.MouseVelocities, metrics.MouseVelocities...) - } - if len(metrics.ScrollVelocities) > 0 { - existing.ScrollVelocities = append(existing.ScrollVelocities, metrics.ScrollVelocities...) - } - if len(metrics.ClickIntervals) > 0 { - existing.ClickIntervals = append(existing.ClickIntervals, metrics.ClickIntervals...) 
- } - } else { - s.sessions[sessionID] = metrics - } -} - -// CalculateScore computes the human-likelihood score for a session -func (s *Scorer) CalculateScore(sessionID string) float64 { - s.mu.RLock() - session, exists := s.sessions[sessionID] - if !exists { - s.mu.RUnlock() - return 0.0 - } - s.mu.RUnlock() - - weights := s.config.Weights - - // Calculate individual heuristic scores (0-1) - dwellScore := s.calculateDwellScore(session) - scrollScore := s.calculateScrollScore(session) - pointerScore := s.calculatePointerScore(session) - clickScore := s.calculateClickScore(session) - - // Weighted sum - totalScore := dwellScore*weights.DwellRatio + - scrollScore*weights.ScrollScore + - pointerScore*weights.PointerVariance + - clickScore*weights.ClickRate - - // Clamp to [0, 1] - if totalScore > 1.0 { - totalScore = 1.0 - } - if totalScore < 0.0 { - totalScore = 0.0 - } - - // Update session with score - s.mu.Lock() - session.LastScore = totalScore - session.LastScoreTime = time.Now() - s.mu.Unlock() - - return totalScore -} - -// calculateDwellScore: visible time / total time ratio -func (s *Scorer) calculateDwellScore(session *SessionMetrics) float64 { - totalTime := time.Since(session.StartTime).Seconds() - if totalTime <= 0 { - return 0.0 - } - - // Calculate visible time including current period if visible - visibleTime := session.VisibleTime - - ratio := visibleTime / totalTime - if ratio > 1.0 { - ratio = 1.0 - } - - // Apply sigmoid to reward longer dwell times - // A 30+ second dwell with high visibility is very human-like - return sigmoid(ratio, 0.5, 10) -} - -// calculateScrollScore: scroll depth and natural patterns -func (s *Scorer) calculateScrollScore(session *SessionMetrics) float64 { - // Base score from scroll depth - baseScore := session.MaxScrollPercent - if baseScore > 1.0 { - baseScore = 1.0 - } - - // Bonus for natural scroll velocity patterns (humans have variable scroll speeds) - if len(session.ScrollVelocities) > 2 { - variance := 
calculateVariance(session.ScrollVelocities) - // Too uniform = bot, some variance = human - if variance > 0.01 && variance < 10.0 { - baseScore *= 1.2 // Boost for natural variance - } - } - - if baseScore > 1.0 { - baseScore = 1.0 - } - - return baseScore -} - -// calculatePointerScore: mouse movement patterns -func (s *Scorer) calculatePointerScore(session *SessionMetrics) float64 { - // Binary: has mouse activity at all - if session.MouseMoves == 0 && session.TouchEvents == 0 { - return 0.0 - } - - baseScore := 0.5 // Some activity - - // Humans have variable mouse velocities - if len(session.MouseVelocities) > 5 { - variance := calculateVariance(session.MouseVelocities) - // Bots often have either very uniform or very erratic movement - if variance > 0.1 && variance < 100.0 { - baseScore = 0.9 // Natural variance pattern - } else if variance <= 0.1 { - baseScore = 0.3 // Too uniform - suspicious - } else { - baseScore = 0.4 // Too erratic - also suspicious - } - } - - // Boost for touch events (mobile users) - if session.TouchEvents > 0 { - baseScore += 0.2 - } - - if baseScore > 1.0 { - baseScore = 1.0 - } - - return baseScore -} - -// calculateClickScore: click patterns -func (s *Scorer) calculateClickScore(session *SessionMetrics) float64 { - if session.ClickCount == 0 { - return 0.0 - } - - totalTime := time.Since(session.StartTime).Seconds() - if totalTime <= 0 { - return 0.0 - } - - // Clicks per second - clickRate := float64(session.ClickCount) / totalTime - - // Natural click rate is 0.1-2 clicks per second - // Too fast = bot, none = no interaction - var baseScore float64 - if clickRate > 0.05 && clickRate < 3.0 { - baseScore = 0.8 - } else if clickRate >= 3.0 { - baseScore = 0.2 // Suspiciously fast clicking - } else { - baseScore = 0.4 - } - - // Check for natural intervals between clicks - if len(session.ClickIntervals) > 2 { - variance := calculateVariance(session.ClickIntervals) - // Natural human timing has variance - if variance > 0.01 { - 
baseScore += 0.2 - } - } - - if baseScore > 1.0 { - baseScore = 1.0 - } - - return baseScore -} - -// EvaluateRequest determines action based on score -func (s *Scorer) EvaluateRequest(sessionID string, path string, pathConfigs map[string]config.PathConfig) *EvaluationResult { - score := s.CalculateScore(sessionID) - - // Get path-specific config or use defaults - minScore := s.config.Thresholds.ScoreChallenge - var stepUpMethod *string - - if cfg, exists := pathConfigs[path]; exists { - minScore = cfg.MinScore - stepUpMethod = cfg.StepUpMethod - } - - result := &EvaluationResult{ - SessionID: sessionID, - Score: score, - MinScore: minScore, - Action: "allow", - } - - if score >= s.config.Thresholds.ScorePass { - result.Action = "allow" - } else if score >= minScore { - result.Action = "allow" // In gray zone but above minimum - } else { - result.Action = "challenge" - if stepUpMethod != nil { - result.StepUpMethod = *stepUpMethod - } else { - result.StepUpMethod = s.config.StepUp.Primary - } - } - - return result -} - -// EvaluationResult contains the score evaluation outcome -type EvaluationResult struct { - SessionID string `json:"session_id"` - Score float64 `json:"score"` - MinScore float64 `json:"min_score"` - Action string `json:"action"` // "allow", "challenge", "block" - StepUpMethod string `json:"step_up_method,omitempty"` -} - -// CleanupOldSessions removes sessions older than maxAge -func (s *Scorer) CleanupOldSessions(maxAge time.Duration) { - s.mu.Lock() - defer s.mu.Unlock() - - now := time.Now() - for id, session := range s.sessions { - if now.Sub(session.StartTime) > maxAge { - delete(s.sessions, id) - } - } -} - -// Helper functions - -func calculateVariance(values []float64) float64 { - if len(values) < 2 { - return 0.0 - } - - // Calculate mean - var sum float64 - for _, v := range values { - sum += v - } - mean := sum / float64(len(values)) - - // Calculate variance - var variance float64 - for _, v := range values { - diff := v - mean - 
variance += diff * diff - } - variance /= float64(len(values) - 1) - - return variance -} - -// sigmoid applies a sigmoid transformation for smoother score curves -func sigmoid(x, midpoint, steepness float64) float64 { - return 1.0 / (1.0 + math.Exp(-steepness*(x-midpoint))) -} diff --git a/pca-platform/heuristic-service/internal/heuristics/scorer_test.go b/pca-platform/heuristic-service/internal/heuristics/scorer_test.go deleted file mode 100644 index 462ec61..0000000 --- a/pca-platform/heuristic-service/internal/heuristics/scorer_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package heuristics - -import ( - "testing" - "time" - - "github.com/breakpilot/pca-platform/heuristic-service/internal/config" -) - -func TestNewScorer(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - if scorer == nil { - t.Fatal("Expected non-nil scorer") - } - if scorer.config == nil { - t.Error("Expected config to be set") - } - if scorer.sessions == nil { - t.Error("Expected sessions map to be initialized") - } -} - -func TestGetOrCreateSession(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - // First call should create session - session1 := scorer.GetOrCreateSession("test-session-1") - if session1 == nil { - t.Fatal("Expected non-nil session") - } - if session1.SessionID != "test-session-1" { - t.Errorf("Expected session ID 'test-session-1', got '%s'", session1.SessionID) - } - - // Second call should return same session - session2 := scorer.GetOrCreateSession("test-session-1") - if session1 != session2 { - t.Error("Expected same session instance on second call") - } - - // Different ID should create new session - session3 := scorer.GetOrCreateSession("test-session-2") - if session1 == session3 { - t.Error("Expected different session for different ID") - } -} - -func TestCalculateScore_NewSession(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - // New session with no activity should have low score - 
scorer.GetOrCreateSession("test-new") - score := scorer.CalculateScore("test-new") - - if score < 0 || score > 1 { - t.Errorf("Expected score between 0 and 1, got %f", score) - } -} - -func TestCalculateScore_HighActivity(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - session := scorer.GetOrCreateSession("test-active") - session.StartTime = time.Now().Add(-30 * time.Second) - session.VisibleTime = 28.0 // High visibility - session.MaxScrollPercent = 0.8 - session.ClickCount = 10 - session.MouseMoves = 100 - session.MouseVelocities = []float64{100, 150, 80, 200, 120, 90} - session.ClickIntervals = []float64{1.5, 2.0, 1.2, 0.8} - - score := scorer.CalculateScore("test-active") - - // Active session should have higher score - if score < 0.5 { - t.Errorf("Expected score > 0.5 for active session, got %f", score) - } -} - -func TestCalculateScore_BotLikeActivity(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - session := scorer.GetOrCreateSession("test-bot") - session.StartTime = time.Now().Add(-5 * time.Second) - session.VisibleTime = 1.0 // Very short - session.MaxScrollPercent = 0.0 - session.ClickCount = 0 - session.MouseMoves = 0 - - score := scorer.CalculateScore("test-bot") - - // Bot-like session should have very low score - if score > 0.3 { - t.Errorf("Expected score < 0.3 for bot-like session, got %f", score) - } -} - -func TestCalculateScore_UniformMouseMovement(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - session := scorer.GetOrCreateSession("test-uniform") - session.StartTime = time.Now().Add(-20 * time.Second) - session.VisibleTime = 18.0 - session.MouseMoves = 50 - // Very uniform velocities (suspicious) - session.MouseVelocities = []float64{100, 100, 100, 100, 100, 100, 100, 100} - - score := scorer.CalculateScore("test-uniform") - - // Uniform movement should result in lower pointer score - if score > 0.7 { - t.Errorf("Expected score < 0.7 for uniform mouse 
movement, got %f", score) - } -} - -func TestEvaluateRequest(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - // High score session - session := scorer.GetOrCreateSession("test-evaluate") - session.StartTime = time.Now().Add(-60 * time.Second) - session.VisibleTime = 55.0 - session.MaxScrollPercent = 0.9 - session.ClickCount = 15 - session.MouseMoves = 200 - session.MouseVelocities = []float64{100, 150, 80, 200, 120, 90, 110} - - result := scorer.EvaluateRequest("test-evaluate", "/default", nil) - - if result.SessionID != "test-evaluate" { - t.Errorf("Expected session ID 'test-evaluate', got '%s'", result.SessionID) - } - if result.Action != "allow" && result.Score >= cfg.Thresholds.ScorePass { - t.Errorf("Expected 'allow' action for high score, got '%s'", result.Action) - } -} - -func TestEvaluateRequest_Challenge(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - // Low score session - scorer.GetOrCreateSession("test-challenge") - - result := scorer.EvaluateRequest("test-challenge", "/api", nil) - - if result.Action != "challenge" { - t.Errorf("Expected 'challenge' action for new session, got '%s'", result.Action) - } - if result.StepUpMethod == "" { - t.Error("Expected step-up method to be set for challenge") - } -} - -func TestCleanupOldSessions(t *testing.T) { - cfg := config.DefaultConfig() - scorer := NewScorer(cfg) - - // Create some sessions - scorer.GetOrCreateSession("session-new") - - oldSession := scorer.GetOrCreateSession("session-old") - oldSession.StartTime = time.Now().Add(-2 * time.Hour) - - // Verify both exist - if len(scorer.sessions) != 2 { - t.Errorf("Expected 2 sessions, got %d", len(scorer.sessions)) - } - - // Cleanup with 1 hour max age - scorer.CleanupOldSessions(1 * time.Hour) - - // Old session should be removed - if len(scorer.sessions) != 1 { - t.Errorf("Expected 1 session after cleanup, got %d", len(scorer.sessions)) - } - - if _, exists := scorer.sessions["session-old"]; 
exists { - t.Error("Expected old session to be cleaned up") - } -} - -func TestCalculateVariance(t *testing.T) { - tests := []struct { - name string - values []float64 - expected float64 - }{ - { - name: "empty", - values: []float64{}, - expected: 0.0, - }, - { - name: "single value", - values: []float64{5.0}, - expected: 0.0, - }, - { - name: "uniform values", - values: []float64{5.0, 5.0, 5.0, 5.0}, - expected: 0.0, - }, - { - name: "varied values", - values: []float64{1.0, 2.0, 3.0, 4.0, 5.0}, - expected: 2.5, // Variance of [1,2,3,4,5] - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := calculateVariance(tt.values) - if tt.expected == 0.0 && result != 0.0 { - t.Errorf("Expected 0 variance, got %f", result) - } - if tt.expected != 0.0 && (result < tt.expected-0.1 || result > tt.expected+0.1) { - t.Errorf("Expected variance ~%f, got %f", tt.expected, result) - } - }) - } -} - -func TestSigmoid(t *testing.T) { - // Test sigmoid at midpoint - result := sigmoid(0.5, 0.5, 10) - if result < 0.49 || result > 0.51 { - t.Errorf("Expected sigmoid(0.5, 0.5, 10) ~ 0.5, got %f", result) - } - - // Test sigmoid well above midpoint - result = sigmoid(1.0, 0.5, 10) - if result < 0.9 { - t.Errorf("Expected sigmoid(1.0, 0.5, 10) > 0.9, got %f", result) - } - - // Test sigmoid well below midpoint - result = sigmoid(0.0, 0.5, 10) - if result > 0.1 { - t.Errorf("Expected sigmoid(0.0, 0.5, 10) < 0.1, got %f", result) - } -} diff --git a/pca-platform/heuristic-service/internal/stepup/pow.go b/pca-platform/heuristic-service/internal/stepup/pow.go deleted file mode 100644 index 12143ba..0000000 --- a/pca-platform/heuristic-service/internal/stepup/pow.go +++ /dev/null @@ -1,180 +0,0 @@ -package stepup - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "fmt" - "strings" - "sync" - "time" - - "github.com/breakpilot/pca-platform/heuristic-service/internal/config" -) - -// PoWService handles Proof-of-Work challenges -type PoWService struct 
{ - config *config.PoWConfig - challenges map[string]*PoWChallenge - mu sync.RWMutex -} - -// PoWChallenge represents a Proof-of-Work challenge -type PoWChallenge struct { - ID string `json:"id"` - SessionID string `json:"session_id"` - Challenge string `json:"challenge"` - Difficulty int `json:"difficulty"` - CreatedAt time.Time `json:"created_at"` - ExpiresAt time.Time `json:"expires_at"` - Solved bool `json:"solved"` -} - -// PoWChallengeResponse is sent to the client -type PoWChallengeResponse struct { - ChallengeID string `json:"challenge_id"` - Challenge string `json:"challenge"` - Difficulty int `json:"difficulty"` - MaxTimeMs int `json:"max_time_ms"` - Hint string `json:"hint"` -} - -// PoWVerifyRequest for verifying a solved challenge -type PoWVerifyRequest struct { - SessionID string `json:"session_id"` - ChallengeID string `json:"challenge_id"` - Challenge string `json:"challenge"` - Nonce int64 `json:"nonce"` -} - -// NewPoWService creates a new Proof-of-Work service -func NewPoWService(cfg *config.PoWConfig) *PoWService { - return &PoWService{ - config: cfg, - challenges: make(map[string]*PoWChallenge), - } -} - -// CreateChallenge generates a new PoW challenge -func (s *PoWService) CreateChallenge(sessionID string) (*PoWChallengeResponse, error) { - // Generate random challenge - challengeBytes := make([]byte, 16) - if _, err := rand.Read(challengeBytes); err != nil { - return nil, err - } - challengeStr := hex.EncodeToString(challengeBytes) - - // Generate challenge ID - idBytes := make([]byte, 8) - rand.Read(idBytes) - challengeID := hex.EncodeToString(idBytes) - - // Create challenge - challenge := &PoWChallenge{ - ID: challengeID, - SessionID: sessionID, - Challenge: challengeStr, - Difficulty: s.config.Difficulty, - CreatedAt: time.Now(), - ExpiresAt: time.Now().Add(time.Duration(s.config.MaxDurationMs*2) * time.Millisecond), - Solved: false, - } - - // Store challenge - s.mu.Lock() - s.challenges[challengeID] = challenge - s.mu.Unlock() - - // 
Build response - prefix := strings.Repeat("0", s.config.Difficulty) - response := &PoWChallengeResponse{ - ChallengeID: challengeID, - Challenge: challengeStr, - Difficulty: s.config.Difficulty, - MaxTimeMs: s.config.MaxDurationMs, - Hint: fmt.Sprintf("Find nonce where SHA256(challenge + nonce) starts with '%s'", prefix), - } - - return response, nil -} - -// VerifyChallenge verifies a PoW solution -func (s *PoWService) VerifyChallenge(req *PoWVerifyRequest) (bool, error) { - s.mu.RLock() - challenge, exists := s.challenges[req.ChallengeID] - s.mu.RUnlock() - - if !exists { - return false, nil - } - - // Check expiration - if time.Now().After(challenge.ExpiresAt) { - s.mu.Lock() - delete(s.challenges, req.ChallengeID) - s.mu.Unlock() - return false, nil - } - - // Check session match - if challenge.SessionID != req.SessionID { - return false, nil - } - - // Check challenge string match - if challenge.Challenge != req.Challenge { - return false, nil - } - - // Verify the proof of work - input := fmt.Sprintf("%s%d", req.Challenge, req.Nonce) - hash := sha256.Sum256([]byte(input)) - hashHex := hex.EncodeToString(hash[:]) - - // Check if hash has required number of leading zeros - prefix := strings.Repeat("0", challenge.Difficulty) - if !strings.HasPrefix(hashHex, prefix) { - return false, nil - } - - // Mark as solved - s.mu.Lock() - challenge.Solved = true - s.mu.Unlock() - - return true, nil -} - -// VerifyProof is a standalone verification without stored challenge -// Useful for quick verification -func (s *PoWService) VerifyProof(challenge string, nonce int64, difficulty int) bool { - input := fmt.Sprintf("%s%d", challenge, nonce) - hash := sha256.Sum256([]byte(input)) - hashHex := hex.EncodeToString(hash[:]) - - prefix := strings.Repeat("0", difficulty) - return strings.HasPrefix(hashHex, prefix) -} - -// CleanupExpiredChallenges removes expired challenges -func (s *PoWService) CleanupExpiredChallenges() { - s.mu.Lock() - defer s.mu.Unlock() - - now := time.Now() 
- for id, challenge := range s.challenges { - if now.After(challenge.ExpiresAt) { - delete(s.challenges, id) - } - } -} - -// IsEnabled returns whether PoW is enabled -func (s *PoWService) IsEnabled() bool { - return s.config.Enabled -} - -// GetDifficulty returns configured difficulty -func (s *PoWService) GetDifficulty() int { - return s.config.Difficulty -} diff --git a/pca-platform/heuristic-service/internal/stepup/pow_test.go b/pca-platform/heuristic-service/internal/stepup/pow_test.go deleted file mode 100644 index 4dc3e41..0000000 --- a/pca-platform/heuristic-service/internal/stepup/pow_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package stepup - -import ( - "testing" - - "github.com/breakpilot/pca-platform/heuristic-service/internal/config" -) - -func TestNewPoWService(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - Difficulty: 4, - MaxDurationMs: 5000, - } - - service := NewPoWService(cfg) - - if service == nil { - t.Fatal("Expected non-nil service") - } - if !service.IsEnabled() { - t.Error("Expected service to be enabled") - } - if service.GetDifficulty() != 4 { - t.Errorf("Expected difficulty 4, got %d", service.GetDifficulty()) - } -} - -func TestCreateChallenge(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - Difficulty: 4, - MaxDurationMs: 5000, - } - - service := NewPoWService(cfg) - response, err := service.CreateChallenge("test-session") - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if response == nil { - t.Fatal("Expected non-nil response") - } - if response.Challenge == "" { - t.Error("Expected non-empty challenge") - } - if response.ChallengeID == "" { - t.Error("Expected non-empty challenge ID") - } - if response.Difficulty != 4 { - t.Errorf("Expected difficulty 4, got %d", response.Difficulty) - } - if response.MaxTimeMs != 5000 { - t.Errorf("Expected max time 5000, got %d", response.MaxTimeMs) - } -} - -func TestVerifyProof_Valid(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - 
Difficulty: 2, // Low difficulty for fast testing - MaxDurationMs: 5000, - } - - service := NewPoWService(cfg) - - // Find a valid nonce for a known challenge - challenge := "test-challenge-123" - var validNonce int64 = -1 - - // Brute force to find valid nonce (with low difficulty) - for nonce := int64(0); nonce < 10000; nonce++ { - if service.VerifyProof(challenge, nonce, 2) { - validNonce = nonce - break - } - } - - if validNonce == -1 { - t.Skip("Could not find valid nonce in reasonable time") - } - - // Verify the found nonce - if !service.VerifyProof(challenge, validNonce, 2) { - t.Errorf("Expected valid proof for nonce %d", validNonce) - } -} - -func TestVerifyProof_Invalid(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - Difficulty: 4, - MaxDurationMs: 5000, - } - - service := NewPoWService(cfg) - - // Nonce 0 is very unlikely to be valid for difficulty 4 - valid := service.VerifyProof("random-challenge", 0, 4) - - if valid { - t.Error("Expected invalid proof for nonce 0") - } -} - -func TestVerifyChallenge_ValidFlow(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - Difficulty: 2, - MaxDurationMs: 10000, - } - - service := NewPoWService(cfg) - - // Create challenge - response, err := service.CreateChallenge("test-session") - if err != nil { - t.Fatalf("Failed to create challenge: %v", err) - } - - // Find valid nonce - var validNonce int64 = -1 - for nonce := int64(0); nonce < 100000; nonce++ { - if service.VerifyProof(response.Challenge, nonce, 2) { - validNonce = nonce - break - } - } - - if validNonce == -1 { - t.Skip("Could not find valid nonce") - } - - // Verify challenge - req := &PoWVerifyRequest{ - SessionID: "test-session", - ChallengeID: response.ChallengeID, - Challenge: response.Challenge, - Nonce: validNonce, - } - - verified, err := service.VerifyChallenge(req) - if err != nil { - t.Fatalf("Verification error: %v", err) - } - if !verified { - t.Error("Expected verification to succeed") - } -} - -func 
TestVerifyChallenge_WrongSession(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - Difficulty: 2, - MaxDurationMs: 5000, - } - - service := NewPoWService(cfg) - - // Create challenge for session A - response, _ := service.CreateChallenge("session-a") - - // Try to verify with session B - req := &PoWVerifyRequest{ - SessionID: "session-b", - ChallengeID: response.ChallengeID, - Challenge: response.Challenge, - Nonce: 0, - } - - verified, _ := service.VerifyChallenge(req) - if verified { - t.Error("Expected verification to fail for wrong session") - } -} - -func TestVerifyChallenge_NonexistentChallenge(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - Difficulty: 2, - MaxDurationMs: 5000, - } - - service := NewPoWService(cfg) - - req := &PoWVerifyRequest{ - SessionID: "test-session", - ChallengeID: "nonexistent-challenge", - Challenge: "test", - Nonce: 0, - } - - verified, _ := service.VerifyChallenge(req) - if verified { - t.Error("Expected verification to fail for nonexistent challenge") - } -} - -func TestCleanupExpiredChallenges(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: true, - Difficulty: 2, - MaxDurationMs: 1, // Very short for testing - } - - service := NewPoWService(cfg) - - // Create challenge - service.CreateChallenge("test-session") - - if len(service.challenges) != 1 { - t.Errorf("Expected 1 challenge, got %d", len(service.challenges)) - } - - // Wait for expiration - // Note: In real test, we'd mock time or set ExpiresAt in the past - - // For now, just verify cleanup doesn't crash - service.CleanupExpiredChallenges() -} - -func TestIsEnabled(t *testing.T) { - cfg := &config.PoWConfig{ - Enabled: false, - Difficulty: 4, - MaxDurationMs: 5000, - } - - service := NewPoWService(cfg) - - if service.IsEnabled() { - t.Error("Expected service to be disabled") - } -} diff --git a/pca-platform/heuristic-service/internal/stepup/webauthn.go b/pca-platform/heuristic-service/internal/stepup/webauthn.go deleted file mode 100644 
index d3b7c9d..0000000 --- a/pca-platform/heuristic-service/internal/stepup/webauthn.go +++ /dev/null @@ -1,172 +0,0 @@ -package stepup - -import ( - "crypto/rand" - "encoding/base64" - "sync" - "time" - - "github.com/breakpilot/pca-platform/heuristic-service/internal/config" -) - -// WebAuthnService handles WebAuthn challenges and verification -type WebAuthnService struct { - config *config.WebAuthnConfig - challenges map[string]*Challenge - mu sync.RWMutex -} - -// Challenge represents a WebAuthn challenge -type Challenge struct { - ID string `json:"id"` - SessionID string `json:"session_id"` - Challenge string `json:"challenge"` - CreatedAt time.Time `json:"created_at"` - ExpiresAt time.Time `json:"expires_at"` - Verified bool `json:"verified"` -} - -// ChallengeRequest is the client-side challenge request format -type ChallengeRequest struct { - SessionID string `json:"session_id"` -} - -// ChallengeResponse is the WebAuthn public key request options -type ChallengeResponse struct { - PublicKey PublicKeyCredentialRequestOptions `json:"publicKey"` -} - -// PublicKeyCredentialRequestOptions mirrors the WebAuthn API structure -type PublicKeyCredentialRequestOptions struct { - Challenge string `json:"challenge"` - Timeout int `json:"timeout"` - RpID string `json:"rpId,omitempty"` - UserVerification string `json:"userVerification"` - AllowCredentials []PublicKeyCredentialDescriptor `json:"allowCredentials,omitempty"` -} - -// PublicKeyCredentialDescriptor for allowed credentials -type PublicKeyCredentialDescriptor struct { - Type string `json:"type"` - ID string `json:"id"` - Transports []string `json:"transports,omitempty"` -} - -// VerifyRequest for client verification response -type VerifyRequest struct { - SessionID string `json:"session_id"` - ChallengeID string `json:"challenge_id"` - Credential map[string]interface{} `json:"credential"` -} - -// NewWebAuthnService creates a new WebAuthn service -func NewWebAuthnService(cfg *config.WebAuthnConfig) 
*WebAuthnService { - return &WebAuthnService{ - config: cfg, - challenges: make(map[string]*Challenge), - } -} - -// CreateChallenge generates a new WebAuthn challenge for a session -func (s *WebAuthnService) CreateChallenge(sessionID string) (*ChallengeResponse, error) { - // Generate random challenge bytes - challengeBytes := make([]byte, 32) - if _, err := rand.Read(challengeBytes); err != nil { - return nil, err - } - challengeStr := base64.RawURLEncoding.EncodeToString(challengeBytes) - - // Generate challenge ID - idBytes := make([]byte, 16) - rand.Read(idBytes) - challengeID := base64.RawURLEncoding.EncodeToString(idBytes) - - // Create challenge - challenge := &Challenge{ - ID: challengeID, - SessionID: sessionID, - Challenge: challengeStr, - CreatedAt: time.Now(), - ExpiresAt: time.Now().Add(time.Duration(s.config.TimeoutMs) * time.Millisecond), - Verified: false, - } - - // Store challenge - s.mu.Lock() - s.challenges[challengeID] = challenge - s.mu.Unlock() - - // Build response - response := &ChallengeResponse{ - PublicKey: PublicKeyCredentialRequestOptions{ - Challenge: challengeStr, - Timeout: s.config.TimeoutMs, - UserVerification: s.config.UserVerification, - // In production, you'd include allowed credentials from user registration - AllowCredentials: []PublicKeyCredentialDescriptor{}, - }, - } - - return response, nil -} - -// VerifyChallenge verifies a WebAuthn assertion response -func (s *WebAuthnService) VerifyChallenge(req *VerifyRequest) (bool, error) { - s.mu.RLock() - challenge, exists := s.challenges[req.ChallengeID] - s.mu.RUnlock() - - if !exists { - return false, nil - } - - // Check expiration - if time.Now().After(challenge.ExpiresAt) { - s.mu.Lock() - delete(s.challenges, req.ChallengeID) - s.mu.Unlock() - return false, nil - } - - // Check session match - if challenge.SessionID != req.SessionID { - return false, nil - } - - // In production, you would: - // 1. Parse the credential response - // 2. 
Verify the signature against stored public key - // 3. Verify the challenge matches - // 4. Check the origin - // For MVP, we accept any valid-looking response - - // Verify credential structure exists - if req.Credential == nil { - return false, nil - } - - // Mark as verified - s.mu.Lock() - challenge.Verified = true - s.mu.Unlock() - - return true, nil -} - -// CleanupExpiredChallenges removes expired challenges -func (s *WebAuthnService) CleanupExpiredChallenges() { - s.mu.Lock() - defer s.mu.Unlock() - - now := time.Now() - for id, challenge := range s.challenges { - if now.After(challenge.ExpiresAt) { - delete(s.challenges, id) - } - } -} - -// IsEnabled returns whether WebAuthn is enabled -func (s *WebAuthnService) IsEnabled() bool { - return s.config.Enabled -} diff --git a/pca-platform/sdk/js/src/pca-sdk.js b/pca-platform/sdk/js/src/pca-sdk.js deleted file mode 100644 index 3f8b02f..0000000 --- a/pca-platform/sdk/js/src/pca-sdk.js +++ /dev/null @@ -1,473 +0,0 @@ -/** - * PCA SDK - Person-Corporate-Agent Human Detection SDK - * - * Collects behavioral metrics to distinguish humans from bots - * and handles step-up verification (WebAuthn, PoW) when needed. - * - * GDPR/Privacy compliant: No PII collected, only aggregated behavior metrics. 
- */ - -const PCA = (() => { - // Internal state - let config = null; - let sessionId = null; - let metrics = { - startTime: Date.now(), - visibleTime: 0, - lastVisibleTS: Date.now(), - maxScrollPercent: 0, - clickCount: 0, - mouseMoves: 0, - keyStrokes: 0, - touchEvents: 0, - mouseVelocities: [], - scrollVelocities: [], - clickIntervals: [], - lastClickTime: 0, - lastMousePos: null, - lastMouseTime: 0, - lastScrollPos: 0, - lastScrollTime: 0 - }; - let currentScore = 0; - let tickTimer = null; - let isInitialized = false; - let scoreCallbacks = []; - - // Generate unique session ID - function generateSessionId() { - return 'pca_' + Date.now().toString(36) + '_' + Math.random().toString(36).substr(2, 9); - } - - // Calculate score based on current metrics - function evaluateScore() { - const now = Date.now(); - const totalTime = (now - metrics.startTime) / 1000; - - // Update visible time if page is visible - if (!document.hidden) { - metrics.visibleTime += (now - metrics.lastVisibleTS) / 1000; - metrics.lastVisibleTS = now; - } - - // Heuristic 1: Dwell ratio (visible time / total time) - let dwellRatio = totalTime > 0 ? 
(metrics.visibleTime / totalTime) : 0; - if (dwellRatio > 1) dwellRatio = 1; - - // Heuristic 2: Scroll score (max scroll depth 0-1) - let scrollScore = metrics.maxScrollPercent; - if (scrollScore > 1) scrollScore = 1; - - // Heuristic 3: Pointer variance (mouse/touch activity) - let pointerScore = 0; - if (metrics.mouseMoves > 0 || metrics.touchEvents > 0) { - pointerScore = 0.5; - // Check for natural mouse velocity variance - if (metrics.mouseVelocities.length > 5) { - const variance = calculateVariance(metrics.mouseVelocities); - if (variance > 0.1 && variance < 100.0) { - pointerScore = 0.9; // Natural variance - } else if (variance <= 0.1) { - pointerScore = 0.3; // Too uniform - suspicious - } - } - if (metrics.touchEvents > 0) pointerScore += 0.2; - if (pointerScore > 1) pointerScore = 1; - } - - // Heuristic 4: Click rate - let clickScore = 0; - if (metrics.clickCount > 0 && totalTime > 0) { - const clickRate = metrics.clickCount / totalTime; - if (clickRate > 0.05 && clickRate < 3.0) { - clickScore = 0.8; - } else if (clickRate >= 3.0) { - clickScore = 0.2; // Too fast - } else { - clickScore = 0.4; - } - // Natural click intervals - if (metrics.clickIntervals.length > 2) { - const variance = calculateVariance(metrics.clickIntervals); - if (variance > 0.01) clickScore += 0.2; - if (clickScore > 1) clickScore = 1; - } - } - - // Weighted sum - const w = config?.weights || { dwell_ratio: 0.30, scroll_score: 0.25, pointer_variance: 0.20, click_rate: 0.25 }; - currentScore = - dwellRatio * (w.dwell_ratio || 0) + - scrollScore * (w.scroll_score || 0) + - pointerScore * (w.pointer_variance || 0) + - clickScore * (w.click_rate || 0); - - if (currentScore > 1) currentScore = 1; - if (currentScore < 0) currentScore = 0; - - return currentScore; - } - - // Calculate variance of an array - function calculateVariance(values) { - if (values.length < 2) return 0; - const mean = values.reduce((a, b) => a + b, 0) / values.length; - return values.reduce((sum, val) => sum 
+ Math.pow(val - mean, 2), 0) / (values.length - 1); - } - - // Send tick to backend - async function sendTick() { - if (!config?.tick?.endpoint) return; - - const now = Date.now(); - const totalTime = (now - metrics.startTime) / 1000; - - const payload = { - session_id: sessionId, - score: Number(currentScore.toFixed(3)), - dwell_ratio: Number((metrics.visibleTime / totalTime).toFixed(3)), - scroll_depth: Number((metrics.maxScrollPercent * 100).toFixed(1)), - clicks: metrics.clickCount, - mouse_moves: metrics.mouseMoves, - key_strokes: metrics.keyStrokes, - touch_events: metrics.touchEvents, - mouse_velocities: metrics.mouseVelocities.slice(-20), // Last 20 values - scroll_velocities: metrics.scrollVelocities.slice(-20), - click_intervals: metrics.clickIntervals.slice(-10), - ts: now - }; - - try { - const response = await fetch(config.tick.endpoint, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(payload) - }); - - if (response.ok) { - const data = await response.json(); - // Notify callbacks - scoreCallbacks.forEach(cb => cb(data.score, data.action, data)); - } - } catch (err) { - console.warn('PCA: Tick transmission failed:', err); - } - } - - // WebAuthn step-up - async function triggerWebAuthn() { - if (!config?.step_up?.webauthn?.enabled || !window.PublicKeyCredential) { - return false; - } - - try { - // Get challenge from server - const challengeUrl = `${config.step_up.webauthn.challenge_endpoint}?session_id=${sessionId}`; - const challengeResp = await fetch(challengeUrl); - const challengeData = await challengeResp.json(); - - // Convert base64url challenge to ArrayBuffer - const challenge = base64UrlToArrayBuffer(challengeData.publicKey.challenge); - - const publicKeyRequestOptions = { - challenge: challenge, - timeout: challengeData.publicKey.timeout, - userVerification: challengeData.publicKey.userVerification, - allowCredentials: challengeData.publicKey.allowCredentials || [] - }; - - // Request credential 
- const credential = await navigator.credentials.get({ publicKey: publicKeyRequestOptions }); - - // Send to server for verification - const verifyResp = await fetch('/pca/v1/webauthn-verify', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - session_id: sessionId, - credential: credentialToJSON(credential) - }) - }); - - const result = await verifyResp.json(); - return result.verified === true; - } catch (e) { - console.log('PCA: WebAuthn step-up failed:', e); - return false; - } - } - - // Proof-of-Work step-up - async function triggerPoW() { - if (!config?.step_up?.pow?.enabled) { - return false; - } - - try { - // Get challenge from server - const challengeResp = await fetch(`/pca/v1/pow-challenge?session_id=${sessionId}`); - const challengeData = await challengeResp.json(); - - const { challenge_id, challenge, difficulty, max_time_ms } = challengeData; - const prefix = '0'.repeat(difficulty); - const startTime = Date.now(); - let nonce = 0; - - // Solve PoW puzzle - while (true) { - const input = challenge + nonce; - const hashBuffer = await crypto.subtle.digest('SHA-256', new TextEncoder().encode(input)); - const hashArray = Array.from(new Uint8Array(hashBuffer)); - const hashHex = hashArray.map(b => b.toString(16).padStart(2, '0')).join(''); - - if (hashHex.startsWith(prefix)) { - // Found solution - verify with server - const verifyResp = await fetch('/pca/v1/pow-verify', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - session_id: sessionId, - challenge_id: challenge_id, - challenge: challenge, - nonce: nonce - }) - }); - - const result = await verifyResp.json(); - return result.verified === true; - } - - nonce++; - - // Check timeout - if (Date.now() - startTime > max_time_ms) { - console.warn('PCA: PoW step-up timed out'); - return false; - } - - // Yield to prevent UI freeze (every 1000 iterations) - if (nonce % 1000 === 0) { - await new Promise(r => 
setTimeout(r, 0)); - } - } - } catch (e) { - console.error('PCA: PoW step-up error:', e); - return false; - } - } - - // Trigger step-up based on configured primary method - async function triggerStepUp() { - const methods = config?.step_up; - let success = false; - - if (methods?.primary === 'webauthn' && methods?.webauthn?.enabled && window.PublicKeyCredential) { - success = await triggerWebAuthn(); - } - - if (!success && methods?.pow?.enabled) { - success = await triggerPoW(); - } - - return success; - } - - // Helper: Convert base64url to ArrayBuffer - function base64UrlToArrayBuffer(base64url) { - const base64 = base64url.replace(/-/g, '+').replace(/_/g, '/'); - const padding = '='.repeat((4 - base64.length % 4) % 4); - const binary = atob(base64 + padding); - const bytes = new Uint8Array(binary.length); - for (let i = 0; i < binary.length; i++) { - bytes[i] = binary.charCodeAt(i); - } - return bytes.buffer; - } - - // Helper: Convert credential to JSON-serializable object - function credentialToJSON(credential) { - return { - id: credential.id, - type: credential.type, - rawId: arrayBufferToBase64Url(credential.rawId), - response: { - authenticatorData: arrayBufferToBase64Url(credential.response.authenticatorData), - clientDataJSON: arrayBufferToBase64Url(credential.response.clientDataJSON), - signature: arrayBufferToBase64Url(credential.response.signature) - } - }; - } - - // Helper: Convert ArrayBuffer to base64url - function arrayBufferToBase64Url(buffer) { - const bytes = new Uint8Array(buffer); - let binary = ''; - for (let i = 0; i < bytes.length; i++) { - binary += String.fromCharCode(bytes[i]); - } - return btoa(binary).replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, ''); - } - - // Initialize SDK - function init(userConfig) { - if (isInitialized) return; - - config = userConfig; - sessionId = generateSessionId(); - isInitialized = true; - - // Visibility change listener - document.addEventListener('visibilitychange', () => { - if 
(document.hidden) { - metrics.visibleTime += (Date.now() - metrics.lastVisibleTS) / 1000; - } else { - metrics.lastVisibleTS = Date.now(); - } - }); - - // Scroll listener - window.addEventListener('scroll', () => { - const doc = document.documentElement; - const scrollTop = window.pageYOffset || doc.scrollTop; - const viewportHeight = window.innerHeight; - const totalHeight = doc.scrollHeight; - const scrollPercent = totalHeight > 0 ? (scrollTop + viewportHeight) / totalHeight : 0; - - if (scrollPercent > metrics.maxScrollPercent) { - metrics.maxScrollPercent = scrollPercent; - } - - // Track scroll velocity - const now = Date.now(); - if (metrics.lastScrollTime > 0) { - const dt = (now - metrics.lastScrollTime) / 1000; - if (dt > 0) { - const velocity = Math.abs(scrollTop - metrics.lastScrollPos) / dt; - metrics.scrollVelocities.push(velocity); - if (metrics.scrollVelocities.length > 50) metrics.scrollVelocities.shift(); - } - } - metrics.lastScrollPos = scrollTop; - metrics.lastScrollTime = now; - }); - - // Mouse movement listener - document.addEventListener('mousemove', (e) => { - metrics.mouseMoves++; - - // Track mouse velocity - const now = Date.now(); - if (metrics.lastMousePos && metrics.lastMouseTime > 0) { - const dt = (now - metrics.lastMouseTime) / 1000; - if (dt > 0) { - const dx = e.clientX - metrics.lastMousePos.x; - const dy = e.clientY - metrics.lastMousePos.y; - const velocity = Math.sqrt(dx * dx + dy * dy) / dt; - metrics.mouseVelocities.push(velocity); - if (metrics.mouseVelocities.length > 50) metrics.mouseVelocities.shift(); - } - } - metrics.lastMousePos = { x: e.clientX, y: e.clientY }; - metrics.lastMouseTime = now; - }); - - // Click listener - document.addEventListener('click', () => { - const now = Date.now(); - if (metrics.lastClickTime > 0) { - const interval = (now - metrics.lastClickTime) / 1000; - metrics.clickIntervals.push(interval); - if (metrics.clickIntervals.length > 20) metrics.clickIntervals.shift(); - } - 
metrics.lastClickTime = now; - metrics.clickCount++; - }); - - // Keystroke listener (count only, no content) - document.addEventListener('keydown', () => { - metrics.keyStrokes++; - }); - - // Touch listener (mobile) - document.addEventListener('touchstart', () => { - metrics.touchEvents++; - }); - - // Start tick timer - if (config?.tick?.interval_ms) { - tickTimer = setInterval(() => { - evaluateScore(); - sendTick(); - }, config.tick.interval_ms); - } - } - - // Public API - return { - init, - - getScore: () => currentScore, - - getSessionId: () => sessionId, - - triggerStepUp, - - triggerWebAuthn, - - triggerPoW, - - onScoreUpdate: function(callback) { - scoreCallbacks.push(callback); - // Initial score - evaluateScore(); - callback(currentScore, currentScore >= (config?.thresholds?.score_pass || 0.7) ? 'allow' : 'challenge', null); - }, - - // Manual evaluation - evaluate: () => { - return { - score: evaluateScore(), - session_id: sessionId, - metrics: { - dwell_ratio: metrics.visibleTime / ((Date.now() - metrics.startTime) / 1000), - scroll_depth: metrics.maxScrollPercent, - clicks: metrics.clickCount, - mouse_moves: metrics.mouseMoves - } - }; - }, - - // Force send tick - tick: sendTick, - - // Cleanup - destroy: () => { - if (tickTimer) { - clearInterval(tickTimer); - tickTimer = null; - } - isInitialized = false; - scoreCallbacks = []; - } - }; -})(); - -// Auto-initialize if config is available -if (typeof window !== 'undefined') { - window.PCA = PCA; - - // Try to load config from ai-access.json - fetch('/ai-access.json') - .then(res => res.ok ? res.json() : null) - .catch(() => null) - .then(cfg => { - if (cfg) { - PCA.init(cfg); - } - }); -} - -// Export for module systems -if (typeof module !== 'undefined' && module.exports) { - module.exports = PCA; -}