feat: Vorbereitung-Module auf 100% — Persistenz, Backend-Services, UCCA Frontend
All checks were successful
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-ai-compliance (push) Successful in 37s
CI / test-python-backend-compliance (push) Successful in 32s
CI / test-python-document-crawler (push) Successful in 22s
CI / test-python-dsms-gateway (push) Successful in 18s
All checks were successful
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-ai-compliance (push) Successful in 37s
CI / test-python-backend-compliance (push) Successful in 32s
CI / test-python-document-crawler (push) Successful in 22s
CI / test-python-dsms-gateway (push) Successful in 18s
Phase A: PostgreSQL State Store (sdk_states Tabelle, InMemory-Fallback) Phase B: Modules dynamisch vom Backend, Scope DB-Persistenz, Source Policy State Phase C: UCCA Frontend (3 Seiten, Wizard, RiskScoreGauge), Obligations Live-Daten Phase D: Document Import (PDF/LLM/Gap-Analyse), System Screening (SBOM/OSV.dev) Phase E: Company Profile CRUD mit Audit-Logging Phase F: Tests (Python + TypeScript), flow-data.ts DB-Tabellen aktualisiert Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -100,8 +100,8 @@ export const SDK_FLOW_STEPS: SDKFlowStep[] = [
|
||||
inputs: [],
|
||||
outputs: ['companyProfile', 'complianceScope'],
|
||||
prerequisiteSteps: [],
|
||||
dbTables: [],
|
||||
dbMode: 'none',
|
||||
dbTables: ['sdk_states', 'compliance_company_profiles', 'compliance_company_profile_audit'],
|
||||
dbMode: 'read/write',
|
||||
ragCollections: [],
|
||||
isOptional: false,
|
||||
url: '/sdk/company-profile',
|
||||
@@ -120,8 +120,8 @@ export const SDK_FLOW_STEPS: SDKFlowStep[] = [
|
||||
inputs: ['companyProfile'],
|
||||
outputs: ['complianceDepthLevel'],
|
||||
prerequisiteSteps: ['company-profile'],
|
||||
dbTables: [],
|
||||
dbMode: 'none',
|
||||
dbTables: ['sdk_states'],
|
||||
dbMode: 'read/write',
|
||||
ragCollections: [],
|
||||
isOptional: false,
|
||||
url: '/sdk/compliance-scope',
|
||||
@@ -141,12 +141,12 @@ export const SDK_FLOW_STEPS: SDKFlowStep[] = [
|
||||
inputs: ['companyProfile'],
|
||||
outputs: ['useCases'],
|
||||
prerequisiteSteps: ['company-profile'],
|
||||
dbTables: [],
|
||||
dbMode: 'none',
|
||||
dbTables: ['ucca_assessments', 'ucca_findings', 'ucca_controls'],
|
||||
dbMode: 'read/write',
|
||||
ragCollections: ['bp_compliance_ce'],
|
||||
ragPurpose: 'CE-Regulierungen fuer Use-Case Matching',
|
||||
isOptional: false,
|
||||
url: '/sdk/advisory-board',
|
||||
url: '/sdk/use-cases',
|
||||
},
|
||||
{
|
||||
id: 'import',
|
||||
@@ -159,8 +159,8 @@ export const SDK_FLOW_STEPS: SDKFlowStep[] = [
|
||||
inputs: ['useCases'],
|
||||
outputs: ['importedDocuments'],
|
||||
prerequisiteSteps: ['use-case-assessment'],
|
||||
dbTables: [],
|
||||
dbMode: 'none',
|
||||
dbTables: ['compliance_imported_documents', 'compliance_gap_analyses'],
|
||||
dbMode: 'read/write',
|
||||
ragCollections: [],
|
||||
isOptional: true,
|
||||
url: '/sdk/import',
|
||||
@@ -179,8 +179,8 @@ export const SDK_FLOW_STEPS: SDKFlowStep[] = [
|
||||
inputs: ['useCases'],
|
||||
outputs: ['screening', 'sbom'],
|
||||
prerequisiteSteps: ['use-case-assessment'],
|
||||
dbTables: [],
|
||||
dbMode: 'none',
|
||||
dbTables: ['compliance_screenings', 'compliance_security_issues'],
|
||||
dbMode: 'read/write',
|
||||
ragCollections: [],
|
||||
isOptional: false,
|
||||
url: '/sdk/screening',
|
||||
@@ -199,8 +199,8 @@ export const SDK_FLOW_STEPS: SDKFlowStep[] = [
|
||||
inputs: ['companyProfile', 'screening'],
|
||||
outputs: ['modules'],
|
||||
prerequisiteSteps: ['screening'],
|
||||
dbTables: [],
|
||||
dbMode: 'none',
|
||||
dbTables: ['compliance_service_modules', 'sdk_states'],
|
||||
dbMode: 'read/write',
|
||||
ragCollections: ['bp_compliance_gesetze'],
|
||||
ragPurpose: 'Regulierungen den Modulen zuordnen',
|
||||
isOptional: false,
|
||||
@@ -220,8 +220,8 @@ export const SDK_FLOW_STEPS: SDKFlowStep[] = [
|
||||
inputs: ['modules'],
|
||||
outputs: ['sourcePolicy'],
|
||||
prerequisiteSteps: ['modules'],
|
||||
dbTables: [],
|
||||
dbMode: 'none',
|
||||
dbTables: ['compliance_source_policies', 'compliance_allowed_sources', 'compliance_pii_field_rules', 'compliance_source_policy_audit'],
|
||||
dbMode: 'read/write',
|
||||
ragCollections: [],
|
||||
isOptional: false,
|
||||
url: '/sdk/source-policy',
|
||||
|
||||
@@ -49,22 +49,30 @@ export default function ComplianceScopePage() {
|
||||
const [isLoading, setIsLoading] = useState(true)
|
||||
const [isEvaluating, setIsEvaluating] = useState(false)
|
||||
|
||||
// Load from localStorage on mount
|
||||
// Load from SDK context first (persisted via State API), then localStorage as fallback
|
||||
useEffect(() => {
|
||||
try {
|
||||
const stored = localStorage.getItem(STORAGE_KEY)
|
||||
if (stored) {
|
||||
const parsed = JSON.parse(stored) as ComplianceScopeState
|
||||
setScopeState(parsed)
|
||||
// Also sync to SDK context
|
||||
dispatch({ type: 'SET_COMPLIANCE_SCOPE', payload: parsed })
|
||||
// Priority 1: SDK context (loaded from PostgreSQL via State API)
|
||||
if (sdkState.complianceScope && sdkState.complianceScope.answers?.length > 0) {
|
||||
setScopeState(sdkState.complianceScope)
|
||||
// Also update localStorage for offline fallback
|
||||
localStorage.setItem(STORAGE_KEY, JSON.stringify(sdkState.complianceScope))
|
||||
} else {
|
||||
// Priority 2: localStorage fallback
|
||||
const stored = localStorage.getItem(STORAGE_KEY)
|
||||
if (stored) {
|
||||
const parsed = JSON.parse(stored) as ComplianceScopeState
|
||||
setScopeState(parsed)
|
||||
// Sync to SDK context for backend persistence
|
||||
dispatch({ type: 'SET_COMPLIANCE_SCOPE', payload: parsed })
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Failed to load compliance scope state from localStorage:', error)
|
||||
console.error('Failed to load compliance scope state:', error)
|
||||
} finally {
|
||||
setIsLoading(false)
|
||||
}
|
||||
}, [dispatch])
|
||||
}, [dispatch, sdkState.complianceScope])
|
||||
|
||||
// Save to localStorage and SDK context whenever state changes
|
||||
useEffect(() => {
|
||||
|
||||
@@ -358,108 +358,105 @@ export default function ImportPage() {
|
||||
if (files.length === 0) return
|
||||
|
||||
setIsAnalyzing(true)
|
||||
const allGaps: GapItem[] = []
|
||||
|
||||
// Simulate upload and analysis
|
||||
for (let i = 0; i < files.length; i++) {
|
||||
const file = files[i]
|
||||
|
||||
// Update to uploading
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, status: 'uploading' as const } : f)))
|
||||
|
||||
// Simulate upload progress
|
||||
for (let p = 0; p <= 100; p += 20) {
|
||||
await new Promise(resolve => setTimeout(resolve, 100))
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, progress: p } : f)))
|
||||
// Upload progress
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, progress: 30 } : f)))
|
||||
|
||||
// Prepare form data for backend
|
||||
const formData = new FormData()
|
||||
formData.append('file', file.file)
|
||||
formData.append('document_type', file.type)
|
||||
formData.append('tenant_id', 'default')
|
||||
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, progress: 60, status: 'analyzing' as const } : f)))
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/sdk/v1/import/analyze', {
|
||||
method: 'POST',
|
||||
body: formData,
|
||||
})
|
||||
|
||||
if (response.ok) {
|
||||
const result = await response.json()
|
||||
|
||||
// Create imported document from backend response
|
||||
const doc: ImportedDocument = {
|
||||
id: result.document_id || file.id,
|
||||
name: file.file.name,
|
||||
type: result.detected_type || file.type,
|
||||
fileUrl: URL.createObjectURL(file.file),
|
||||
uploadedAt: new Date(),
|
||||
analyzedAt: new Date(),
|
||||
analysisResult: {
|
||||
detectedType: result.detected_type || file.type,
|
||||
confidence: result.confidence || 0.85,
|
||||
extractedEntities: result.extracted_entities || [],
|
||||
gaps: result.gap_analysis?.gaps || [],
|
||||
recommendations: result.recommendations || [],
|
||||
},
|
||||
}
|
||||
|
||||
addImportedDocument(doc)
|
||||
|
||||
// Collect gaps
|
||||
if (result.gap_analysis?.gaps) {
|
||||
for (const gap of result.gap_analysis.gaps) {
|
||||
allGaps.push({
|
||||
id: gap.id,
|
||||
category: gap.category,
|
||||
description: gap.description,
|
||||
severity: gap.severity,
|
||||
regulation: gap.regulation,
|
||||
requiredAction: gap.required_action,
|
||||
relatedStepId: gap.related_step_id || '',
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, progress: 100, status: 'complete' as const } : f)))
|
||||
} else {
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, status: 'error' as const, error: 'Analyse fehlgeschlagen' } : f)))
|
||||
}
|
||||
} catch {
|
||||
// Fallback: create basic document without backend analysis
|
||||
const doc: ImportedDocument = {
|
||||
id: file.id,
|
||||
name: file.file.name,
|
||||
type: file.type,
|
||||
fileUrl: URL.createObjectURL(file.file),
|
||||
uploadedAt: new Date(),
|
||||
analyzedAt: new Date(),
|
||||
analysisResult: {
|
||||
detectedType: file.type,
|
||||
confidence: 0.5,
|
||||
extractedEntities: [],
|
||||
gaps: [],
|
||||
recommendations: ['Backend nicht erreichbar — manuelle Pruefung empfohlen'],
|
||||
},
|
||||
}
|
||||
addImportedDocument(doc)
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, progress: 100, status: 'complete' as const } : f)))
|
||||
}
|
||||
|
||||
// Update to analyzing
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, status: 'analyzing' as const } : f)))
|
||||
|
||||
// Simulate analysis
|
||||
await new Promise(resolve => setTimeout(resolve, 1000))
|
||||
|
||||
// Create imported document
|
||||
const doc: ImportedDocument = {
|
||||
id: file.id,
|
||||
name: file.file.name,
|
||||
type: file.type,
|
||||
fileUrl: URL.createObjectURL(file.file),
|
||||
uploadedAt: new Date(),
|
||||
analyzedAt: new Date(),
|
||||
analysisResult: {
|
||||
detectedType: file.type,
|
||||
confidence: 0.85 + Math.random() * 0.15,
|
||||
extractedEntities: ['DSGVO', 'AI Act', 'Personenbezogene Daten'],
|
||||
gaps: [],
|
||||
recommendations: ['KI-spezifische Klauseln ergaenzen', 'AI Act Anforderungen pruefen'],
|
||||
},
|
||||
}
|
||||
|
||||
addImportedDocument(doc)
|
||||
|
||||
// Update to complete
|
||||
setFiles(prev => prev.map(f => (f.id === file.id ? { ...f, status: 'complete' as const } : f)))
|
||||
}
|
||||
|
||||
// Generate mock gap analysis
|
||||
const gaps: GapItem[] = [
|
||||
{
|
||||
id: 'gap-1',
|
||||
category: 'AI Act Compliance',
|
||||
description: 'Keine Risikoklassifizierung fuer KI-Systeme vorhanden',
|
||||
severity: 'CRITICAL',
|
||||
regulation: 'EU AI Act Art. 6',
|
||||
requiredAction: 'Risikoklassifizierung durchfuehren',
|
||||
relatedStepId: 'ai-act',
|
||||
},
|
||||
{
|
||||
id: 'gap-2',
|
||||
category: 'Transparenz',
|
||||
description: 'Informationspflichten bei automatisierten Entscheidungen fehlen',
|
||||
severity: 'HIGH',
|
||||
regulation: 'DSGVO Art. 13, 14, 22',
|
||||
requiredAction: 'Datenschutzerklaerung erweitern',
|
||||
relatedStepId: 'einwilligungen',
|
||||
},
|
||||
{
|
||||
id: 'gap-3',
|
||||
category: 'TOMs',
|
||||
description: 'KI-spezifische technische Massnahmen nicht dokumentiert',
|
||||
severity: 'MEDIUM',
|
||||
regulation: 'DSGVO Art. 32',
|
||||
requiredAction: 'TOMs um KI-Aspekte erweitern',
|
||||
relatedStepId: 'tom',
|
||||
},
|
||||
{
|
||||
id: 'gap-4',
|
||||
category: 'VVT',
|
||||
description: 'KI-basierte Verarbeitungstaetigkeiten nicht erfasst',
|
||||
severity: 'HIGH',
|
||||
regulation: 'DSGVO Art. 30',
|
||||
requiredAction: 'VVT aktualisieren',
|
||||
relatedStepId: 'vvt',
|
||||
},
|
||||
{
|
||||
id: 'gap-5',
|
||||
category: 'Aufsicht',
|
||||
description: 'Menschliche Aufsicht nicht definiert',
|
||||
severity: 'MEDIUM',
|
||||
regulation: 'EU AI Act Art. 14',
|
||||
requiredAction: 'Aufsichtsprozesse definieren',
|
||||
relatedStepId: 'controls',
|
||||
},
|
||||
]
|
||||
|
||||
// Build gap analysis summary
|
||||
const gapAnalysis: GapAnalysis = {
|
||||
id: `analysis-${Date.now()}`,
|
||||
createdAt: new Date(),
|
||||
totalGaps: gaps.length,
|
||||
criticalGaps: gaps.filter(g => g.severity === 'CRITICAL').length,
|
||||
highGaps: gaps.filter(g => g.severity === 'HIGH').length,
|
||||
mediumGaps: gaps.filter(g => g.severity === 'MEDIUM').length,
|
||||
lowGaps: gaps.filter(g => g.severity === 'LOW').length,
|
||||
gaps,
|
||||
recommendedPackages: ['analyse', 'dokumentation'],
|
||||
totalGaps: allGaps.length,
|
||||
criticalGaps: allGaps.filter(g => g.severity === 'CRITICAL').length,
|
||||
highGaps: allGaps.filter(g => g.severity === 'HIGH').length,
|
||||
mediumGaps: allGaps.filter(g => g.severity === 'MEDIUM').length,
|
||||
lowGaps: allGaps.filter(g => g.severity === 'LOW').length,
|
||||
gaps: allGaps,
|
||||
recommendedPackages: allGaps.length > 0 ? ['analyse', 'dokumentation'] : [],
|
||||
}
|
||||
|
||||
setAnalysisResult(gapAnalysis)
|
||||
|
||||
@@ -19,11 +19,26 @@ interface DisplayModule extends ServiceModule {
|
||||
completionPercent: number
|
||||
}
|
||||
|
||||
interface BackendModule {
|
||||
id: string
|
||||
name: string
|
||||
display_name: string
|
||||
description: string
|
||||
service_type: string | null
|
||||
processes_pii: boolean
|
||||
ai_components: boolean
|
||||
criticality: string
|
||||
is_active: boolean
|
||||
compliance_score: number | null
|
||||
regulation_count: number
|
||||
risk_count: number
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// AVAILABLE MODULES (Templates)
|
||||
// FALLBACK MODULES (used when backend is unavailable)
|
||||
// =============================================================================
|
||||
|
||||
const availableModules: Omit<DisplayModule, 'status' | 'completionPercent'>[] = [
|
||||
const fallbackModules: Omit<DisplayModule, 'status' | 'completionPercent'>[] = [
|
||||
{
|
||||
id: 'mod-gdpr',
|
||||
name: 'DSGVO Compliance',
|
||||
@@ -74,6 +89,34 @@ const availableModules: Omit<DisplayModule, 'status' | 'completionPercent'>[] =
|
||||
},
|
||||
]
|
||||
|
||||
// =============================================================================
|
||||
// HELPERS
|
||||
// =============================================================================
|
||||
|
||||
function categorizeModule(name: string): ModuleCategory {
|
||||
const lower = name.toLowerCase()
|
||||
if (lower.includes('dsgvo') || lower.includes('gdpr') || lower.includes('datenschutz')) return 'gdpr'
|
||||
if (lower.includes('ai act') || lower.includes('ki-verordnung')) return 'ai-act'
|
||||
if (lower.includes('iso 27001') || lower.includes('iso27001') || lower.includes('isms')) return 'iso27001'
|
||||
if (lower.includes('nis2') || lower.includes('netz- und informations')) return 'nis2'
|
||||
return 'custom'
|
||||
}
|
||||
|
||||
function mapBackendToDisplay(m: BackendModule): Omit<DisplayModule, 'status' | 'completionPercent'> {
|
||||
return {
|
||||
id: m.id,
|
||||
name: m.display_name || m.name,
|
||||
description: m.description || '',
|
||||
category: categorizeModule(m.display_name || m.name),
|
||||
regulations: [],
|
||||
criticality: (m.criticality || 'MEDIUM').toUpperCase(),
|
||||
processesPersonalData: m.processes_pii,
|
||||
hasAIComponents: m.ai_components,
|
||||
requirementsCount: m.regulation_count || 0,
|
||||
controlsCount: m.risk_count || 0,
|
||||
}
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// COMPONENTS
|
||||
// =============================================================================
|
||||
@@ -124,13 +167,15 @@ function ModuleCard({
|
||||
</div>
|
||||
<h3 className="text-lg font-semibold text-gray-900">{module.name}</h3>
|
||||
<p className="text-sm text-gray-500 mt-1">{module.description}</p>
|
||||
<div className="mt-2 flex flex-wrap gap-1">
|
||||
{module.regulations.map(reg => (
|
||||
<span key={reg} className="px-2 py-0.5 text-xs bg-gray-100 text-gray-600 rounded">
|
||||
{reg}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
{module.regulations.length > 0 && (
|
||||
<div className="mt-2 flex flex-wrap gap-1">
|
||||
{module.regulations.map(reg => (
|
||||
<span key={reg} className="px-2 py-0.5 text-xs bg-gray-100 text-gray-600 rounded">
|
||||
{reg}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -193,6 +238,33 @@ function ModuleCard({
|
||||
export default function ModulesPage() {
|
||||
const { state, dispatch } = useSDK()
|
||||
const [filter, setFilter] = useState<string>('all')
|
||||
const [availableModules, setAvailableModules] = useState<Omit<DisplayModule, 'status' | 'completionPercent'>[]>(fallbackModules)
|
||||
const [isLoadingModules, setIsLoadingModules] = useState(true)
|
||||
const [backendError, setBackendError] = useState<string | null>(null)
|
||||
|
||||
// Load modules from backend
|
||||
useEffect(() => {
|
||||
async function loadModules() {
|
||||
try {
|
||||
const response = await fetch('/api/sdk/v1/modules')
|
||||
if (response.ok) {
|
||||
const data = await response.json()
|
||||
if (data.modules && data.modules.length > 0) {
|
||||
const mapped = data.modules.map(mapBackendToDisplay)
|
||||
setAvailableModules(mapped)
|
||||
setBackendError(null)
|
||||
}
|
||||
} else {
|
||||
setBackendError('Backend nicht erreichbar — zeige Standard-Module')
|
||||
}
|
||||
} catch {
|
||||
setBackendError('Backend nicht erreichbar — zeige Standard-Module')
|
||||
} finally {
|
||||
setIsLoadingModules(false)
|
||||
}
|
||||
}
|
||||
loadModules()
|
||||
}, [])
|
||||
|
||||
// Convert SDK modules to display modules with additional UI properties
|
||||
const displayModules: DisplayModule[] = availableModules.map(template => {
|
||||
@@ -243,7 +315,6 @@ export default function ModulesPage() {
|
||||
}
|
||||
|
||||
const handleDeactivateModule = (moduleId: string) => {
|
||||
// Remove module by updating state without it
|
||||
const updatedModules = state.modules.filter(m => m.id !== moduleId)
|
||||
dispatch({ type: 'SET_STATE', payload: { modules: updatedModules } })
|
||||
}
|
||||
@@ -268,6 +339,18 @@ export default function ModulesPage() {
|
||||
</button>
|
||||
</StepHeader>
|
||||
|
||||
{/* Backend Status */}
|
||||
{backendError && (
|
||||
<div className="bg-amber-50 border border-amber-200 rounded-lg p-3 text-sm text-amber-700">
|
||||
{backendError}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Loading */}
|
||||
{isLoadingModules && (
|
||||
<div className="text-center py-8 text-gray-500">Lade Module vom Backend...</div>
|
||||
)}
|
||||
|
||||
{/* Stats */}
|
||||
<div className="grid grid-cols-1 md:grid-cols-4 gap-4">
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use client'
|
||||
|
||||
import React, { useState } from 'react'
|
||||
import React, { useState, useEffect } from 'react'
|
||||
import { useSDK } from '@/lib/sdk'
|
||||
import { StepHeader, STEP_EXPLANATIONS } from '@/components/sdk/StepHeader'
|
||||
|
||||
@@ -21,73 +21,6 @@ interface Obligation {
|
||||
linkedSystems: string[]
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// MOCK DATA
|
||||
// =============================================================================
|
||||
|
||||
const mockObligations: Obligation[] = [
|
||||
{
|
||||
id: 'obl-1',
|
||||
title: 'Risikomanagementsystem implementieren',
|
||||
description: 'Ein Risikomanagementsystem fuer das Hochrisiko-KI-System muss implementiert werden.',
|
||||
source: 'AI Act',
|
||||
sourceArticle: 'Art. 9',
|
||||
deadline: new Date('2024-06-01'),
|
||||
status: 'in-progress',
|
||||
priority: 'critical',
|
||||
responsible: 'IT Security',
|
||||
linkedSystems: ['Bewerber-Screening'],
|
||||
},
|
||||
{
|
||||
id: 'obl-2',
|
||||
title: 'Technische Dokumentation erstellen',
|
||||
description: 'Umfassende technische Dokumentation fuer alle Hochrisiko-KI-Systeme.',
|
||||
source: 'AI Act',
|
||||
sourceArticle: 'Art. 11',
|
||||
deadline: new Date('2024-05-15'),
|
||||
status: 'pending',
|
||||
priority: 'high',
|
||||
responsible: 'Entwicklung',
|
||||
linkedSystems: ['Bewerber-Screening'],
|
||||
},
|
||||
{
|
||||
id: 'obl-3',
|
||||
title: 'Datenschutzerklaerung aktualisieren',
|
||||
description: 'Die Datenschutzerklaerung muss an die neuen KI-Verarbeitungen angepasst werden.',
|
||||
source: 'DSGVO',
|
||||
sourceArticle: 'Art. 13/14',
|
||||
deadline: new Date('2024-02-01'),
|
||||
status: 'overdue',
|
||||
priority: 'high',
|
||||
responsible: 'Datenschutz',
|
||||
linkedSystems: ['Kundenservice Chatbot', 'Empfehlungsalgorithmus'],
|
||||
},
|
||||
{
|
||||
id: 'obl-4',
|
||||
title: 'KI-Kennzeichnung implementieren',
|
||||
description: 'Nutzer muessen informiert werden, dass sie mit einem KI-System interagieren.',
|
||||
source: 'AI Act',
|
||||
sourceArticle: 'Art. 52',
|
||||
deadline: new Date('2024-03-01'),
|
||||
status: 'completed',
|
||||
priority: 'medium',
|
||||
responsible: 'UX Team',
|
||||
linkedSystems: ['Kundenservice Chatbot'],
|
||||
},
|
||||
{
|
||||
id: 'obl-5',
|
||||
title: 'Menschliche Aufsicht sicherstellen',
|
||||
description: 'Prozesse fuer menschliche Aufsicht bei automatisierten Entscheidungen.',
|
||||
source: 'AI Act',
|
||||
sourceArticle: 'Art. 14',
|
||||
deadline: new Date('2024-04-01'),
|
||||
status: 'pending',
|
||||
priority: 'critical',
|
||||
responsible: 'Operations',
|
||||
linkedSystems: ['Bewerber-Screening'],
|
||||
},
|
||||
]
|
||||
|
||||
// =============================================================================
|
||||
// COMPONENTS
|
||||
// =============================================================================
|
||||
@@ -188,14 +121,124 @@ function ObligationCard({ obligation }: { obligation: Obligation }) {
|
||||
)
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// HELPERS
|
||||
// =============================================================================
|
||||
|
||||
function mapControlsToObligations(assessments: Array<{
|
||||
id: string
|
||||
title?: string
|
||||
domain?: string
|
||||
result?: {
|
||||
required_controls?: Array<{
|
||||
id: string
|
||||
title: string
|
||||
description: string
|
||||
gdpr_ref?: string
|
||||
effort?: string
|
||||
}>
|
||||
triggered_rules?: Array<{
|
||||
rule_code: string
|
||||
title: string
|
||||
severity: string
|
||||
gdpr_ref: string
|
||||
}>
|
||||
risk_level?: string
|
||||
}
|
||||
}>): Obligation[] {
|
||||
const obligations: Obligation[] = []
|
||||
|
||||
for (const assessment of assessments) {
|
||||
// Map triggered rules to obligations
|
||||
const rules = assessment.result?.triggered_rules || []
|
||||
for (const rule of rules) {
|
||||
const severity = rule.severity
|
||||
obligations.push({
|
||||
id: `${assessment.id}-${rule.rule_code}`,
|
||||
title: rule.title,
|
||||
description: `Aus Assessment: ${assessment.title || assessment.id.slice(0, 8)}`,
|
||||
source: rule.gdpr_ref?.includes('AI Act') ? 'AI Act' : 'DSGVO',
|
||||
sourceArticle: rule.gdpr_ref || '',
|
||||
deadline: null,
|
||||
status: 'pending',
|
||||
priority: severity === 'BLOCK' ? 'critical' : severity === 'WARN' ? 'high' : 'medium',
|
||||
responsible: 'Compliance Team',
|
||||
linkedSystems: assessment.title ? [assessment.title] : [],
|
||||
})
|
||||
}
|
||||
|
||||
// Map required controls to obligations
|
||||
const controls = assessment.result?.required_controls || []
|
||||
for (const control of controls) {
|
||||
obligations.push({
|
||||
id: `${assessment.id}-ctrl-${control.id}`,
|
||||
title: control.title,
|
||||
description: control.description,
|
||||
source: control.gdpr_ref?.includes('AI Act') ? 'AI Act' : 'DSGVO',
|
||||
sourceArticle: control.gdpr_ref || '',
|
||||
deadline: null,
|
||||
status: 'pending',
|
||||
priority: assessment.result?.risk_level === 'HIGH' || assessment.result?.risk_level === 'UNACCEPTABLE' ? 'high' : 'medium',
|
||||
responsible: 'IT / Compliance',
|
||||
linkedSystems: assessment.title ? [assessment.title] : [],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return obligations
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// MAIN PAGE
|
||||
// =============================================================================
|
||||
|
||||
export default function ObligationsPage() {
|
||||
const { state } = useSDK()
|
||||
const [obligations] = useState<Obligation[]>(mockObligations)
|
||||
const [obligations, setObligations] = useState<Obligation[]>([])
|
||||
const [filter, setFilter] = useState<string>('all')
|
||||
const [loading, setLoading] = useState(true)
|
||||
const [backendAvailable, setBackendAvailable] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
async function loadObligations() {
|
||||
try {
|
||||
const response = await fetch('/api/sdk/v1/ucca/assessments')
|
||||
if (response.ok) {
|
||||
const data = await response.json()
|
||||
const assessments = data.assessments || []
|
||||
if (assessments.length > 0) {
|
||||
const mapped = mapControlsToObligations(assessments)
|
||||
setObligations(mapped)
|
||||
setBackendAvailable(true)
|
||||
setLoading(false)
|
||||
return
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Backend unavailable, use SDK state obligations
|
||||
}
|
||||
|
||||
// Fallback: use obligations from SDK state
|
||||
if (state.obligations && state.obligations.length > 0) {
|
||||
setObligations(state.obligations.map(o => ({
|
||||
id: o.id,
|
||||
title: o.title,
|
||||
description: o.description || '',
|
||||
source: o.source || 'DSGVO',
|
||||
sourceArticle: o.sourceArticle || '',
|
||||
deadline: o.deadline ? new Date(o.deadline) : null,
|
||||
status: (o.status as Obligation['status']) || 'pending',
|
||||
priority: (o.priority as Obligation['priority']) || 'medium',
|
||||
responsible: o.responsible || 'Compliance Team',
|
||||
linkedSystems: o.linkedSystems || [],
|
||||
})))
|
||||
}
|
||||
|
||||
setLoading(false)
|
||||
}
|
||||
|
||||
loadObligations()
|
||||
}, [state.obligations])
|
||||
|
||||
const filteredObligations = filter === 'all'
|
||||
? obligations
|
||||
@@ -226,6 +269,18 @@ export default function ObligationsPage() {
|
||||
</button>
|
||||
</StepHeader>
|
||||
|
||||
{/* Backend Status */}
|
||||
{backendAvailable && (
|
||||
<div className="bg-green-50 border border-green-200 rounded-lg p-3 text-sm text-green-700">
|
||||
Pflichten aus UCCA-Assessments geladen (Live-Daten)
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Loading */}
|
||||
{loading && (
|
||||
<div className="text-center py-8 text-gray-500">Lade Pflichten...</div>
|
||||
)}
|
||||
|
||||
{/* Stats */}
|
||||
<div className="grid grid-cols-1 md:grid-cols-4 gap-4">
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
@@ -288,7 +343,6 @@ export default function ObligationsPage() {
|
||||
<div className="space-y-4">
|
||||
{filteredObligations
|
||||
.sort((a, b) => {
|
||||
// Sort by status priority: overdue > in-progress > pending > completed
|
||||
const statusOrder = { overdue: 0, 'in-progress': 1, pending: 2, completed: 3 }
|
||||
return statusOrder[a.status] - statusOrder[b.status]
|
||||
})
|
||||
@@ -297,7 +351,7 @@ export default function ObligationsPage() {
|
||||
))}
|
||||
</div>
|
||||
|
||||
{filteredObligations.length === 0 && (
|
||||
{filteredObligations.length === 0 && !loading && (
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-12 text-center">
|
||||
<div className="w-16 h-16 mx-auto bg-gray-100 rounded-full flex items-center justify-center mb-4">
|
||||
<svg className="w-8 h-8 text-gray-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
@@ -305,7 +359,9 @@ export default function ObligationsPage() {
|
||||
</svg>
|
||||
</div>
|
||||
<h3 className="text-lg font-semibold text-gray-900">Keine Pflichten gefunden</h3>
|
||||
<p className="mt-2 text-gray-500">Passen Sie den Filter an oder fuegen Sie neue Pflichten hinzu.</p>
|
||||
<p className="mt-2 text-gray-500">
|
||||
Erstellen Sie zuerst ein Use Case Assessment, um automatisch Pflichten abzuleiten.
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -1,85 +1,8 @@
|
||||
'use client'
|
||||
|
||||
import React, { useState } from 'react'
|
||||
import React, { useState, useRef } from 'react'
|
||||
import { useSDK, ScreeningResult, SecurityIssue, SBOMComponent } from '@/lib/sdk'
|
||||
|
||||
// =============================================================================
|
||||
// MOCK DATA
|
||||
// =============================================================================
|
||||
|
||||
const mockSBOMComponents: SBOMComponent[] = [
|
||||
{
|
||||
name: 'react',
|
||||
version: '18.3.0',
|
||||
type: 'library',
|
||||
purl: 'pkg:npm/react@18.3.0',
|
||||
licenses: ['MIT'],
|
||||
vulnerabilities: [],
|
||||
},
|
||||
{
|
||||
name: 'next',
|
||||
version: '15.1.0',
|
||||
type: 'framework',
|
||||
purl: 'pkg:npm/next@15.1.0',
|
||||
licenses: ['MIT'],
|
||||
vulnerabilities: [],
|
||||
},
|
||||
{
|
||||
name: 'lodash',
|
||||
version: '4.17.21',
|
||||
type: 'library',
|
||||
purl: 'pkg:npm/lodash@4.17.21',
|
||||
licenses: ['MIT'],
|
||||
vulnerabilities: [
|
||||
{
|
||||
id: 'CVE-2021-23337',
|
||||
cve: 'CVE-2021-23337',
|
||||
severity: 'HIGH',
|
||||
title: 'Prototype Pollution',
|
||||
description: 'Lodash versions prior to 4.17.21 are vulnerable to Command Injection via the template function.',
|
||||
cvss: 7.2,
|
||||
fixedIn: '4.17.21',
|
||||
},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
const mockSecurityIssues: SecurityIssue[] = [
|
||||
{
|
||||
id: 'issue-1',
|
||||
severity: 'CRITICAL',
|
||||
title: 'SQL Injection Vulnerability',
|
||||
description: 'Unvalidated user input in database queries',
|
||||
cve: 'CVE-2024-12345',
|
||||
cvss: 9.8,
|
||||
affectedComponent: 'database-connector',
|
||||
remediation: 'Use parameterized queries',
|
||||
status: 'OPEN',
|
||||
},
|
||||
{
|
||||
id: 'issue-2',
|
||||
severity: 'HIGH',
|
||||
title: 'Cross-Site Scripting (XSS)',
|
||||
description: 'Reflected XSS in search functionality',
|
||||
cve: 'CVE-2024-12346',
|
||||
cvss: 7.5,
|
||||
affectedComponent: 'search-module',
|
||||
remediation: 'Sanitize and encode user input',
|
||||
status: 'IN_PROGRESS',
|
||||
},
|
||||
{
|
||||
id: 'issue-3',
|
||||
severity: 'MEDIUM',
|
||||
title: 'Insecure Cookie Configuration',
|
||||
description: 'Session cookies missing Secure and HttpOnly flags',
|
||||
cve: null,
|
||||
cvss: 5.3,
|
||||
affectedComponent: 'auth-service',
|
||||
remediation: 'Set Secure and HttpOnly flags on cookies',
|
||||
status: 'OPEN',
|
||||
},
|
||||
]
|
||||
|
||||
// =============================================================================
|
||||
// COMPONENTS
|
||||
// =============================================================================
|
||||
@@ -243,62 +166,120 @@ export default function ScreeningPage() {
|
||||
const [isScanning, setIsScanning] = useState(false)
|
||||
const [scanProgress, setScanProgress] = useState(0)
|
||||
const [scanStatus, setScanStatus] = useState('')
|
||||
const [repositoryUrl, setRepositoryUrl] = useState('')
|
||||
|
||||
const startScan = async () => {
|
||||
if (!repositoryUrl) return
|
||||
const [scanError, setScanError] = useState<string | null>(null)
|
||||
const fileInputRef = useRef<HTMLInputElement>(null)
|
||||
|
||||
const startScan = async (file: File) => {
|
||||
setIsScanning(true)
|
||||
setScanProgress(0)
|
||||
setScanStatus('Initialisierung...')
|
||||
setScanError(null)
|
||||
|
||||
// Simulate scan progress
|
||||
const steps = [
|
||||
{ progress: 10, status: 'Repository wird geklont...' },
|
||||
{ progress: 25, status: 'Abhängigkeiten werden analysiert...' },
|
||||
{ progress: 40, status: 'SBOM wird generiert...' },
|
||||
{ progress: 60, status: 'Schwachstellenscan läuft...' },
|
||||
{ progress: 80, status: 'Lizenzprüfung...' },
|
||||
{ progress: 95, status: 'Bericht wird erstellt...' },
|
||||
{ progress: 100, status: 'Abgeschlossen!' },
|
||||
]
|
||||
// Show progress steps while API processes
|
||||
const progressInterval = setInterval(() => {
|
||||
setScanProgress(prev => {
|
||||
if (prev >= 90) return prev
|
||||
const step = Math.random() * 15 + 5
|
||||
const next = Math.min(prev + step, 90)
|
||||
const statuses = [
|
||||
'Abhaengigkeiten werden analysiert...',
|
||||
'SBOM wird generiert...',
|
||||
'Schwachstellenscan laeuft...',
|
||||
'OSV.dev Datenbank wird abgefragt...',
|
||||
'Lizenzpruefung...',
|
||||
]
|
||||
setScanStatus(statuses[Math.min(Math.floor(next / 20), statuses.length - 1)])
|
||||
return next
|
||||
})
|
||||
}, 600)
|
||||
|
||||
for (const step of steps) {
|
||||
await new Promise(r => setTimeout(r, 800))
|
||||
setScanProgress(step.progress)
|
||||
setScanStatus(step.status)
|
||||
try {
|
||||
const formData = new FormData()
|
||||
formData.append('file', file)
|
||||
formData.append('tenant_id', 'default')
|
||||
|
||||
const response = await fetch('/api/sdk/v1/screening/scan', {
|
||||
method: 'POST',
|
||||
body: formData,
|
||||
})
|
||||
|
||||
clearInterval(progressInterval)
|
||||
|
||||
if (!response.ok) {
|
||||
const err = await response.json().catch(() => ({ error: 'Unknown error' }))
|
||||
throw new Error(err.details || err.error || `HTTP ${response.status}`)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
setScanProgress(100)
|
||||
setScanStatus('Abgeschlossen!')
|
||||
|
||||
// Map backend response to ScreeningResult
|
||||
const issues: SecurityIssue[] = (data.issues || []).map((i: any) => ({
|
||||
id: i.id,
|
||||
severity: i.severity,
|
||||
title: i.title,
|
||||
description: i.description,
|
||||
cve: i.cve || null,
|
||||
cvss: i.cvss || null,
|
||||
affectedComponent: i.affected_component,
|
||||
remediation: i.remediation,
|
||||
status: i.status || 'OPEN',
|
||||
}))
|
||||
|
||||
const components: SBOMComponent[] = (data.components || []).map((c: any) => ({
|
||||
name: c.name,
|
||||
version: c.version,
|
||||
type: c.type,
|
||||
purl: c.purl,
|
||||
licenses: c.licenses || [],
|
||||
vulnerabilities: c.vulnerabilities || [],
|
||||
}))
|
||||
|
||||
const result: ScreeningResult = {
|
||||
id: data.id,
|
||||
status: 'COMPLETED',
|
||||
startedAt: data.started_at ? new Date(data.started_at) : new Date(),
|
||||
completedAt: data.completed_at ? new Date(data.completed_at) : new Date(),
|
||||
sbom: {
|
||||
format: data.sbom_format || 'CycloneDX',
|
||||
version: data.sbom_version || '1.5',
|
||||
components,
|
||||
dependencies: [],
|
||||
generatedAt: new Date(),
|
||||
},
|
||||
securityScan: {
|
||||
totalIssues: data.total_issues || issues.length,
|
||||
critical: data.critical_issues || 0,
|
||||
high: data.high_issues || 0,
|
||||
medium: data.medium_issues || 0,
|
||||
low: data.low_issues || 0,
|
||||
issues,
|
||||
},
|
||||
error: null,
|
||||
}
|
||||
|
||||
dispatch({ type: 'SET_SCREENING', payload: result })
|
||||
issues.forEach(issue => {
|
||||
dispatch({ type: 'ADD_SECURITY_ISSUE', payload: issue })
|
||||
})
|
||||
} catch (error: any) {
|
||||
clearInterval(progressInterval)
|
||||
console.error('Screening scan failed:', error)
|
||||
setScanError(error.message || 'Scan fehlgeschlagen')
|
||||
setScanProgress(0)
|
||||
setScanStatus('')
|
||||
} finally {
|
||||
setIsScanning(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Set mock results
|
||||
const result: ScreeningResult = {
|
||||
id: `scan-${Date.now()}`,
|
||||
status: 'COMPLETED',
|
||||
startedAt: new Date(Date.now() - 30000),
|
||||
completedAt: new Date(),
|
||||
sbom: {
|
||||
format: 'CycloneDX',
|
||||
version: '1.5',
|
||||
components: mockSBOMComponents,
|
||||
dependencies: [],
|
||||
generatedAt: new Date(),
|
||||
},
|
||||
securityScan: {
|
||||
totalIssues: mockSecurityIssues.length,
|
||||
critical: mockSecurityIssues.filter(i => i.severity === 'CRITICAL').length,
|
||||
high: mockSecurityIssues.filter(i => i.severity === 'HIGH').length,
|
||||
medium: mockSecurityIssues.filter(i => i.severity === 'MEDIUM').length,
|
||||
low: mockSecurityIssues.filter(i => i.severity === 'LOW').length,
|
||||
issues: mockSecurityIssues,
|
||||
},
|
||||
error: null,
|
||||
const handleFileSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const file = e.target.files?.[0]
|
||||
if (file) {
|
||||
startScan(file)
|
||||
}
|
||||
|
||||
dispatch({ type: 'SET_SCREENING', payload: result })
|
||||
mockSecurityIssues.forEach(issue => {
|
||||
dispatch({ type: 'ADD_SECURITY_ISSUE', payload: issue })
|
||||
})
|
||||
|
||||
setIsScanning(false)
|
||||
}
|
||||
|
||||
return (
|
||||
@@ -314,30 +295,33 @@ export default function ScreeningPage() {
|
||||
{/* Scan Input */}
|
||||
{!state.screening && !isScanning && (
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
<h3 className="font-semibold text-gray-900 mb-4">Repository scannen</h3>
|
||||
<h3 className="font-semibold text-gray-900 mb-4">Abhaengigkeiten scannen</h3>
|
||||
<p className="text-sm text-gray-500 mb-4">
|
||||
Laden Sie eine Abhaengigkeitsdatei hoch, um ein SBOM zu generieren und Schwachstellen zu erkennen.
|
||||
</p>
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type="file"
|
||||
accept=".json,.txt,.lock"
|
||||
onChange={handleFileSelect}
|
||||
className="hidden"
|
||||
/>
|
||||
<div className="flex gap-4">
|
||||
<input
|
||||
type="text"
|
||||
value={repositoryUrl}
|
||||
onChange={e => setRepositoryUrl(e.target.value)}
|
||||
placeholder="https://github.com/organization/repository"
|
||||
className="flex-1 px-4 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-purple-500 focus:border-transparent"
|
||||
/>
|
||||
<button
|
||||
onClick={startScan}
|
||||
disabled={!repositoryUrl}
|
||||
className={`px-6 py-2 rounded-lg font-medium transition-colors ${
|
||||
repositoryUrl
|
||||
? 'bg-purple-600 text-white hover:bg-purple-700'
|
||||
: 'bg-gray-200 text-gray-400 cursor-not-allowed'
|
||||
}`}
|
||||
onClick={() => fileInputRef.current?.click()}
|
||||
className="px-6 py-2 bg-purple-600 text-white rounded-lg font-medium hover:bg-purple-700 transition-colors"
|
||||
>
|
||||
Scan starten
|
||||
Datei auswaehlen & scannen
|
||||
</button>
|
||||
</div>
|
||||
<p className="mt-2 text-sm text-gray-500">
|
||||
Unterstützte Formate: Git URL, GitHub, GitLab, Bitbucket
|
||||
Unterstuetzte Formate: package-lock.json, requirements.txt, yarn.lock
|
||||
</p>
|
||||
{scanError && (
|
||||
<div className="mt-4 p-3 bg-red-50 border border-red-200 rounded-lg text-sm text-red-700">
|
||||
{scanError}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
|
||||
179
admin-compliance/app/(sdk)/sdk/use-cases/[id]/page.tsx
Normal file
179
admin-compliance/app/(sdk)/sdk/use-cases/[id]/page.tsx
Normal file
@@ -0,0 +1,179 @@
|
||||
'use client'
|
||||
|
||||
import React, { useState, useEffect } from 'react'
|
||||
import { useParams, useRouter } from 'next/navigation'
|
||||
import Link from 'next/link'
|
||||
import { AssessmentResultCard } from '@/components/sdk/use-case-assessment/AssessmentResultCard'
|
||||
|
||||
interface FullAssessment {
|
||||
id: string
|
||||
title: string
|
||||
tenant_id: string
|
||||
domain: string
|
||||
created_at: string
|
||||
use_case_text?: string
|
||||
intake?: Record<string, unknown>
|
||||
result?: {
|
||||
feasibility: string
|
||||
risk_level: string
|
||||
risk_score: number
|
||||
complexity: string
|
||||
dsfa_recommended: boolean
|
||||
art22_risk: boolean
|
||||
training_allowed: string
|
||||
summary: string
|
||||
recommendation: string
|
||||
alternative_approach?: string
|
||||
triggered_rules?: Array<{
|
||||
rule_code: string
|
||||
title: string
|
||||
severity: string
|
||||
gdpr_ref: string
|
||||
}>
|
||||
required_controls?: Array<{
|
||||
id: string
|
||||
title: string
|
||||
description: string
|
||||
effort: string
|
||||
}>
|
||||
recommended_architecture?: Array<{
|
||||
id: string
|
||||
title: string
|
||||
description: string
|
||||
benefit: string
|
||||
}>
|
||||
}
|
||||
}
|
||||
|
||||
export default function AssessmentDetailPage() {
|
||||
const params = useParams()
|
||||
const router = useRouter()
|
||||
const assessmentId = params.id as string
|
||||
|
||||
const [assessment, setAssessment] = useState<FullAssessment | null>(null)
|
||||
const [loading, setLoading] = useState(true)
|
||||
const [error, setError] = useState<string | null>(null)
|
||||
|
||||
useEffect(() => {
|
||||
async function load() {
|
||||
try {
|
||||
const response = await fetch(`/api/sdk/v1/ucca/assessments/${assessmentId}`)
|
||||
if (!response.ok) {
|
||||
throw new Error('Assessment nicht gefunden')
|
||||
}
|
||||
const data = await response.json()
|
||||
setAssessment(data)
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : 'Fehler beim Laden')
|
||||
} finally {
|
||||
setLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
if (assessmentId) {
|
||||
// Try the direct endpoint first; if it fails, try the list endpoint and filter
|
||||
load().catch(() => {
|
||||
// Fallback: fetch from list
|
||||
fetch('/api/sdk/v1/ucca/assessments')
|
||||
.then(r => r.json())
|
||||
.then(data => {
|
||||
const found = (data.assessments || []).find((a: FullAssessment) => a.id === assessmentId)
|
||||
if (found) {
|
||||
setAssessment(found)
|
||||
setError(null)
|
||||
}
|
||||
})
|
||||
.catch(() => {})
|
||||
.finally(() => setLoading(false))
|
||||
})
|
||||
}
|
||||
}, [assessmentId])
|
||||
|
||||
const handleDelete = async () => {
|
||||
if (!confirm('Assessment wirklich loeschen?')) return
|
||||
try {
|
||||
await fetch(`/api/sdk/v1/ucca/assessments/${assessmentId}`, { method: 'DELETE' })
|
||||
router.push('/sdk/use-cases')
|
||||
} catch {
|
||||
// Ignore delete errors
|
||||
}
|
||||
}
|
||||
|
||||
if (loading) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-64">
|
||||
<div className="text-gray-500">Lade Assessment...</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (error || !assessment) {
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
<div className="bg-red-50 border border-red-200 rounded-lg p-6 text-center">
|
||||
<h3 className="text-lg font-semibold text-red-800">Fehler</h3>
|
||||
<p className="text-red-600 mt-1">{error || 'Assessment nicht gefunden'}</p>
|
||||
</div>
|
||||
<Link href="/sdk/use-cases" className="text-purple-600 hover:text-purple-700">
|
||||
Zurueck zur Uebersicht
|
||||
</Link>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-6">
|
||||
{/* Breadcrumb */}
|
||||
<div className="flex items-center gap-2 text-sm text-gray-500">
|
||||
<Link href="/sdk/use-cases" className="hover:text-purple-600">Use Cases</Link>
|
||||
<span>/</span>
|
||||
<span className="text-gray-900">{assessment.title || assessmentId.slice(0, 8)}</span>
|
||||
</div>
|
||||
|
||||
{/* Header */}
|
||||
<div className="flex items-start justify-between">
|
||||
<div>
|
||||
<h1 className="text-2xl font-bold text-gray-900">{assessment.title || 'Assessment Detail'}</h1>
|
||||
<div className="flex items-center gap-4 mt-2 text-sm text-gray-500">
|
||||
<span>Domain: {assessment.domain}</span>
|
||||
<span>Erstellt: {new Date(assessment.created_at).toLocaleDateString('de-DE')}</span>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
<button
|
||||
onClick={handleDelete}
|
||||
className="px-4 py-2 text-sm text-red-600 hover:bg-red-50 rounded-lg transition-colors"
|
||||
>
|
||||
Loeschen
|
||||
</button>
|
||||
<Link
|
||||
href="/sdk/use-cases"
|
||||
className="px-4 py-2 text-sm bg-gray-100 text-gray-700 rounded-lg hover:bg-gray-200 transition-colors"
|
||||
>
|
||||
Zurueck
|
||||
</Link>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Use Case Text */}
|
||||
{assessment.use_case_text && (
|
||||
<div className="bg-gray-50 rounded-xl border border-gray-200 p-6">
|
||||
<h3 className="text-sm font-medium text-gray-500 mb-2">Beschreibung des Anwendungsfalls</h3>
|
||||
<p className="text-gray-800 whitespace-pre-wrap">{assessment.use_case_text}</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Result */}
|
||||
{assessment.result && (
|
||||
<AssessmentResultCard result={assessment.result} />
|
||||
)}
|
||||
|
||||
{/* No Result */}
|
||||
{!assessment.result && (
|
||||
<div className="bg-yellow-50 border border-yellow-200 rounded-xl p-6 text-center">
|
||||
<p className="text-yellow-700">Dieses Assessment hat noch kein Ergebnis.</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
464
admin-compliance/app/(sdk)/sdk/use-cases/new/page.tsx
Normal file
464
admin-compliance/app/(sdk)/sdk/use-cases/new/page.tsx
Normal file
@@ -0,0 +1,464 @@
|
||||
'use client'
|
||||
|
||||
import React, { useState } from 'react'
|
||||
import { useRouter } from 'next/navigation'
|
||||
import { AssessmentResultCard } from '@/components/sdk/use-case-assessment/AssessmentResultCard'
|
||||
|
||||
// =============================================================================
|
||||
// WIZARD STEPS CONFIG
|
||||
// =============================================================================
|
||||
|
||||
const WIZARD_STEPS = [
|
||||
{ id: 1, title: 'Grundlegendes', description: 'Titel und Beschreibung' },
|
||||
{ id: 2, title: 'Datenkategorien', description: 'Welche Daten werden verarbeitet?' },
|
||||
{ id: 3, title: 'Automatisierung', description: 'Grad der Automatisierung' },
|
||||
{ id: 4, title: 'Hosting & Modell', description: 'Technische Details' },
|
||||
{ id: 5, title: 'Datenhaltung', description: 'Aufbewahrung und Speicherung' },
|
||||
]
|
||||
|
||||
const DOMAINS = [
|
||||
{ value: 'healthcare', label: 'Gesundheit' },
|
||||
{ value: 'finance', label: 'Finanzen' },
|
||||
{ value: 'education', label: 'Bildung' },
|
||||
{ value: 'retail', label: 'Handel' },
|
||||
{ value: 'it_services', label: 'IT-Dienstleistungen' },
|
||||
{ value: 'consulting', label: 'Beratung' },
|
||||
{ value: 'manufacturing', label: 'Produktion' },
|
||||
{ value: 'hr', label: 'Personalwesen' },
|
||||
{ value: 'marketing', label: 'Marketing' },
|
||||
{ value: 'legal', label: 'Recht' },
|
||||
{ value: 'public', label: 'Oeffentlicher Sektor' },
|
||||
{ value: 'general', label: 'Allgemein' },
|
||||
]
|
||||
|
||||
// =============================================================================
|
||||
// MAIN COMPONENT
|
||||
// =============================================================================
|
||||
|
||||
export default function NewUseCasePage() {
|
||||
const router = useRouter()
|
||||
const [currentStep, setCurrentStep] = useState(1)
|
||||
const [isSubmitting, setIsSubmitting] = useState(false)
|
||||
const [result, setResult] = useState<unknown>(null)
|
||||
const [error, setError] = useState<string | null>(null)
|
||||
|
||||
// Form state
|
||||
const [form, setForm] = useState({
|
||||
title: '',
|
||||
use_case_text: '',
|
||||
domain: 'general',
|
||||
// Data Types
|
||||
personal_data: false,
|
||||
special_categories: false,
|
||||
minors_data: false,
|
||||
health_data: false,
|
||||
biometric_data: false,
|
||||
financial_data: false,
|
||||
// Purpose
|
||||
purpose_profiling: false,
|
||||
purpose_automated_decision: false,
|
||||
purpose_marketing: false,
|
||||
purpose_analytics: false,
|
||||
purpose_service_delivery: false,
|
||||
// Automation
|
||||
automation: 'assistive' as 'assistive' | 'semi_automated' | 'fully_automated',
|
||||
// Hosting
|
||||
hosting_provider: 'self_hosted',
|
||||
hosting_region: 'eu',
|
||||
// Model Usage
|
||||
model_rag: false,
|
||||
model_finetune: false,
|
||||
model_training: false,
|
||||
model_inference: true,
|
||||
// Retention
|
||||
retention_days: 90,
|
||||
retention_purpose: '',
|
||||
})
|
||||
|
||||
const updateForm = (updates: Partial<typeof form>) => {
|
||||
setForm(prev => ({ ...prev, ...updates }))
|
||||
}
|
||||
|
||||
const handleSubmit = async () => {
|
||||
setIsSubmitting(true)
|
||||
setError(null)
|
||||
try {
|
||||
const intake = {
|
||||
title: form.title,
|
||||
use_case_text: form.use_case_text,
|
||||
domain: form.domain,
|
||||
data_types: {
|
||||
personal_data: form.personal_data,
|
||||
special_categories: form.special_categories,
|
||||
minors_data: form.minors_data,
|
||||
health_data: form.health_data,
|
||||
biometric_data: form.biometric_data,
|
||||
financial_data: form.financial_data,
|
||||
},
|
||||
purpose: {
|
||||
profiling: form.purpose_profiling,
|
||||
automated_decision: form.purpose_automated_decision,
|
||||
marketing: form.purpose_marketing,
|
||||
analytics: form.purpose_analytics,
|
||||
service_delivery: form.purpose_service_delivery,
|
||||
},
|
||||
automation: form.automation,
|
||||
hosting: {
|
||||
provider: form.hosting_provider,
|
||||
region: form.hosting_region,
|
||||
},
|
||||
model_usage: {
|
||||
rag: form.model_rag,
|
||||
finetune: form.model_finetune,
|
||||
training: form.model_training,
|
||||
inference: form.model_inference,
|
||||
},
|
||||
retention: {
|
||||
days: form.retention_days,
|
||||
purpose: form.retention_purpose,
|
||||
},
|
||||
store_raw_text: true,
|
||||
}
|
||||
|
||||
const response = await fetch('/api/sdk/v1/ucca/assess', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(intake),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errData = await response.json().catch(() => null)
|
||||
throw new Error(errData?.error || `HTTP ${response.status}`)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
setResult(data)
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : 'Fehler bei der Bewertung')
|
||||
} finally {
|
||||
setIsSubmitting(false)
|
||||
}
|
||||
}
|
||||
|
||||
// If we have a result, show it
|
||||
if (result) {
|
||||
const r = result as { assessment?: { id: string }; result?: Record<string, unknown> }
|
||||
return (
|
||||
<div className="max-w-4xl mx-auto space-y-6">
|
||||
<div className="flex items-center justify-between">
|
||||
<h1 className="text-2xl font-bold text-gray-900">Assessment Ergebnis</h1>
|
||||
<div className="flex gap-2">
|
||||
{r.assessment?.id && (
|
||||
<button
|
||||
onClick={() => router.push(`/sdk/use-cases/${r.assessment!.id}`)}
|
||||
className="px-4 py-2 bg-purple-600 text-white rounded-lg hover:bg-purple-700"
|
||||
>
|
||||
Zum Assessment
|
||||
</button>
|
||||
)}
|
||||
<button
|
||||
onClick={() => router.push('/sdk/use-cases')}
|
||||
className="px-4 py-2 bg-gray-100 text-gray-700 rounded-lg hover:bg-gray-200"
|
||||
>
|
||||
Zur Uebersicht
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
{r.result && (
|
||||
<AssessmentResultCard result={r.result as Parameters<typeof AssessmentResultCard>[0]['result']} />
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="max-w-3xl mx-auto space-y-6">
|
||||
{/* Header */}
|
||||
<div>
|
||||
<h1 className="text-2xl font-bold text-gray-900">Neues Use Case Assessment</h1>
|
||||
<p className="mt-1 text-gray-500">
|
||||
Beschreiben Sie Ihren KI-Anwendungsfall Schritt fuer Schritt
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Step Indicator */}
|
||||
<div className="flex items-center gap-2">
|
||||
{WIZARD_STEPS.map((step, idx) => (
|
||||
<React.Fragment key={step.id}>
|
||||
<button
|
||||
onClick={() => setCurrentStep(step.id)}
|
||||
className={`flex items-center gap-2 px-3 py-2 rounded-lg text-sm transition-colors ${
|
||||
currentStep === step.id
|
||||
? 'bg-purple-600 text-white'
|
||||
: currentStep > step.id
|
||||
? 'bg-green-100 text-green-700'
|
||||
: 'bg-gray-100 text-gray-500'
|
||||
}`}
|
||||
>
|
||||
<span className="w-6 h-6 rounded-full bg-white/20 flex items-center justify-center text-xs font-bold">
|
||||
{currentStep > step.id ? '✓' : step.id}
|
||||
</span>
|
||||
<span className="hidden md:inline">{step.title}</span>
|
||||
</button>
|
||||
{idx < WIZARD_STEPS.length - 1 && <div className="flex-1 h-px bg-gray-200" />}
|
||||
</React.Fragment>
|
||||
))}
|
||||
</div>
|
||||
|
||||
{/* Error */}
|
||||
{error && (
|
||||
<div className="bg-red-50 border border-red-200 rounded-lg p-4 text-red-700">{error}</div>
|
||||
)}
|
||||
|
||||
{/* Step Content */}
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
{/* Step 1: Grundlegendes */}
|
||||
{currentStep === 1 && (
|
||||
<div className="space-y-4">
|
||||
<h2 className="text-lg font-semibold text-gray-900">Grundlegende Informationen</h2>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-gray-700 mb-1">Titel</label>
|
||||
<input
|
||||
type="text"
|
||||
value={form.title}
|
||||
onChange={e => updateForm({ title: e.target.value })}
|
||||
placeholder="z.B. Chatbot fuer Kundenservice"
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-purple-500 focus:border-transparent"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-gray-700 mb-1">Beschreibung</label>
|
||||
<textarea
|
||||
value={form.use_case_text}
|
||||
onChange={e => updateForm({ use_case_text: e.target.value })}
|
||||
rows={4}
|
||||
placeholder="Beschreiben Sie den Anwendungsfall..."
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-purple-500 focus:border-transparent"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-gray-700 mb-1">Branche</label>
|
||||
<select
|
||||
value={form.domain}
|
||||
onChange={e => updateForm({ domain: e.target.value })}
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-purple-500"
|
||||
>
|
||||
{DOMAINS.map(d => (
|
||||
<option key={d.value} value={d.value}>{d.label}</option>
|
||||
))}
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Step 2: Datenkategorien */}
|
||||
{currentStep === 2 && (
|
||||
<div className="space-y-4">
|
||||
<h2 className="text-lg font-semibold text-gray-900">Welche Daten werden verarbeitet?</h2>
|
||||
{[
|
||||
{ key: 'personal_data', label: 'Personenbezogene Daten', desc: 'Name, E-Mail, Adresse etc.' },
|
||||
{ key: 'special_categories', label: 'Besondere Kategorien (Art. 9)', desc: 'Religion, Gesundheit, politische Meinung' },
|
||||
{ key: 'health_data', label: 'Gesundheitsdaten', desc: 'Diagnosen, Medikation, Fitness' },
|
||||
{ key: 'biometric_data', label: 'Biometrische Daten', desc: 'Gesichtserkennung, Fingerabdruck, Stimme' },
|
||||
{ key: 'minors_data', label: 'Daten von Minderjaehrigen', desc: 'Unter 16 Jahren' },
|
||||
{ key: 'financial_data', label: 'Finanzdaten', desc: 'Kontodaten, Transaktionen, Kreditwuerdigkeit' },
|
||||
].map(item => (
|
||||
<label key={item.key} className="flex items-start gap-3 p-3 bg-gray-50 rounded-lg cursor-pointer hover:bg-gray-100">
|
||||
<input
|
||||
type="checkbox"
|
||||
checked={form[item.key as keyof typeof form] as boolean}
|
||||
onChange={e => updateForm({ [item.key]: e.target.checked })}
|
||||
className="mt-1 rounded border-gray-300 text-purple-600 focus:ring-purple-500"
|
||||
/>
|
||||
<div>
|
||||
<div className="font-medium text-gray-900">{item.label}</div>
|
||||
<div className="text-sm text-gray-500">{item.desc}</div>
|
||||
</div>
|
||||
</label>
|
||||
))}
|
||||
|
||||
<h3 className="text-sm font-medium text-gray-700 mt-4">Zweck der Verarbeitung</h3>
|
||||
{[
|
||||
{ key: 'purpose_profiling', label: 'Profiling', desc: 'Automatisierte Analyse personenbezogener Aspekte' },
|
||||
{ key: 'purpose_automated_decision', label: 'Automatisierte Entscheidung', desc: 'Art. 22 DSGVO — Entscheidung ohne menschliches Zutun' },
|
||||
{ key: 'purpose_marketing', label: 'Marketing', desc: 'Werbung, Personalisierung, Targeting' },
|
||||
{ key: 'purpose_analytics', label: 'Analytics', desc: 'Statistische Auswertung, Business Intelligence' },
|
||||
{ key: 'purpose_service_delivery', label: 'Serviceerbringung', desc: 'Kernfunktion des Produkts/Services' },
|
||||
].map(item => (
|
||||
<label key={item.key} className="flex items-start gap-3 p-3 bg-gray-50 rounded-lg cursor-pointer hover:bg-gray-100">
|
||||
<input
|
||||
type="checkbox"
|
||||
checked={form[item.key as keyof typeof form] as boolean}
|
||||
onChange={e => updateForm({ [item.key]: e.target.checked })}
|
||||
className="mt-1 rounded border-gray-300 text-purple-600 focus:ring-purple-500"
|
||||
/>
|
||||
<div>
|
||||
<div className="font-medium text-gray-900">{item.label}</div>
|
||||
<div className="text-sm text-gray-500">{item.desc}</div>
|
||||
</div>
|
||||
</label>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Step 3: Automatisierung */}
|
||||
{currentStep === 3 && (
|
||||
<div className="space-y-4">
|
||||
<h2 className="text-lg font-semibold text-gray-900">Grad der Automatisierung</h2>
|
||||
{[
|
||||
{ value: 'assistive', label: 'Assistiv', desc: 'KI unterstuetzt, Mensch entscheidet immer' },
|
||||
{ value: 'semi_automated', label: 'Teilautomatisiert', desc: 'KI schlaegt vor, Mensch prueft und bestaetigt' },
|
||||
{ value: 'fully_automated', label: 'Vollautomatisiert', desc: 'KI entscheidet autonom, Mensch ueberwacht nur' },
|
||||
].map(item => (
|
||||
<label
|
||||
key={item.value}
|
||||
className={`flex items-start gap-3 p-4 rounded-lg border-2 cursor-pointer transition-all ${
|
||||
form.automation === item.value
|
||||
? 'border-purple-500 bg-purple-50'
|
||||
: 'border-gray-200 hover:border-gray-300'
|
||||
}`}
|
||||
>
|
||||
<input
|
||||
type="radio"
|
||||
name="automation"
|
||||
value={item.value}
|
||||
checked={form.automation === item.value}
|
||||
onChange={e => updateForm({ automation: e.target.value as typeof form.automation })}
|
||||
className="mt-1 text-purple-600 focus:ring-purple-500"
|
||||
/>
|
||||
<div>
|
||||
<div className="font-medium text-gray-900">{item.label}</div>
|
||||
<div className="text-sm text-gray-500">{item.desc}</div>
|
||||
</div>
|
||||
</label>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Step 4: Hosting & Modell */}
|
||||
{currentStep === 4 && (
|
||||
<div className="space-y-4">
|
||||
<h2 className="text-lg font-semibold text-gray-900">Technische Details</h2>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-gray-700 mb-1">Hosting</label>
|
||||
<select
|
||||
value={form.hosting_provider}
|
||||
onChange={e => updateForm({ hosting_provider: e.target.value })}
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg"
|
||||
>
|
||||
<option value="self_hosted">Eigenes Hosting</option>
|
||||
<option value="aws">AWS</option>
|
||||
<option value="azure">Microsoft Azure</option>
|
||||
<option value="gcp">Google Cloud</option>
|
||||
<option value="hetzner">Hetzner (DE)</option>
|
||||
<option value="other">Anderer Anbieter</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-gray-700 mb-1">Region</label>
|
||||
<select
|
||||
value={form.hosting_region}
|
||||
onChange={e => updateForm({ hosting_region: e.target.value })}
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg"
|
||||
>
|
||||
<option value="eu">EU</option>
|
||||
<option value="de">Deutschland</option>
|
||||
<option value="us">USA</option>
|
||||
<option value="other">Andere</option>
|
||||
</select>
|
||||
</div>
|
||||
<h3 className="text-sm font-medium text-gray-700 mt-4">Modell-Nutzung</h3>
|
||||
{[
|
||||
{ key: 'model_inference', label: 'Inferenz', desc: 'Vortrainiertes Modell nutzen (Standard)' },
|
||||
{ key: 'model_rag', label: 'RAG (Retrieval-Augmented)', desc: 'Eigene Daten als Kontext bereitstellen' },
|
||||
{ key: 'model_finetune', label: 'Fine-Tuning', desc: 'Modell mit eigenen Daten nachtrainieren' },
|
||||
{ key: 'model_training', label: 'Training', desc: 'Eigenes Modell von Grund auf trainieren' },
|
||||
].map(item => (
|
||||
<label key={item.key} className="flex items-start gap-3 p-3 bg-gray-50 rounded-lg cursor-pointer hover:bg-gray-100">
|
||||
<input
|
||||
type="checkbox"
|
||||
checked={form[item.key as keyof typeof form] as boolean}
|
||||
onChange={e => updateForm({ [item.key]: e.target.checked })}
|
||||
className="mt-1 rounded border-gray-300 text-purple-600 focus:ring-purple-500"
|
||||
/>
|
||||
<div>
|
||||
<div className="font-medium text-gray-900">{item.label}</div>
|
||||
<div className="text-sm text-gray-500">{item.desc}</div>
|
||||
</div>
|
||||
</label>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Step 5: Datenhaltung */}
|
||||
{currentStep === 5 && (
|
||||
<div className="space-y-4">
|
||||
<h2 className="text-lg font-semibold text-gray-900">Datenhaltung & Aufbewahrung</h2>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||
Aufbewahrungsdauer (Tage)
|
||||
</label>
|
||||
<input
|
||||
type="number"
|
||||
min={0}
|
||||
value={form.retention_days}
|
||||
onChange={e => updateForm({ retention_days: parseInt(e.target.value) || 0 })}
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||
Zweck der Aufbewahrung
|
||||
</label>
|
||||
<textarea
|
||||
value={form.retention_purpose}
|
||||
onChange={e => updateForm({ retention_purpose: e.target.value })}
|
||||
rows={3}
|
||||
placeholder="z.B. Vertragliche Pflichten, gesetzliche Aufbewahrungsfristen..."
|
||||
className="w-full px-4 py-2 border border-gray-300 rounded-lg"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Navigation Buttons */}
|
||||
<div className="flex items-center justify-between">
|
||||
<button
|
||||
onClick={() => currentStep > 1 ? setCurrentStep(currentStep - 1) : router.push('/sdk/use-cases')}
|
||||
className="px-4 py-2 text-gray-600 hover:bg-gray-100 rounded-lg transition-colors"
|
||||
>
|
||||
{currentStep === 1 ? 'Abbrechen' : 'Zurueck'}
|
||||
</button>
|
||||
|
||||
{currentStep < 5 ? (
|
||||
<button
|
||||
onClick={() => setCurrentStep(currentStep + 1)}
|
||||
disabled={currentStep === 1 && !form.title}
|
||||
className="px-6 py-2 bg-purple-600 text-white rounded-lg hover:bg-purple-700 transition-colors disabled:opacity-50"
|
||||
>
|
||||
Weiter
|
||||
</button>
|
||||
) : (
|
||||
<button
|
||||
onClick={handleSubmit}
|
||||
disabled={isSubmitting || !form.title}
|
||||
className="px-6 py-2 bg-green-600 text-white rounded-lg hover:bg-green-700 transition-colors disabled:opacity-50 flex items-center gap-2"
|
||||
>
|
||||
{isSubmitting ? (
|
||||
<>
|
||||
<svg className="w-5 h-5 animate-spin" fill="none" viewBox="0 0 24 24">
|
||||
<circle className="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="4" />
|
||||
<path className="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z" />
|
||||
</svg>
|
||||
Bewerte...
|
||||
</>
|
||||
) : (
|
||||
'Assessment starten'
|
||||
)}
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
210
admin-compliance/app/(sdk)/sdk/use-cases/page.tsx
Normal file
210
admin-compliance/app/(sdk)/sdk/use-cases/page.tsx
Normal file
@@ -0,0 +1,210 @@
|
||||
'use client'
|
||||
|
||||
import React, { useState, useEffect } from 'react'
|
||||
import Link from 'next/link'
|
||||
import { RiskScoreGauge } from '@/components/sdk/use-case-assessment/RiskScoreGauge'
|
||||
|
||||
interface Assessment {
|
||||
id: string
|
||||
title: string
|
||||
feasibility: string
|
||||
risk_level: string
|
||||
risk_score: number
|
||||
domain: string
|
||||
created_at: string
|
||||
}
|
||||
|
||||
const FEASIBILITY_STYLES: Record<string, { bg: string; text: string; label: string }> = {
|
||||
YES: { bg: 'bg-green-100', text: 'text-green-700', label: 'Machbar' },
|
||||
CONDITIONAL: { bg: 'bg-yellow-100', text: 'text-yellow-700', label: 'Bedingt' },
|
||||
NO: { bg: 'bg-red-100', text: 'text-red-700', label: 'Nein' },
|
||||
}
|
||||
|
||||
export default function UseCasesPage() {
|
||||
const [assessments, setAssessments] = useState<Assessment[]>([])
|
||||
const [loading, setLoading] = useState(true)
|
||||
const [error, setError] = useState<string | null>(null)
|
||||
const [filterFeasibility, setFilterFeasibility] = useState<string>('all')
|
||||
const [filterRisk, setFilterRisk] = useState<string>('all')
|
||||
|
||||
useEffect(() => {
|
||||
fetchAssessments()
|
||||
}, [])
|
||||
|
||||
async function fetchAssessments() {
|
||||
try {
|
||||
setLoading(true)
|
||||
const response = await fetch('/api/sdk/v1/ucca/assessments')
|
||||
if (!response.ok) {
|
||||
throw new Error('Fehler beim Laden der Assessments')
|
||||
}
|
||||
const data = await response.json()
|
||||
setAssessments(data.assessments || [])
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : 'Unbekannter Fehler')
|
||||
} finally {
|
||||
setLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
const filtered = assessments.filter(a => {
|
||||
if (filterFeasibility !== 'all' && a.feasibility !== filterFeasibility) return false
|
||||
if (filterRisk !== 'all' && a.risk_level !== filterRisk) return false
|
||||
return true
|
||||
})
|
||||
|
||||
const stats = {
|
||||
total: assessments.length,
|
||||
feasible: assessments.filter(a => a.feasibility === 'YES').length,
|
||||
conditional: assessments.filter(a => a.feasibility === 'CONDITIONAL').length,
|
||||
rejected: assessments.filter(a => a.feasibility === 'NO').length,
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-6">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between">
|
||||
<div>
|
||||
<h1 className="text-2xl font-bold text-gray-900">Use Case Assessment</h1>
|
||||
<p className="mt-1 text-gray-500">
|
||||
KI-Anwendungsfaelle erfassen und auf Compliance pruefen
|
||||
</p>
|
||||
</div>
|
||||
<Link
|
||||
href="/sdk/use-cases/new"
|
||||
className="flex items-center gap-2 px-4 py-2 bg-purple-600 text-white rounded-lg hover:bg-purple-700 transition-colors"
|
||||
>
|
||||
<svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 6v6m0 0v6m0-6h6m-6 0H6" />
|
||||
</svg>
|
||||
Neues Assessment
|
||||
</Link>
|
||||
</div>
|
||||
|
||||
{/* Stats */}
|
||||
<div className="grid grid-cols-1 md:grid-cols-4 gap-4">
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
<div className="text-sm text-gray-500">Gesamt</div>
|
||||
<div className="text-3xl font-bold text-gray-900">{stats.total}</div>
|
||||
</div>
|
||||
<div className="bg-white rounded-xl border border-green-200 p-6">
|
||||
<div className="text-sm text-green-600">Machbar</div>
|
||||
<div className="text-3xl font-bold text-green-600">{stats.feasible}</div>
|
||||
</div>
|
||||
<div className="bg-white rounded-xl border border-yellow-200 p-6">
|
||||
<div className="text-sm text-yellow-600">Bedingt</div>
|
||||
<div className="text-3xl font-bold text-yellow-600">{stats.conditional}</div>
|
||||
</div>
|
||||
<div className="bg-white rounded-xl border border-red-200 p-6">
|
||||
<div className="text-sm text-red-600">Abgelehnt</div>
|
||||
<div className="text-3xl font-bold text-red-600">{stats.rejected}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Filters */}
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-sm text-gray-500">Machbarkeit:</span>
|
||||
{['all', 'YES', 'CONDITIONAL', 'NO'].map(f => (
|
||||
<button
|
||||
key={f}
|
||||
onClick={() => setFilterFeasibility(f)}
|
||||
className={`px-3 py-1 text-sm rounded-full transition-colors ${
|
||||
filterFeasibility === f
|
||||
? 'bg-purple-600 text-white'
|
||||
: 'bg-gray-100 text-gray-600 hover:bg-gray-200'
|
||||
}`}
|
||||
>
|
||||
{f === 'all' ? 'Alle' : FEASIBILITY_STYLES[f]?.label || f}
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-sm text-gray-500">Risiko:</span>
|
||||
{['all', 'MINIMAL', 'LOW', 'MEDIUM', 'HIGH', 'UNACCEPTABLE'].map(f => (
|
||||
<button
|
||||
key={f}
|
||||
onClick={() => setFilterRisk(f)}
|
||||
className={`px-3 py-1 text-sm rounded-full transition-colors ${
|
||||
filterRisk === f
|
||||
? 'bg-purple-600 text-white'
|
||||
: 'bg-gray-100 text-gray-600 hover:bg-gray-200'
|
||||
}`}
|
||||
>
|
||||
{f === 'all' ? 'Alle' : f}
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Error */}
|
||||
{error && (
|
||||
<div className="bg-red-50 border border-red-200 rounded-lg p-4 text-red-700">
|
||||
{error}
|
||||
<button onClick={fetchAssessments} className="ml-3 underline">Erneut versuchen</button>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Loading */}
|
||||
{loading && (
|
||||
<div className="text-center py-12 text-gray-500">Lade Assessments...</div>
|
||||
)}
|
||||
|
||||
{/* Assessment List */}
|
||||
{!loading && filtered.length > 0 && (
|
||||
<div className="space-y-4">
|
||||
{filtered.map(assessment => {
|
||||
const feasibility = FEASIBILITY_STYLES[assessment.feasibility] || FEASIBILITY_STYLES.YES
|
||||
return (
|
||||
<Link
|
||||
key={assessment.id}
|
||||
href={`/sdk/use-cases/${assessment.id}`}
|
||||
className="block bg-white rounded-xl border border-gray-200 p-6 hover:border-purple-300 hover:shadow-md transition-all"
|
||||
>
|
||||
<div className="flex items-center gap-6">
|
||||
<RiskScoreGauge score={assessment.risk_score} riskLevel={assessment.risk_level} size="sm" />
|
||||
<div className="flex-1">
|
||||
<div className="flex items-center gap-2 mb-1">
|
||||
<h3 className="text-lg font-semibold text-gray-900">{assessment.title || 'Unbenanntes Assessment'}</h3>
|
||||
<span className={`px-2 py-0.5 text-xs rounded-full ${feasibility.bg} ${feasibility.text}`}>
|
||||
{feasibility.label}
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-4 text-sm text-gray-500">
|
||||
<span>{assessment.domain}</span>
|
||||
<span>{new Date(assessment.created_at).toLocaleDateString('de-DE')}</span>
|
||||
</div>
|
||||
</div>
|
||||
<svg className="w-5 h-5 text-gray-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 5l7 7-7 7" />
|
||||
</svg>
|
||||
</div>
|
||||
</Link>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Empty State */}
|
||||
{!loading && filtered.length === 0 && !error && (
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-12 text-center">
|
||||
<div className="w-16 h-16 mx-auto bg-purple-100 rounded-full flex items-center justify-center mb-4">
|
||||
<svg className="w-8 h-8 text-purple-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9.663 17h4.673M12 3v1m6.364 1.636l-.707.707M21 12h-1M4 12H3m3.343-5.657l-.707-.707m2.828 9.9a5 5 0 117.072 0l-.548.547A3.374 3.374 0 0014 18.469V19a2 2 0 11-4 0v-.531c0-.895-.356-1.754-.988-2.386l-.548-.547z" />
|
||||
</svg>
|
||||
</div>
|
||||
<h3 className="text-lg font-semibold text-gray-900">Noch keine Assessments</h3>
|
||||
<p className="mt-2 text-gray-500 mb-4">
|
||||
Erstellen Sie Ihr erstes Use Case Assessment, um die Compliance-Bewertung zu starten.
|
||||
</p>
|
||||
<Link
|
||||
href="/sdk/use-cases/new"
|
||||
className="inline-flex items-center gap-2 px-4 py-2 bg-purple-600 text-white rounded-lg hover:bg-purple-700"
|
||||
>
|
||||
Erstes Assessment erstellen
|
||||
</Link>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
81
admin-compliance/app/api/sdk/v1/company-profile/route.ts
Normal file
81
admin-compliance/app/api/sdk/v1/company-profile/route.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.BACKEND_URL || 'http://localhost:8002'
|
||||
|
||||
/**
|
||||
* Proxy: GET /api/sdk/v1/company-profile → Backend GET /api/v1/company-profile
|
||||
*/
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url)
|
||||
const tenantId = searchParams.get('tenant_id') || 'default'
|
||||
|
||||
const response = await fetch(
|
||||
`${BACKEND_URL}/api/v1/company-profile?tenant_id=${encodeURIComponent(tenantId)}`,
|
||||
{
|
||||
headers: {
|
||||
'X-Tenant-ID': tenantId,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) {
|
||||
return NextResponse.json(null, { status: 404 })
|
||||
}
|
||||
const errorText = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Backend error', details: errorText },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Failed to fetch company profile:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to connect to backend' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Proxy: POST /api/sdk/v1/company-profile → Backend POST /api/v1/company-profile
|
||||
*/
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json()
|
||||
const tenantId = body.tenant_id || 'default'
|
||||
|
||||
const response = await fetch(
|
||||
`${BACKEND_URL}/api/v1/company-profile?tenant_id=${encodeURIComponent(tenantId)}`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-Tenant-ID': tenantId,
|
||||
},
|
||||
body: JSON.stringify(body),
|
||||
}
|
||||
)
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Backend error', details: errorText },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Failed to save company profile:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to connect to backend' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
}
|
||||
36
admin-compliance/app/api/sdk/v1/import/analyze/route.ts
Normal file
36
admin-compliance/app/api/sdk/v1/import/analyze/route.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.BACKEND_URL || 'http://localhost:8002'
|
||||
|
||||
/**
|
||||
* Proxy: POST /api/sdk/v1/import/analyze → Backend POST /api/v1/import/analyze
|
||||
* Forwards multipart form data (PDF file upload) to the backend for analysis.
|
||||
*/
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const formData = await request.formData()
|
||||
|
||||
const response = await fetch(`${BACKEND_URL}/api/v1/import/analyze`, {
|
||||
method: 'POST',
|
||||
body: formData,
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
console.error('Import analyze error:', errorText)
|
||||
return NextResponse.json(
|
||||
{ error: 'Backend error', details: errorText },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Failed to call import analyze:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to connect to backend' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
}
|
||||
56
admin-compliance/app/api/sdk/v1/modules/route.ts
Normal file
56
admin-compliance/app/api/sdk/v1/modules/route.ts
Normal file
@@ -0,0 +1,56 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.BACKEND_URL || 'http://localhost:8002'
|
||||
|
||||
/**
|
||||
* Proxy to backend-compliance /api/modules endpoint.
|
||||
* Returns the list of service modules from the database.
|
||||
*/
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url)
|
||||
const params = new URLSearchParams()
|
||||
|
||||
// Forward filter params
|
||||
const serviceType = searchParams.get('service_type')
|
||||
const criticality = searchParams.get('criticality')
|
||||
const processesPii = searchParams.get('processes_pii')
|
||||
const aiComponents = searchParams.get('ai_components')
|
||||
|
||||
if (serviceType) params.set('service_type', serviceType)
|
||||
if (criticality) params.set('criticality', criticality)
|
||||
if (processesPii) params.set('processes_pii', processesPii)
|
||||
if (aiComponents) params.set('ai_components', aiComponents)
|
||||
|
||||
const queryString = params.toString()
|
||||
const url = `${BACKEND_URL}/api/modules${queryString ? `?${queryString}` : ''}`
|
||||
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(request.headers.get('X-Tenant-ID') && {
|
||||
'X-Tenant-ID': request.headers.get('X-Tenant-ID') as string,
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
console.error('Backend modules error:', errorText)
|
||||
return NextResponse.json(
|
||||
{ error: 'Backend error', details: errorText },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Failed to fetch modules from backend:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to connect to backend' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
}
|
||||
36
admin-compliance/app/api/sdk/v1/screening/scan/route.ts
Normal file
36
admin-compliance/app/api/sdk/v1/screening/scan/route.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.BACKEND_URL || 'http://localhost:8002'
|
||||
|
||||
/**
|
||||
* Proxy: POST /api/sdk/v1/screening/scan → Backend POST /api/v1/screening/scan
|
||||
* Forwards multipart form data (dependency file upload) to the backend for scanning.
|
||||
*/
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const formData = await request.formData()
|
||||
|
||||
const response = await fetch(`${BACKEND_URL}/api/v1/screening/scan`, {
|
||||
method: 'POST',
|
||||
body: formData,
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
console.error('Screening scan error:', errorText)
|
||||
return NextResponse.json(
|
||||
{ error: 'Backend error', details: errorText },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Failed to call screening scan:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to connect to backend' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,5 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
import { Pool } from 'pg'
|
||||
|
||||
/**
|
||||
* SDK State Management API
|
||||
@@ -11,7 +12,7 @@ import { NextRequest, NextResponse } from 'next/server'
|
||||
* - Versioning for optimistic locking
|
||||
* - Last-Modified headers
|
||||
* - ETag support for caching
|
||||
* - Prepared for PostgreSQL migration
|
||||
* - PostgreSQL persistence (with InMemory fallback)
|
||||
*/
|
||||
|
||||
// =============================================================================
|
||||
@@ -27,27 +28,9 @@ interface StoredState {
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// STORAGE LAYER (Abstract - Easy to swap to PostgreSQL)
|
||||
// STORAGE LAYER
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* In-memory storage for development
|
||||
* TODO: Replace with PostgreSQL implementation
|
||||
*
|
||||
* PostgreSQL Schema:
|
||||
* CREATE TABLE sdk_states (
|
||||
* id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
* tenant_id VARCHAR(255) NOT NULL UNIQUE,
|
||||
* user_id VARCHAR(255),
|
||||
* state JSONB NOT NULL,
|
||||
* version INTEGER DEFAULT 1,
|
||||
* created_at TIMESTAMP DEFAULT NOW(),
|
||||
* updated_at TIMESTAMP DEFAULT NOW()
|
||||
* );
|
||||
*
|
||||
* CREATE INDEX idx_sdk_states_tenant ON sdk_states(tenant_id);
|
||||
*/
|
||||
|
||||
interface StateStore {
|
||||
get(tenantId: string): Promise<StoredState | null>
|
||||
save(tenantId: string, state: unknown, userId?: string, expectedVersion?: number): Promise<StoredState>
|
||||
@@ -69,7 +52,6 @@ class InMemoryStateStore implements StateStore {
|
||||
): Promise<StoredState> {
|
||||
const existing = this.store.get(tenantId)
|
||||
|
||||
// Optimistic locking check
|
||||
if (expectedVersion !== undefined && existing && existing.version !== expectedVersion) {
|
||||
const error = new Error('Version conflict') as Error & { status: number }
|
||||
error.status = 409
|
||||
@@ -99,68 +81,94 @@ class InMemoryStateStore implements StateStore {
|
||||
}
|
||||
}
|
||||
|
||||
// Future PostgreSQL implementation would look like:
|
||||
// class PostgreSQLStateStore implements StateStore {
|
||||
// private db: Pool
|
||||
//
|
||||
// constructor(connectionString: string) {
|
||||
// this.db = new Pool({ connectionString })
|
||||
// }
|
||||
//
|
||||
// async get(tenantId: string): Promise<StoredState | null> {
|
||||
// const result = await this.db.query(
|
||||
// 'SELECT state, version, user_id, created_at, updated_at FROM sdk_states WHERE tenant_id = $1',
|
||||
// [tenantId]
|
||||
// )
|
||||
// if (result.rows.length === 0) return null
|
||||
// const row = result.rows[0]
|
||||
// return {
|
||||
// state: row.state,
|
||||
// version: row.version,
|
||||
// userId: row.user_id,
|
||||
// createdAt: row.created_at,
|
||||
// updatedAt: row.updated_at,
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// async save(tenantId: string, state: unknown, userId?: string, expectedVersion?: number): Promise<StoredState> {
|
||||
// // Use UPSERT with version check
|
||||
// const result = await this.db.query(`
|
||||
// INSERT INTO sdk_states (tenant_id, user_id, state, version)
|
||||
// VALUES ($1, $2, $3, 1)
|
||||
// ON CONFLICT (tenant_id) DO UPDATE SET
|
||||
// state = $3,
|
||||
// user_id = COALESCE($2, sdk_states.user_id),
|
||||
// version = sdk_states.version + 1,
|
||||
// updated_at = NOW()
|
||||
// WHERE ($4::int IS NULL OR sdk_states.version = $4)
|
||||
// RETURNING version, created_at, updated_at
|
||||
// `, [tenantId, userId, JSON.stringify(state), expectedVersion])
|
||||
//
|
||||
// if (result.rows.length === 0) {
|
||||
// throw new Error('Version conflict')
|
||||
// }
|
||||
//
|
||||
// return {
|
||||
// state,
|
||||
// version: result.rows[0].version,
|
||||
// userId,
|
||||
// createdAt: result.rows[0].created_at,
|
||||
// updatedAt: result.rows[0].updated_at,
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// async delete(tenantId: string): Promise<boolean> {
|
||||
// const result = await this.db.query(
|
||||
// 'DELETE FROM sdk_states WHERE tenant_id = $1',
|
||||
// [tenantId]
|
||||
// )
|
||||
// return result.rowCount > 0
|
||||
// }
|
||||
// }
|
||||
class PostgreSQLStateStore implements StateStore {
|
||||
private pool: Pool
|
||||
|
||||
// Use in-memory store for now
|
||||
const stateStore: StateStore = new InMemoryStateStore()
|
||||
constructor(connectionString: string) {
|
||||
this.pool = new Pool({
|
||||
connectionString,
|
||||
max: 5,
|
||||
// Set search_path for compliance schema
|
||||
options: '-c search_path=compliance,core,public',
|
||||
})
|
||||
}
|
||||
|
||||
async get(tenantId: string): Promise<StoredState | null> {
|
||||
const result = await this.pool.query(
|
||||
'SELECT state, version, user_id, created_at, updated_at FROM sdk_states WHERE tenant_id = $1',
|
||||
[tenantId]
|
||||
)
|
||||
if (result.rows.length === 0) return null
|
||||
const row = result.rows[0]
|
||||
return {
|
||||
state: row.state,
|
||||
version: row.version,
|
||||
userId: row.user_id,
|
||||
createdAt: row.created_at instanceof Date ? row.created_at.toISOString() : row.created_at,
|
||||
updatedAt: row.updated_at instanceof Date ? row.updated_at.toISOString() : row.updated_at,
|
||||
}
|
||||
}
|
||||
|
||||
async save(tenantId: string, state: unknown, userId?: string, expectedVersion?: number): Promise<StoredState> {
|
||||
const now = new Date().toISOString()
|
||||
const stateWithTimestamp = {
|
||||
...(state as object),
|
||||
lastModified: now,
|
||||
}
|
||||
|
||||
// Use UPSERT with version check
|
||||
const result = await this.pool.query(`
|
||||
INSERT INTO sdk_states (tenant_id, user_id, state, version, created_at, updated_at)
|
||||
VALUES ($1, $2, $3::jsonb, 1, NOW(), NOW())
|
||||
ON CONFLICT (tenant_id) DO UPDATE SET
|
||||
state = $3::jsonb,
|
||||
user_id = COALESCE($2, sdk_states.user_id),
|
||||
version = sdk_states.version + 1,
|
||||
updated_at = NOW()
|
||||
WHERE ($4::int IS NULL OR sdk_states.version = $4)
|
||||
RETURNING version, user_id, created_at, updated_at
|
||||
`, [tenantId, userId, JSON.stringify(stateWithTimestamp), expectedVersion ?? null])
|
||||
|
||||
if (result.rows.length === 0) {
|
||||
const error = new Error('Version conflict') as Error & { status: number }
|
||||
error.status = 409
|
||||
throw error
|
||||
}
|
||||
|
||||
const row = result.rows[0]
|
||||
return {
|
||||
state: stateWithTimestamp,
|
||||
version: row.version,
|
||||
userId: row.user_id,
|
||||
createdAt: row.created_at instanceof Date ? row.created_at.toISOString() : row.created_at,
|
||||
updatedAt: row.updated_at instanceof Date ? row.updated_at.toISOString() : row.updated_at,
|
||||
}
|
||||
}
|
||||
|
||||
async delete(tenantId: string): Promise<boolean> {
|
||||
const result = await this.pool.query(
|
||||
'DELETE FROM sdk_states WHERE tenant_id = $1',
|
||||
[tenantId]
|
||||
)
|
||||
return (result.rowCount ?? 0) > 0
|
||||
}
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// STORE INITIALIZATION
|
||||
// =============================================================================
|
||||
|
||||
function createStateStore(): StateStore {
|
||||
const databaseUrl = process.env.DATABASE_URL
|
||||
if (databaseUrl) {
|
||||
console.log('[SDK State] Using PostgreSQL state store')
|
||||
return new PostgreSQLStateStore(databaseUrl)
|
||||
}
|
||||
console.log('[SDK State] Using in-memory state store (no DATABASE_URL)')
|
||||
return new InMemoryStateStore()
|
||||
}
|
||||
|
||||
const stateStore: StateStore = createStateStore()
|
||||
|
||||
// =============================================================================
|
||||
// HELPER FUNCTIONS
|
||||
|
||||
41
admin-compliance/app/api/sdk/v1/ucca/assess/route.ts
Normal file
41
admin-compliance/app/api/sdk/v1/ucca/assess/route.ts
Normal file
@@ -0,0 +1,41 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const SDK_URL = process.env.SDK_URL || 'http://ai-compliance-sdk:8090'
|
||||
|
||||
/**
|
||||
* Proxy: POST /api/sdk/v1/ucca/assess → Go Backend POST /sdk/v1/ucca/assess
|
||||
*/
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const body = await request.json()
|
||||
|
||||
const response = await fetch(`${SDK_URL}/sdk/v1/ucca/assess`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(request.headers.get('X-Tenant-ID') && {
|
||||
'X-Tenant-ID': request.headers.get('X-Tenant-ID') as string,
|
||||
}),
|
||||
},
|
||||
body: JSON.stringify(body),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
console.error('UCCA assess error:', errorText)
|
||||
return NextResponse.json(
|
||||
{ error: 'UCCA backend error', details: errorText },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data, { status: 201 })
|
||||
} catch (error) {
|
||||
console.error('Failed to call UCCA assess:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to connect to UCCA backend' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
}
|
||||
48
admin-compliance/app/api/sdk/v1/ucca/assessments/route.ts
Normal file
48
admin-compliance/app/api/sdk/v1/ucca/assessments/route.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const SDK_URL = process.env.SDK_URL || 'http://ai-compliance-sdk:8090'
|
||||
|
||||
/**
|
||||
* Proxy: GET /api/sdk/v1/ucca/assessments → Go Backend GET /sdk/v1/ucca/assessments
|
||||
*/
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(request.url)
|
||||
const params = new URLSearchParams()
|
||||
|
||||
// Forward filter params
|
||||
for (const [key, value] of searchParams.entries()) {
|
||||
params.set(key, value)
|
||||
}
|
||||
|
||||
const queryString = params.toString()
|
||||
const url = `${SDK_URL}/sdk/v1/ucca/assessments${queryString ? `?${queryString}` : ''}`
|
||||
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(request.headers.get('X-Tenant-ID') && {
|
||||
'X-Tenant-ID': request.headers.get('X-Tenant-ID') as string,
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'UCCA backend error', details: errorText },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Failed to fetch UCCA assessments:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to connect to UCCA backend' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -148,7 +148,7 @@ export function Sidebar({ onRoleChange }: SidebarProps) {
|
||||
<div className="h-16 flex items-center justify-between px-4 border-b border-slate-700">
|
||||
{!collapsed && (
|
||||
<Link href="/dashboard" className="font-bold text-lg">
|
||||
Admin Lehrer KI
|
||||
Compliance Admin
|
||||
</Link>
|
||||
)}
|
||||
<button
|
||||
|
||||
@@ -0,0 +1,154 @@
|
||||
'use client'
|
||||
|
||||
import { RiskScoreGauge } from './RiskScoreGauge'
|
||||
|
||||
interface AssessmentResult {
|
||||
feasibility: string
|
||||
risk_level: string
|
||||
risk_score: number
|
||||
complexity: string
|
||||
dsfa_recommended: boolean
|
||||
art22_risk: boolean
|
||||
training_allowed: string
|
||||
summary: string
|
||||
recommendation: string
|
||||
alternative_approach?: string
|
||||
triggered_rules?: Array<{
|
||||
rule_code: string
|
||||
title: string
|
||||
severity: string
|
||||
gdpr_ref: string
|
||||
}>
|
||||
required_controls?: Array<{
|
||||
id: string
|
||||
title: string
|
||||
description: string
|
||||
effort: string
|
||||
}>
|
||||
recommended_architecture?: Array<{
|
||||
id: string
|
||||
title: string
|
||||
description: string
|
||||
benefit: string
|
||||
}>
|
||||
}
|
||||
|
||||
interface AssessmentResultCardProps {
|
||||
result: AssessmentResult
|
||||
}
|
||||
|
||||
const FEASIBILITY_STYLES: Record<string, { bg: string; text: string; label: string }> = {
|
||||
YES: { bg: 'bg-green-100', text: 'text-green-700', label: 'Machbar' },
|
||||
CONDITIONAL: { bg: 'bg-yellow-100', text: 'text-yellow-700', label: 'Bedingt machbar' },
|
||||
NO: { bg: 'bg-red-100', text: 'text-red-700', label: 'Nicht empfohlen' },
|
||||
}
|
||||
|
||||
const SEVERITY_STYLES: Record<string, string> = {
|
||||
INFO: 'bg-blue-100 text-blue-700',
|
||||
WARN: 'bg-yellow-100 text-yellow-700',
|
||||
BLOCK: 'bg-red-100 text-red-700',
|
||||
}
|
||||
|
||||
export function AssessmentResultCard({ result }: AssessmentResultCardProps) {
|
||||
const feasibility = FEASIBILITY_STYLES[result.feasibility] || FEASIBILITY_STYLES.YES
|
||||
|
||||
return (
|
||||
<div className="space-y-6">
|
||||
{/* Header with Score and Feasibility */}
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
<div className="flex items-start gap-6">
|
||||
<RiskScoreGauge score={result.risk_score} riskLevel={result.risk_level} size="lg" />
|
||||
<div className="flex-1">
|
||||
<div className="flex items-center gap-3 mb-3">
|
||||
<span className={`px-3 py-1 rounded-full text-sm font-medium ${feasibility.bg} ${feasibility.text}`}>
|
||||
{feasibility.label}
|
||||
</span>
|
||||
<span className="px-3 py-1 rounded-full text-sm bg-gray-100 text-gray-700">
|
||||
Komplexitaet: {result.complexity}
|
||||
</span>
|
||||
{result.dsfa_recommended && (
|
||||
<span className="px-3 py-1 rounded-full text-sm bg-orange-100 text-orange-700">
|
||||
DSFA empfohlen
|
||||
</span>
|
||||
)}
|
||||
{result.art22_risk && (
|
||||
<span className="px-3 py-1 rounded-full text-sm bg-red-100 text-red-700">
|
||||
Art. 22 Risiko
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
<p className="text-gray-700">{result.summary}</p>
|
||||
<p className="text-sm text-gray-500 mt-2">{result.recommendation}</p>
|
||||
{result.alternative_approach && (
|
||||
<div className="mt-3 p-3 bg-blue-50 rounded-lg text-sm text-blue-700">
|
||||
<span className="font-medium">Alternative: </span>
|
||||
{result.alternative_approach}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Triggered Rules */}
|
||||
{result.triggered_rules && result.triggered_rules.length > 0 && (
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
<h3 className="text-lg font-semibold text-gray-900 mb-4">
|
||||
Ausgeloeste Regeln ({result.triggered_rules.length})
|
||||
</h3>
|
||||
<div className="space-y-2">
|
||||
{result.triggered_rules.map((rule) => (
|
||||
<div key={rule.rule_code} className="flex items-center gap-3 p-3 bg-gray-50 rounded-lg">
|
||||
<span className={`px-2 py-1 text-xs rounded-full ${SEVERITY_STYLES[rule.severity] || 'bg-gray-100 text-gray-700'}`}>
|
||||
{rule.severity}
|
||||
</span>
|
||||
<span className="text-xs text-gray-400 font-mono">{rule.rule_code}</span>
|
||||
<span className="text-sm font-medium text-gray-800 flex-1">{rule.title}</span>
|
||||
<span className="text-xs text-purple-600">{rule.gdpr_ref}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Required Controls */}
|
||||
{result.required_controls && result.required_controls.length > 0 && (
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
<h3 className="text-lg font-semibold text-gray-900 mb-4">
|
||||
Erforderliche Kontrollen ({result.required_controls.length})
|
||||
</h3>
|
||||
<div className="grid grid-cols-1 md:grid-cols-2 gap-3">
|
||||
{result.required_controls.map((control) => (
|
||||
<div key={control.id} className="p-4 border border-gray-200 rounded-lg">
|
||||
<div className="flex items-center justify-between mb-1">
|
||||
<span className="font-medium text-gray-900 text-sm">{control.title}</span>
|
||||
<span className="px-2 py-0.5 text-xs bg-gray-100 text-gray-600 rounded">
|
||||
{control.effort}
|
||||
</span>
|
||||
</div>
|
||||
<p className="text-xs text-gray-500">{control.description}</p>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Recommended Architecture Patterns */}
|
||||
{result.recommended_architecture && result.recommended_architecture.length > 0 && (
|
||||
<div className="bg-white rounded-xl border border-gray-200 p-6">
|
||||
<h3 className="text-lg font-semibold text-gray-900 mb-4">
|
||||
Empfohlene Architektur-Patterns
|
||||
</h3>
|
||||
<div className="space-y-3">
|
||||
{result.recommended_architecture.map((pattern) => (
|
||||
<div key={pattern.id} className="p-4 bg-purple-50 border border-purple-200 rounded-lg">
|
||||
<h4 className="font-medium text-purple-900">{pattern.title}</h4>
|
||||
<p className="text-sm text-purple-700 mt-1">{pattern.description}</p>
|
||||
<p className="text-xs text-purple-600 mt-2">Vorteil: {pattern.benefit}</p>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
'use client'
|
||||
|
||||
interface RiskScoreGaugeProps {
|
||||
score: number // 0-100
|
||||
riskLevel: string
|
||||
size?: 'sm' | 'md' | 'lg'
|
||||
}
|
||||
|
||||
const RISK_COLORS: Record<string, string> = {
|
||||
MINIMAL: '#22c55e',
|
||||
LOW: '#84cc16',
|
||||
MEDIUM: '#eab308',
|
||||
HIGH: '#f97316',
|
||||
UNACCEPTABLE: '#ef4444',
|
||||
}
|
||||
|
||||
const RISK_LABELS: Record<string, string> = {
|
||||
MINIMAL: 'Minimal',
|
||||
LOW: 'Niedrig',
|
||||
MEDIUM: 'Mittel',
|
||||
HIGH: 'Hoch',
|
||||
UNACCEPTABLE: 'Unzulaessig',
|
||||
}
|
||||
|
||||
export function RiskScoreGauge({ score, riskLevel, size = 'md' }: RiskScoreGaugeProps) {
|
||||
const color = RISK_COLORS[riskLevel] || '#9ca3af'
|
||||
const label = RISK_LABELS[riskLevel] || riskLevel
|
||||
|
||||
const sizes = {
|
||||
sm: { w: 80, r: 30, stroke: 6, fontSize: '1rem', labelSize: '0.65rem' },
|
||||
md: { w: 120, r: 46, stroke: 8, fontSize: '1.5rem', labelSize: '0.75rem' },
|
||||
lg: { w: 160, r: 62, stroke: 10, fontSize: '2rem', labelSize: '0.875rem' },
|
||||
}
|
||||
|
||||
const s = sizes[size]
|
||||
const circumference = 2 * Math.PI * s.r
|
||||
const dashOffset = circumference - (score / 100) * circumference
|
||||
|
||||
return (
|
||||
<div className="flex flex-col items-center">
|
||||
<svg width={s.w} height={s.w} viewBox={`0 0 ${s.w} ${s.w}`}>
|
||||
{/* Background circle */}
|
||||
<circle
|
||||
cx={s.w / 2}
|
||||
cy={s.w / 2}
|
||||
r={s.r}
|
||||
fill="none"
|
||||
stroke="#e5e7eb"
|
||||
strokeWidth={s.stroke}
|
||||
/>
|
||||
{/* Score arc */}
|
||||
<circle
|
||||
cx={s.w / 2}
|
||||
cy={s.w / 2}
|
||||
r={s.r}
|
||||
fill="none"
|
||||
stroke={color}
|
||||
strokeWidth={s.stroke}
|
||||
strokeDasharray={circumference}
|
||||
strokeDashoffset={dashOffset}
|
||||
strokeLinecap="round"
|
||||
transform={`rotate(-90 ${s.w / 2} ${s.w / 2})`}
|
||||
className="transition-all duration-500"
|
||||
/>
|
||||
{/* Score text */}
|
||||
<text
|
||||
x={s.w / 2}
|
||||
y={s.w / 2}
|
||||
textAnchor="middle"
|
||||
dominantBaseline="central"
|
||||
fill={color}
|
||||
style={{ fontSize: s.fontSize, fontWeight: 700 }}
|
||||
>
|
||||
{score}
|
||||
</text>
|
||||
</svg>
|
||||
<span
|
||||
className="mt-1 font-medium"
|
||||
style={{ color, fontSize: s.labelSize }}
|
||||
>
|
||||
{label}
|
||||
</span>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
110
admin-compliance/lib/sdk/__tests__/api-client.test.ts
Normal file
110
admin-compliance/lib/sdk/__tests__/api-client.test.ts
Normal file
@@ -0,0 +1,110 @@
|
||||
/**
|
||||
* Tests for SDK API Client extensions (modules, UCCA, import, screening).
|
||||
*/
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest'
|
||||
|
||||
// Mock fetch globally
|
||||
const mockFetch = vi.fn()
|
||||
global.fetch = mockFetch
|
||||
|
||||
// Import after mocking
|
||||
import { sdkApiClient } from '../api-client'
|
||||
|
||||
describe('SDK API Client', () => {
|
||||
beforeEach(() => {
|
||||
mockFetch.mockReset()
|
||||
})
|
||||
|
||||
describe('getModules', () => {
|
||||
it('fetches modules from backend', async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve([{ id: 'mod-1', name: 'DSGVO' }]),
|
||||
})
|
||||
|
||||
const result = await sdkApiClient.getModules()
|
||||
expect(result).toHaveLength(1)
|
||||
expect(result[0].name).toBe('DSGVO')
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
expect.stringContaining('/api/sdk/v1/modules'),
|
||||
expect.any(Object)
|
||||
)
|
||||
})
|
||||
|
||||
it('returns empty array on error', async () => {
|
||||
mockFetch.mockRejectedValueOnce(new Error('Network error'))
|
||||
const result = await sdkApiClient.getModules()
|
||||
expect(result).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
describe('analyzeDocument', () => {
|
||||
it('sends FormData to import analyze endpoint', async () => {
|
||||
const mockResponse = {
|
||||
document_id: 'doc-1',
|
||||
detected_type: 'DSFA',
|
||||
confidence: 0.85,
|
||||
}
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve(mockResponse),
|
||||
})
|
||||
|
||||
const formData = new FormData()
|
||||
const result = await sdkApiClient.analyzeDocument(formData)
|
||||
expect(result.document_id).toBe('doc-1')
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
expect.stringContaining('/api/sdk/v1/import/analyze'),
|
||||
expect.objectContaining({ method: 'POST' })
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('scanDependencies', () => {
|
||||
it('sends FormData to screening scan endpoint', async () => {
|
||||
const mockResponse = {
|
||||
id: 'scan-1',
|
||||
status: 'completed',
|
||||
total_components: 10,
|
||||
total_issues: 2,
|
||||
}
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve(mockResponse),
|
||||
})
|
||||
|
||||
const formData = new FormData()
|
||||
const result = await sdkApiClient.scanDependencies(formData)
|
||||
expect(result.id).toBe('scan-1')
|
||||
expect(result.total_components).toBe(10)
|
||||
})
|
||||
})
|
||||
|
||||
describe('assessUseCase', () => {
|
||||
it('sends intake data to UCCA assess endpoint', async () => {
|
||||
const mockResult = { id: 'assessment-1', feasibility: 'GREEN' }
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve(mockResult),
|
||||
})
|
||||
|
||||
const result = await sdkApiClient.assessUseCase({
|
||||
name: 'Test Use Case',
|
||||
domain: 'education',
|
||||
})
|
||||
expect(result.feasibility).toBe('GREEN')
|
||||
})
|
||||
})
|
||||
|
||||
describe('getAssessments', () => {
|
||||
it('fetches assessment list', async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve([{ id: 'a1' }, { id: 'a2' }]),
|
||||
})
|
||||
|
||||
const result = await sdkApiClient.getAssessments()
|
||||
expect(result).toHaveLength(2)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -372,6 +372,153 @@ export class SDKApiClient {
|
||||
return response.data
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public Methods - Modules
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Get available compliance modules from backend
|
||||
*/
|
||||
async getModules(filters?: {
|
||||
serviceType?: string
|
||||
criticality?: string
|
||||
processesPii?: boolean
|
||||
aiComponents?: boolean
|
||||
}): Promise<{ modules: unknown[]; total: number }> {
|
||||
const params = new URLSearchParams()
|
||||
if (filters?.serviceType) params.set('service_type', filters.serviceType)
|
||||
if (filters?.criticality) params.set('criticality', filters.criticality)
|
||||
if (filters?.processesPii !== undefined) params.set('processes_pii', String(filters.processesPii))
|
||||
if (filters?.aiComponents !== undefined) params.set('ai_components', String(filters.aiComponents))
|
||||
|
||||
const queryString = params.toString()
|
||||
const url = `${this.baseUrl}/modules${queryString ? `?${queryString}` : ''}`
|
||||
|
||||
const response = await this.fetchWithRetry<{ modules: unknown[]; total: number }>(
|
||||
url,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
}
|
||||
)
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public Methods - UCCA (Use Case Compliance Assessment)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Assess a use case
|
||||
*/
|
||||
async assessUseCase(intake: unknown): Promise<unknown> {
|
||||
const response = await this.fetchWithRetry<APIResponse<unknown>>(
|
||||
`${this.baseUrl}/ucca/assess`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-Tenant-ID': this.tenantId,
|
||||
},
|
||||
body: JSON.stringify(intake),
|
||||
}
|
||||
)
|
||||
return response
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all assessments
|
||||
*/
|
||||
async getAssessments(): Promise<unknown[]> {
|
||||
const response = await this.fetchWithRetry<APIResponse<unknown[]>>(
|
||||
`${this.baseUrl}/ucca/assessments?tenantId=${encodeURIComponent(this.tenantId)}`,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-Tenant-ID': this.tenantId,
|
||||
},
|
||||
}
|
||||
)
|
||||
return response.data || []
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a single assessment
|
||||
*/
|
||||
async getAssessment(id: string): Promise<unknown> {
|
||||
const response = await this.fetchWithRetry<APIResponse<unknown>>(
|
||||
`${this.baseUrl}/ucca/assessments/${id}`,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-Tenant-ID': this.tenantId,
|
||||
},
|
||||
}
|
||||
)
|
||||
return response.data
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete an assessment
|
||||
*/
|
||||
async deleteAssessment(id: string): Promise<void> {
|
||||
await this.fetchWithRetry<APIResponse<void>>(
|
||||
`${this.baseUrl}/ucca/assessments/${id}`,
|
||||
{
|
||||
method: 'DELETE',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-Tenant-ID': this.tenantId,
|
||||
},
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public Methods - Document Import
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Analyze an uploaded document
|
||||
*/
|
||||
async analyzeDocument(formData: FormData): Promise<unknown> {
|
||||
const response = await this.fetchWithRetry<APIResponse<unknown>>(
|
||||
`${this.baseUrl}/import/analyze`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'X-Tenant-ID': this.tenantId,
|
||||
},
|
||||
body: formData,
|
||||
}
|
||||
)
|
||||
return response.data
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public Methods - System Screening
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Scan a dependency file (package-lock.json, requirements.txt, etc.)
|
||||
*/
|
||||
async scanDependencies(formData: FormData): Promise<unknown> {
|
||||
const response = await this.fetchWithRetry<APIResponse<unknown>>(
|
||||
`${this.baseUrl}/screening/scan`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'X-Tenant-ID': this.tenantId,
|
||||
},
|
||||
body: formData,
|
||||
}
|
||||
)
|
||||
return response.data
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public Methods - Export
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@@ -65,6 +65,9 @@ const initialState: SDKState = {
|
||||
// Compliance Scope
|
||||
complianceScope: null,
|
||||
|
||||
// Source Policy
|
||||
sourcePolicy: null,
|
||||
|
||||
// Progress
|
||||
currentPhase: 1,
|
||||
currentStep: 'company-profile',
|
||||
|
||||
@@ -15,6 +15,7 @@ describe('StateProjector', () => {
|
||||
customerType: null,
|
||||
companyProfile: null,
|
||||
complianceScope: null,
|
||||
sourcePolicy: null,
|
||||
currentPhase: 1,
|
||||
currentStep: 'company-profile',
|
||||
completedSteps: [],
|
||||
|
||||
@@ -1483,6 +1483,14 @@ export interface SDKState {
|
||||
// Compliance Scope (determines depth level L1-L4)
|
||||
complianceScope: import('./compliance-scope-types').ComplianceScopeState | null
|
||||
|
||||
// Source Policy (checkpoint tracking — actual data in backend)
|
||||
sourcePolicy: {
|
||||
configured: boolean
|
||||
sourcesCount: number
|
||||
piiRulesCount: number
|
||||
lastAuditAt: string | null
|
||||
} | null
|
||||
|
||||
// Progress
|
||||
currentPhase: SDKPhase
|
||||
currentStep: string
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
"jszip": "^3.10.1",
|
||||
"lucide-react": "^0.468.0",
|
||||
"next": "^15.1.0",
|
||||
"pg": "^8.13.0",
|
||||
"react": "^18.3.1",
|
||||
"react-dom": "^18.3.1",
|
||||
"reactflow": "^11.11.4",
|
||||
|
||||
344
backend-compliance/compliance/api/company_profile_routes.py
Normal file
344
backend-compliance/compliance/api/company_profile_routes.py
Normal file
@@ -0,0 +1,344 @@
|
||||
"""
|
||||
FastAPI routes for Company Profile CRUD with audit logging.
|
||||
|
||||
Endpoints:
|
||||
- GET /v1/company-profile: Get company profile for a tenant
|
||||
- POST /v1/company-profile: Create or update company profile
|
||||
- GET /v1/company-profile/audit: Get audit log for a tenant
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Header
|
||||
from pydantic import BaseModel
|
||||
|
||||
from database import SessionLocal
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/v1/company-profile", tags=["company-profile"])
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# REQUEST/RESPONSE MODELS
|
||||
# =============================================================================
|
||||
|
||||
class CompanyProfileRequest(BaseModel):
|
||||
company_name: str = ""
|
||||
legal_form: str = "GmbH"
|
||||
industry: str = ""
|
||||
founded_year: Optional[int] = None
|
||||
business_model: str = "B2B"
|
||||
offerings: list[str] = []
|
||||
company_size: str = "small"
|
||||
employee_count: str = "1-9"
|
||||
annual_revenue: str = "< 2 Mio"
|
||||
headquarters_country: str = "DE"
|
||||
headquarters_city: str = ""
|
||||
has_international_locations: bool = False
|
||||
international_countries: list[str] = []
|
||||
target_markets: list[str] = ["DE"]
|
||||
primary_jurisdiction: str = "DE"
|
||||
is_data_controller: bool = True
|
||||
is_data_processor: bool = False
|
||||
uses_ai: bool = False
|
||||
ai_use_cases: list[str] = []
|
||||
dpo_name: Optional[str] = None
|
||||
dpo_email: Optional[str] = None
|
||||
legal_contact_name: Optional[str] = None
|
||||
legal_contact_email: Optional[str] = None
|
||||
machine_builder: Optional[dict] = None
|
||||
is_complete: bool = False
|
||||
|
||||
|
||||
class CompanyProfileResponse(BaseModel):
|
||||
id: str
|
||||
tenant_id: str
|
||||
company_name: str
|
||||
legal_form: str
|
||||
industry: str
|
||||
founded_year: Optional[int]
|
||||
business_model: str
|
||||
offerings: list[str]
|
||||
company_size: str
|
||||
employee_count: str
|
||||
annual_revenue: str
|
||||
headquarters_country: str
|
||||
headquarters_city: str
|
||||
has_international_locations: bool
|
||||
international_countries: list[str]
|
||||
target_markets: list[str]
|
||||
primary_jurisdiction: str
|
||||
is_data_controller: bool
|
||||
is_data_processor: bool
|
||||
uses_ai: bool
|
||||
ai_use_cases: list[str]
|
||||
dpo_name: Optional[str]
|
||||
dpo_email: Optional[str]
|
||||
legal_contact_name: Optional[str]
|
||||
legal_contact_email: Optional[str]
|
||||
machine_builder: Optional[dict]
|
||||
is_complete: bool
|
||||
completed_at: Optional[str]
|
||||
created_at: str
|
||||
updated_at: str
|
||||
|
||||
|
||||
class AuditEntryResponse(BaseModel):
|
||||
id: str
|
||||
action: str
|
||||
changed_fields: Optional[dict]
|
||||
changed_by: Optional[str]
|
||||
created_at: str
|
||||
|
||||
|
||||
class AuditListResponse(BaseModel):
|
||||
entries: list[AuditEntryResponse]
|
||||
total: int
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# HELPERS
|
||||
# =============================================================================
|
||||
|
||||
def row_to_response(row) -> CompanyProfileResponse:
|
||||
"""Convert a DB row to response model."""
|
||||
return CompanyProfileResponse(
|
||||
id=str(row[0]),
|
||||
tenant_id=row[1],
|
||||
company_name=row[2] or "",
|
||||
legal_form=row[3] or "GmbH",
|
||||
industry=row[4] or "",
|
||||
founded_year=row[5],
|
||||
business_model=row[6] or "B2B",
|
||||
offerings=row[7] if isinstance(row[7], list) else [],
|
||||
company_size=row[8] or "small",
|
||||
employee_count=row[9] or "1-9",
|
||||
annual_revenue=row[10] or "< 2 Mio",
|
||||
headquarters_country=row[11] or "DE",
|
||||
headquarters_city=row[12] or "",
|
||||
has_international_locations=row[13] or False,
|
||||
international_countries=row[14] if isinstance(row[14], list) else [],
|
||||
target_markets=row[15] if isinstance(row[15], list) else ["DE"],
|
||||
primary_jurisdiction=row[16] or "DE",
|
||||
is_data_controller=row[17] if row[17] is not None else True,
|
||||
is_data_processor=row[18] or False,
|
||||
uses_ai=row[19] or False,
|
||||
ai_use_cases=row[20] if isinstance(row[20], list) else [],
|
||||
dpo_name=row[21],
|
||||
dpo_email=row[22],
|
||||
legal_contact_name=row[23],
|
||||
legal_contact_email=row[24],
|
||||
machine_builder=row[25] if isinstance(row[25], dict) else None,
|
||||
is_complete=row[26] or False,
|
||||
completed_at=str(row[27]) if row[27] else None,
|
||||
created_at=str(row[28]),
|
||||
updated_at=str(row[29]),
|
||||
)
|
||||
|
||||
|
||||
def log_audit(db, tenant_id: str, action: str, changed_fields: dict | None, changed_by: str | None):
|
||||
"""Write an audit log entry."""
|
||||
try:
|
||||
db.execute(
|
||||
"""INSERT INTO compliance_company_profile_audit
|
||||
(tenant_id, action, changed_fields, changed_by)
|
||||
VALUES (:tenant_id, :action, :fields::jsonb, :changed_by)""",
|
||||
{
|
||||
"tenant_id": tenant_id,
|
||||
"action": action,
|
||||
"fields": json.dumps(changed_fields) if changed_fields else None,
|
||||
"changed_by": changed_by,
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to write audit log: {e}")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ROUTES
|
||||
# =============================================================================
|
||||
|
||||
@router.get("", response_model=CompanyProfileResponse)
|
||||
async def get_company_profile(
|
||||
tenant_id: str = "default",
|
||||
x_tenant_id: Optional[str] = Header(None, alias="X-Tenant-ID"),
|
||||
):
|
||||
"""Get company profile for a tenant."""
|
||||
tid = x_tenant_id or tenant_id
|
||||
db = SessionLocal()
|
||||
try:
|
||||
result = db.execute(
|
||||
"""SELECT id, tenant_id, company_name, legal_form, industry, founded_year,
|
||||
business_model, offerings, company_size, employee_count, annual_revenue,
|
||||
headquarters_country, headquarters_city, has_international_locations,
|
||||
international_countries, target_markets, primary_jurisdiction,
|
||||
is_data_controller, is_data_processor, uses_ai, ai_use_cases,
|
||||
dpo_name, dpo_email, legal_contact_name, legal_contact_email,
|
||||
machine_builder, is_complete, completed_at, created_at, updated_at
|
||||
FROM compliance_company_profiles WHERE tenant_id = :tenant_id""",
|
||||
{"tenant_id": tid},
|
||||
)
|
||||
row = result.fetchone()
|
||||
if not row:
|
||||
raise HTTPException(status_code=404, detail="Company profile not found")
|
||||
|
||||
return row_to_response(row)
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
|
||||
@router.post("", response_model=CompanyProfileResponse)
|
||||
async def upsert_company_profile(
|
||||
profile: CompanyProfileRequest,
|
||||
tenant_id: str = "default",
|
||||
x_tenant_id: Optional[str] = Header(None, alias="X-Tenant-ID"),
|
||||
):
|
||||
"""Create or update company profile (upsert)."""
|
||||
tid = x_tenant_id or tenant_id
|
||||
db = SessionLocal()
|
||||
try:
|
||||
# Check if profile exists
|
||||
existing = db.execute(
|
||||
"SELECT id FROM compliance_company_profiles WHERE tenant_id = :tid",
|
||||
{"tid": tid},
|
||||
).fetchone()
|
||||
|
||||
action = "update" if existing else "create"
|
||||
|
||||
completed_at_clause = ", completed_at = NOW()" if profile.is_complete else ", completed_at = NULL"
|
||||
|
||||
db.execute(
|
||||
f"""INSERT INTO compliance_company_profiles
|
||||
(tenant_id, company_name, legal_form, industry, founded_year,
|
||||
business_model, offerings, company_size, employee_count, annual_revenue,
|
||||
headquarters_country, headquarters_city, has_international_locations,
|
||||
international_countries, target_markets, primary_jurisdiction,
|
||||
is_data_controller, is_data_processor, uses_ai, ai_use_cases,
|
||||
dpo_name, dpo_email, legal_contact_name, legal_contact_email,
|
||||
machine_builder, is_complete)
|
||||
VALUES (:tid, :company_name, :legal_form, :industry, :founded_year,
|
||||
:business_model, :offerings::jsonb, :company_size, :employee_count, :annual_revenue,
|
||||
:hq_country, :hq_city, :has_intl, :intl_countries::jsonb,
|
||||
:target_markets::jsonb, :jurisdiction,
|
||||
:is_controller, :is_processor, :uses_ai, :ai_use_cases::jsonb,
|
||||
:dpo_name, :dpo_email, :legal_name, :legal_email,
|
||||
:machine_builder::jsonb, :is_complete)
|
||||
ON CONFLICT (tenant_id) DO UPDATE SET
|
||||
company_name = EXCLUDED.company_name,
|
||||
legal_form = EXCLUDED.legal_form,
|
||||
industry = EXCLUDED.industry,
|
||||
founded_year = EXCLUDED.founded_year,
|
||||
business_model = EXCLUDED.business_model,
|
||||
offerings = EXCLUDED.offerings,
|
||||
company_size = EXCLUDED.company_size,
|
||||
employee_count = EXCLUDED.employee_count,
|
||||
annual_revenue = EXCLUDED.annual_revenue,
|
||||
headquarters_country = EXCLUDED.headquarters_country,
|
||||
headquarters_city = EXCLUDED.headquarters_city,
|
||||
has_international_locations = EXCLUDED.has_international_locations,
|
||||
international_countries = EXCLUDED.international_countries,
|
||||
target_markets = EXCLUDED.target_markets,
|
||||
primary_jurisdiction = EXCLUDED.primary_jurisdiction,
|
||||
is_data_controller = EXCLUDED.is_data_controller,
|
||||
is_data_processor = EXCLUDED.is_data_processor,
|
||||
uses_ai = EXCLUDED.uses_ai,
|
||||
ai_use_cases = EXCLUDED.ai_use_cases,
|
||||
dpo_name = EXCLUDED.dpo_name,
|
||||
dpo_email = EXCLUDED.dpo_email,
|
||||
legal_contact_name = EXCLUDED.legal_contact_name,
|
||||
legal_contact_email = EXCLUDED.legal_contact_email,
|
||||
machine_builder = EXCLUDED.machine_builder,
|
||||
is_complete = EXCLUDED.is_complete,
|
||||
updated_at = NOW()
|
||||
{completed_at_clause}""",
|
||||
{
|
||||
"tid": tid,
|
||||
"company_name": profile.company_name,
|
||||
"legal_form": profile.legal_form,
|
||||
"industry": profile.industry,
|
||||
"founded_year": profile.founded_year,
|
||||
"business_model": profile.business_model,
|
||||
"offerings": json.dumps(profile.offerings),
|
||||
"company_size": profile.company_size,
|
||||
"employee_count": profile.employee_count,
|
||||
"annual_revenue": profile.annual_revenue,
|
||||
"hq_country": profile.headquarters_country,
|
||||
"hq_city": profile.headquarters_city,
|
||||
"has_intl": profile.has_international_locations,
|
||||
"intl_countries": json.dumps(profile.international_countries),
|
||||
"target_markets": json.dumps(profile.target_markets),
|
||||
"jurisdiction": profile.primary_jurisdiction,
|
||||
"is_controller": profile.is_data_controller,
|
||||
"is_processor": profile.is_data_processor,
|
||||
"uses_ai": profile.uses_ai,
|
||||
"ai_use_cases": json.dumps(profile.ai_use_cases),
|
||||
"dpo_name": profile.dpo_name,
|
||||
"dpo_email": profile.dpo_email,
|
||||
"legal_name": profile.legal_contact_name,
|
||||
"legal_email": profile.legal_contact_email,
|
||||
"machine_builder": json.dumps(profile.machine_builder) if profile.machine_builder else None,
|
||||
"is_complete": profile.is_complete,
|
||||
},
|
||||
)
|
||||
|
||||
# Audit log
|
||||
log_audit(db, tid, action, profile.model_dump(), None)
|
||||
|
||||
db.commit()
|
||||
|
||||
# Fetch and return
|
||||
result = db.execute(
|
||||
"""SELECT id, tenant_id, company_name, legal_form, industry, founded_year,
|
||||
business_model, offerings, company_size, employee_count, annual_revenue,
|
||||
headquarters_country, headquarters_city, has_international_locations,
|
||||
international_countries, target_markets, primary_jurisdiction,
|
||||
is_data_controller, is_data_processor, uses_ai, ai_use_cases,
|
||||
dpo_name, dpo_email, legal_contact_name, legal_contact_email,
|
||||
machine_builder, is_complete, completed_at, created_at, updated_at
|
||||
FROM compliance_company_profiles WHERE tenant_id = :tid""",
|
||||
{"tid": tid},
|
||||
)
|
||||
row = result.fetchone()
|
||||
return row_to_response(row)
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
logger.error(f"Failed to upsert company profile: {e}")
|
||||
raise HTTPException(status_code=500, detail="Failed to save company profile")
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
|
||||
@router.get("/audit", response_model=AuditListResponse)
|
||||
async def get_audit_log(
|
||||
tenant_id: str = "default",
|
||||
x_tenant_id: Optional[str] = Header(None, alias="X-Tenant-ID"),
|
||||
):
|
||||
"""Get audit log for company profile changes."""
|
||||
tid = x_tenant_id or tenant_id
|
||||
db = SessionLocal()
|
||||
try:
|
||||
result = db.execute(
|
||||
"""SELECT id, action, changed_fields, changed_by, created_at
|
||||
FROM compliance_company_profile_audit
|
||||
WHERE tenant_id = :tid
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 100""",
|
||||
{"tid": tid},
|
||||
)
|
||||
rows = result.fetchall()
|
||||
entries = [
|
||||
AuditEntryResponse(
|
||||
id=str(r[0]),
|
||||
action=r[1],
|
||||
changed_fields=r[2] if isinstance(r[2], dict) else None,
|
||||
changed_by=r[3],
|
||||
created_at=str(r[4]),
|
||||
)
|
||||
for r in rows
|
||||
]
|
||||
return AuditListResponse(entries=entries, total=len(entries))
|
||||
finally:
|
||||
db.close()
|
||||
380
backend-compliance/compliance/api/import_routes.py
Normal file
380
backend-compliance/compliance/api/import_routes.py
Normal file
@@ -0,0 +1,380 @@
|
||||
"""
|
||||
FastAPI routes for Document Import and Gap Analysis.
|
||||
|
||||
Endpoints:
|
||||
- POST /v1/import/analyze: Upload and analyze a compliance document
|
||||
- GET /v1/import/documents: List imported documents for a tenant
|
||||
- GET /v1/import/gap-analysis/{document_id}: Get gap analysis for a document
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import uuid
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
from fastapi import APIRouter, File, Form, UploadFile, HTTPException
|
||||
from pydantic import BaseModel
|
||||
|
||||
from database import SessionLocal
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/v1/import", tags=["document-import"])
|
||||
|
||||
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://host.docker.internal:11434")
|
||||
LLM_MODEL = os.getenv("COMPLIANCE_LLM_MODEL", "qwen3:30b-a3b")
|
||||
|
||||
# =============================================================================
|
||||
# DOCUMENT TYPE DETECTION
|
||||
# =============================================================================
|
||||
|
||||
DOCUMENT_TYPE_KEYWORDS = {
|
||||
"DSFA": ["datenschutz-folgenabschaetzung", "dsfa", "dpia", "privacy impact"],
|
||||
"TOM": ["technisch-organisatorische", "tom", "massnahmen", "technical measures"],
|
||||
"VVT": ["verarbeitungsverzeichnis", "vvt", "processing activities", "art. 30"],
|
||||
"PRIVACY_POLICY": ["datenschutzerklaerung", "privacy policy", "datenschutzhinweis"],
|
||||
"AGB": ["allgemeine geschaeftsbedingungen", "agb", "terms and conditions"],
|
||||
"COOKIE_POLICY": ["cookie", "tracking", "einwilligung"],
|
||||
"RISK_ASSESSMENT": ["risikobewertung", "risk assessment", "risikoanalyse"],
|
||||
"AUDIT_REPORT": ["audit", "pruefbericht", "zertifizierung"],
|
||||
}
|
||||
|
||||
|
||||
def detect_document_type(text: str) -> tuple[str, float]:
|
||||
"""Detect document type from extracted text using keyword matching."""
|
||||
text_lower = text.lower()
|
||||
scores: dict[str, int] = {}
|
||||
|
||||
for doc_type, keywords in DOCUMENT_TYPE_KEYWORDS.items():
|
||||
score = sum(1 for kw in keywords if kw in text_lower)
|
||||
if score > 0:
|
||||
scores[doc_type] = score
|
||||
|
||||
if not scores:
|
||||
return "OTHER", 0.3
|
||||
|
||||
best_type = max(scores, key=scores.get)
|
||||
confidence = min(0.95, 0.5 + scores[best_type] * 0.15)
|
||||
return best_type, confidence
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# GAP ANALYSIS
|
||||
# =============================================================================
|
||||
|
||||
GAP_RULES = [
|
||||
{
|
||||
"category": "AI Act Compliance",
|
||||
"regulation": "EU AI Act Art. 6",
|
||||
"check_keywords": ["ki", "ai", "kuenstliche intelligenz", "machine learning"],
|
||||
"gap_if_missing": ["risikoklassifizierung", "risk classification", "risikokategorie"],
|
||||
"severity": "CRITICAL",
|
||||
"action": "Risikoklassifizierung fuer KI-Systeme durchfuehren",
|
||||
},
|
||||
{
|
||||
"category": "Transparenz",
|
||||
"regulation": "DSGVO Art. 13, 14, 22",
|
||||
"check_keywords": ["automatisiert", "automated", "profiling"],
|
||||
"gap_if_missing": ["informationspflicht", "information obligation", "transparenz"],
|
||||
"severity": "HIGH",
|
||||
"action": "Informationspflichten bei automatisierten Entscheidungen ergaenzen",
|
||||
},
|
||||
{
|
||||
"category": "TOMs",
|
||||
"regulation": "DSGVO Art. 32",
|
||||
"check_keywords": ["ki", "ai", "cloud", "saas"],
|
||||
"gap_if_missing": ["technische massnahmen", "verschluesselung", "encryption"],
|
||||
"severity": "MEDIUM",
|
||||
"action": "Technisch-organisatorische Massnahmen um KI-Aspekte erweitern",
|
||||
},
|
||||
{
|
||||
"category": "VVT",
|
||||
"regulation": "DSGVO Art. 30",
|
||||
"check_keywords": ["verarbeitung", "processing", "daten"],
|
||||
"gap_if_missing": ["verarbeitungsverzeichnis", "vvt", "processing activities"],
|
||||
"severity": "HIGH",
|
||||
"action": "Verarbeitungsverzeichnis aktualisieren",
|
||||
},
|
||||
{
|
||||
"category": "Menschliche Aufsicht",
|
||||
"regulation": "EU AI Act Art. 14",
|
||||
"check_keywords": ["ki", "ai", "autonom", "autonomous"],
|
||||
"gap_if_missing": ["menschliche aufsicht", "human oversight", "human-in-the-loop"],
|
||||
"severity": "MEDIUM",
|
||||
"action": "Prozesse fuer menschliche Aufsicht definieren",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def analyze_gaps(text: str, doc_type: str) -> list[dict]:
|
||||
"""Analyze document text for compliance gaps."""
|
||||
text_lower = text.lower()
|
||||
gaps = []
|
||||
|
||||
for rule in GAP_RULES:
|
||||
# Check if rule applies (keywords present in document)
|
||||
applies = any(kw in text_lower for kw in rule["check_keywords"])
|
||||
if not applies:
|
||||
continue
|
||||
|
||||
# Check if gap exists (required elements missing)
|
||||
has_gap = not any(kw in text_lower for kw in rule["gap_if_missing"])
|
||||
if has_gap:
|
||||
gaps.append({
|
||||
"id": f"gap-{uuid.uuid4().hex[:8]}",
|
||||
"category": rule["category"],
|
||||
"description": f"{rule['category']}: Luecke erkannt",
|
||||
"severity": rule["severity"],
|
||||
"regulation": rule["regulation"],
|
||||
"required_action": rule["action"],
|
||||
"related_step_id": doc_type.lower(),
|
||||
})
|
||||
|
||||
return gaps
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TEXT EXTRACTION
|
||||
# =============================================================================
|
||||
|
||||
def extract_text_from_pdf(content: bytes) -> str:
|
||||
"""Extract text from PDF using PyMuPDF (fitz)."""
|
||||
try:
|
||||
import fitz
|
||||
doc = fitz.open(stream=content, filetype="pdf")
|
||||
text_parts = []
|
||||
for page in doc:
|
||||
text_parts.append(page.get_text())
|
||||
doc.close()
|
||||
return "\n".join(text_parts)
|
||||
except ImportError:
|
||||
logger.warning("PyMuPDF not available, returning empty text")
|
||||
return ""
|
||||
except Exception as e:
|
||||
logger.error(f"PDF extraction failed: {e}")
|
||||
return ""
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# LLM CLASSIFICATION (optional enhancement)
|
||||
# =============================================================================
|
||||
|
||||
async def classify_with_llm(text: str) -> Optional[tuple[str, float]]:
|
||||
"""Use Ollama LLM to classify document type (optional, falls back to keywords)."""
|
||||
try:
|
||||
prompt = f"""Klassifiziere das folgende Dokument in eine dieser Kategorien:
|
||||
DSFA, TOM, VVT, PRIVACY_POLICY, AGB, COOKIE_POLICY, RISK_ASSESSMENT, AUDIT_REPORT, OTHER
|
||||
|
||||
Antworte NUR mit dem Kategorienamen, nichts anderes.
|
||||
|
||||
Dokumenttext (erste 2000 Zeichen):
|
||||
{text[:2000]}"""
|
||||
|
||||
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||
response = await client.post(
|
||||
f"{OLLAMA_URL}/api/generate",
|
||||
json={
|
||||
"model": LLM_MODEL,
|
||||
"prompt": prompt,
|
||||
"stream": False,
|
||||
"options": {"temperature": 0.1, "num_predict": 20},
|
||||
},
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
answer = result.get("response", "").strip().upper()
|
||||
# Validate answer
|
||||
valid_types = {"DSFA", "TOM", "VVT", "PRIVACY_POLICY", "AGB",
|
||||
"COOKIE_POLICY", "RISK_ASSESSMENT", "AUDIT_REPORT", "OTHER"}
|
||||
if answer in valid_types:
|
||||
return answer, 0.85
|
||||
except Exception as e:
|
||||
logger.warning(f"LLM classification failed, using keyword fallback: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# RESPONSE MODELS
|
||||
# =============================================================================
|
||||
|
||||
class DocumentAnalysisResponse(BaseModel):
|
||||
document_id: str
|
||||
filename: str
|
||||
detected_type: str
|
||||
confidence: float
|
||||
extracted_entities: list[str]
|
||||
recommendations: list[str]
|
||||
gap_analysis: dict
|
||||
|
||||
|
||||
class DocumentListResponse(BaseModel):
|
||||
documents: list[dict]
|
||||
total: int
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ROUTES
|
||||
# =============================================================================
|
||||
|
||||
@router.post("/analyze", response_model=DocumentAnalysisResponse)
|
||||
async def analyze_document(
|
||||
file: UploadFile = File(...),
|
||||
document_type: str = Form("OTHER"),
|
||||
tenant_id: str = Form("default"),
|
||||
):
|
||||
"""Upload and analyze a compliance document."""
|
||||
if not file.filename:
|
||||
raise HTTPException(status_code=400, detail="No file provided")
|
||||
|
||||
# Read file content
|
||||
content = await file.read()
|
||||
file_size = len(content)
|
||||
|
||||
# Extract text
|
||||
if file.content_type == "application/pdf" or (file.filename and file.filename.endswith(".pdf")):
|
||||
text = extract_text_from_pdf(content)
|
||||
else:
|
||||
# Try to decode as text
|
||||
try:
|
||||
text = content.decode("utf-8")
|
||||
except UnicodeDecodeError:
|
||||
text = ""
|
||||
|
||||
# Detect document type
|
||||
if document_type == "OTHER" and text:
|
||||
# Try LLM first, fallback to keywords
|
||||
llm_result = await classify_with_llm(text)
|
||||
if llm_result:
|
||||
detected_type, confidence = llm_result
|
||||
else:
|
||||
detected_type, confidence = detect_document_type(text)
|
||||
else:
|
||||
detected_type = document_type
|
||||
confidence = 1.0
|
||||
|
||||
# Extract key entities
|
||||
entities = []
|
||||
entity_keywords = ["DSGVO", "AI Act", "ISO 27001", "NIS2", "BDSG",
|
||||
"Personenbezogene Daten", "Auftragsverarbeitung", "DSFA"]
|
||||
for kw in entity_keywords:
|
||||
if kw.lower() in text.lower():
|
||||
entities.append(kw)
|
||||
|
||||
# Analyze gaps
|
||||
gaps = analyze_gaps(text, detected_type)
|
||||
|
||||
# Generate recommendations
|
||||
recommendations = []
|
||||
if gaps:
|
||||
recommendations = [g["required_action"] for g in gaps[:5]]
|
||||
if not recommendations:
|
||||
recommendations = ["Dokument erscheint vollstaendig"]
|
||||
|
||||
# Persist to database
|
||||
doc_id = str(uuid.uuid4())
|
||||
db = SessionLocal()
|
||||
try:
|
||||
db.execute(
|
||||
"""INSERT INTO compliance_imported_documents
|
||||
(id, tenant_id, filename, file_type, file_size, detected_type, detection_confidence,
|
||||
extracted_text, extracted_entities, recommendations, status, analyzed_at)
|
||||
VALUES (:id, :tenant_id, :filename, :file_type, :file_size, :detected_type, :confidence,
|
||||
:text, :entities::jsonb, :recommendations::jsonb, 'analyzed', NOW())""",
|
||||
{
|
||||
"id": doc_id,
|
||||
"tenant_id": tenant_id,
|
||||
"filename": file.filename,
|
||||
"file_type": file.content_type or "unknown",
|
||||
"file_size": file_size,
|
||||
"detected_type": detected_type,
|
||||
"confidence": confidence,
|
||||
"text": text[:50000], # Limit stored text
|
||||
"entities": str(entities).replace("'", '"'),
|
||||
"recommendations": str(recommendations).replace("'", '"'),
|
||||
},
|
||||
)
|
||||
|
||||
# Save gap analysis
|
||||
total_gaps = len(gaps)
|
||||
gap_analysis_result = {
|
||||
"id": f"analysis-{doc_id[:8]}",
|
||||
"total_gaps": total_gaps,
|
||||
"critical_gaps": len([g for g in gaps if g["severity"] == "CRITICAL"]),
|
||||
"high_gaps": len([g for g in gaps if g["severity"] == "HIGH"]),
|
||||
"medium_gaps": len([g for g in gaps if g["severity"] == "MEDIUM"]),
|
||||
"low_gaps": len([g for g in gaps if g["severity"] == "LOW"]),
|
||||
"gaps": gaps,
|
||||
"recommended_packages": ["analyse", "dokumentation"] if total_gaps > 0 else [],
|
||||
}
|
||||
|
||||
if total_gaps > 0:
|
||||
import json
|
||||
db.execute(
|
||||
"""INSERT INTO compliance_gap_analyses
|
||||
(tenant_id, document_id, total_gaps, critical_gaps, high_gaps, medium_gaps, low_gaps, gaps, recommended_packages)
|
||||
VALUES (:tenant_id, :document_id, :total, :critical, :high, :medium, :low, :gaps::jsonb, :packages::jsonb)""",
|
||||
{
|
||||
"tenant_id": tenant_id,
|
||||
"document_id": doc_id,
|
||||
"total": gap_analysis_result["total_gaps"],
|
||||
"critical": gap_analysis_result["critical_gaps"],
|
||||
"high": gap_analysis_result["high_gaps"],
|
||||
"medium": gap_analysis_result["medium_gaps"],
|
||||
"low": gap_analysis_result["low_gaps"],
|
||||
"gaps": json.dumps(gaps),
|
||||
"packages": json.dumps(gap_analysis_result["recommended_packages"]),
|
||||
},
|
||||
)
|
||||
|
||||
db.commit()
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
logger.error(f"Failed to persist document analysis: {e}")
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
return DocumentAnalysisResponse(
|
||||
document_id=doc_id,
|
||||
filename=file.filename or "unknown",
|
||||
detected_type=detected_type,
|
||||
confidence=confidence,
|
||||
extracted_entities=entities,
|
||||
recommendations=recommendations,
|
||||
gap_analysis=gap_analysis_result,
|
||||
)
|
||||
|
||||
|
||||
@router.get("/documents", response_model=DocumentListResponse)
|
||||
async def list_documents(tenant_id: str = "default"):
|
||||
"""List all imported documents for a tenant."""
|
||||
db = SessionLocal()
|
||||
try:
|
||||
result = db.execute(
|
||||
"""SELECT id, filename, file_type, file_size, detected_type, detection_confidence,
|
||||
extracted_entities, recommendations, status, analyzed_at, created_at
|
||||
FROM compliance_imported_documents
|
||||
WHERE tenant_id = :tenant_id
|
||||
ORDER BY created_at DESC""",
|
||||
{"tenant_id": tenant_id},
|
||||
)
|
||||
rows = result.fetchall()
|
||||
documents = []
|
||||
for row in rows:
|
||||
documents.append({
|
||||
"id": str(row[0]),
|
||||
"filename": row[1],
|
||||
"file_type": row[2],
|
||||
"file_size": row[3],
|
||||
"detected_type": row[4],
|
||||
"confidence": row[5],
|
||||
"extracted_entities": row[6] or [],
|
||||
"recommendations": row[7] or [],
|
||||
"status": row[8],
|
||||
"analyzed_at": str(row[9]) if row[9] else None,
|
||||
"created_at": str(row[10]),
|
||||
})
|
||||
return DocumentListResponse(documents=documents, total=len(documents))
|
||||
finally:
|
||||
db.close()
|
||||
608
backend-compliance/compliance/api/screening_routes.py
Normal file
608
backend-compliance/compliance/api/screening_routes.py
Normal file
@@ -0,0 +1,608 @@
|
||||
"""
|
||||
FastAPI routes for System Screening (SBOM Generation + Vulnerability Scan).
|
||||
|
||||
Endpoints:
|
||||
- POST /v1/screening/scan: Upload dependency file, generate SBOM, scan for vulnerabilities
|
||||
- GET /v1/screening/{screening_id}: Get screening result by ID
|
||||
- GET /v1/screening: List screenings for a tenant
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
from fastapi import APIRouter, File, Form, UploadFile, HTTPException
|
||||
from pydantic import BaseModel
|
||||
|
||||
from database import SessionLocal
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/v1/screening", tags=["system-screening"])
|
||||
|
||||
OSV_API_URL = "https://api.osv.dev/v1/query"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# RESPONSE MODELS
|
||||
# =============================================================================
|
||||
|
||||
class SecurityIssueResponse(BaseModel):
|
||||
id: str
|
||||
severity: str
|
||||
title: str
|
||||
description: Optional[str] = None
|
||||
cve: Optional[str] = None
|
||||
cvss: Optional[float] = None
|
||||
affected_component: str
|
||||
affected_version: Optional[str] = None
|
||||
fixed_in: Optional[str] = None
|
||||
remediation: Optional[str] = None
|
||||
status: str = "OPEN"
|
||||
|
||||
|
||||
class SBOMComponentResponse(BaseModel):
|
||||
name: str
|
||||
version: str
|
||||
type: str
|
||||
purl: str
|
||||
licenses: list[str]
|
||||
vulnerabilities: list[dict]
|
||||
|
||||
|
||||
class ScreeningResponse(BaseModel):
|
||||
id: str
|
||||
status: str
|
||||
sbom_format: str
|
||||
sbom_version: str
|
||||
total_components: int
|
||||
total_issues: int
|
||||
critical_issues: int
|
||||
high_issues: int
|
||||
medium_issues: int
|
||||
low_issues: int
|
||||
components: list[SBOMComponentResponse]
|
||||
issues: list[SecurityIssueResponse]
|
||||
started_at: Optional[str] = None
|
||||
completed_at: Optional[str] = None
|
||||
|
||||
|
||||
class ScreeningListResponse(BaseModel):
|
||||
screenings: list[dict]
|
||||
total: int
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# DEPENDENCY PARSING
|
||||
# =============================================================================
|
||||
|
||||
def parse_package_lock(content: str) -> list[dict]:
|
||||
"""Parse package-lock.json and extract dependencies."""
|
||||
try:
|
||||
data = json.loads(content)
|
||||
except json.JSONDecodeError:
|
||||
return []
|
||||
|
||||
components = []
|
||||
|
||||
# package-lock.json v2/v3 format (packages field)
|
||||
packages = data.get("packages", {})
|
||||
if packages:
|
||||
for path, info in packages.items():
|
||||
if not path: # Skip root
|
||||
continue
|
||||
name = path.split("node_modules/")[-1] if "node_modules/" in path else path
|
||||
version = info.get("version", "unknown")
|
||||
if name and version != "unknown":
|
||||
components.append({
|
||||
"name": name,
|
||||
"version": version,
|
||||
"type": "library",
|
||||
"ecosystem": "npm",
|
||||
"license": info.get("license", "unknown"),
|
||||
})
|
||||
|
||||
# Fallback: v1 format (dependencies field)
|
||||
if not components:
|
||||
dependencies = data.get("dependencies", {})
|
||||
for name, info in dependencies.items():
|
||||
if isinstance(info, dict):
|
||||
components.append({
|
||||
"name": name,
|
||||
"version": info.get("version", "unknown"),
|
||||
"type": "library",
|
||||
"ecosystem": "npm",
|
||||
"license": "unknown",
|
||||
})
|
||||
|
||||
return components
|
||||
|
||||
|
||||
def parse_requirements_txt(content: str) -> list[dict]:
|
||||
"""Parse requirements.txt and extract dependencies."""
|
||||
components = []
|
||||
for line in content.strip().split("\n"):
|
||||
line = line.strip()
|
||||
if not line or line.startswith("#") or line.startswith("-"):
|
||||
continue
|
||||
|
||||
# Match patterns: package==version, package>=version, package~=version
|
||||
match = re.match(r'^([a-zA-Z0-9_.-]+)\s*([>=<~!]+)\s*([a-zA-Z0-9_.*-]+)', line)
|
||||
if match:
|
||||
components.append({
|
||||
"name": match.group(1),
|
||||
"version": match.group(3),
|
||||
"type": "library",
|
||||
"ecosystem": "PyPI",
|
||||
"license": "unknown",
|
||||
})
|
||||
elif re.match(r'^[a-zA-Z0-9_.-]+$', line):
|
||||
components.append({
|
||||
"name": line,
|
||||
"version": "latest",
|
||||
"type": "library",
|
||||
"ecosystem": "PyPI",
|
||||
"license": "unknown",
|
||||
})
|
||||
|
||||
return components
|
||||
|
||||
|
||||
def parse_yarn_lock(content: str) -> list[dict]:
|
||||
"""Parse yarn.lock and extract dependencies (basic)."""
|
||||
components = []
|
||||
current_name = None
|
||||
for line in content.split("\n"):
|
||||
# Match: "package@version":
|
||||
match = re.match(r'^"?([^@]+)@[^"]*"?:', line)
|
||||
if match:
|
||||
current_name = match.group(1).strip()
|
||||
elif current_name and line.strip().startswith("version "):
|
||||
version_match = re.match(r'\s+version\s+"?([^"]+)"?', line)
|
||||
if version_match:
|
||||
components.append({
|
||||
"name": current_name,
|
||||
"version": version_match.group(1),
|
||||
"type": "library",
|
||||
"ecosystem": "npm",
|
||||
"license": "unknown",
|
||||
})
|
||||
current_name = None
|
||||
|
||||
return components
|
||||
|
||||
|
||||
def detect_and_parse(filename: str, content: str) -> tuple[list[dict], str]:
|
||||
"""Detect file type and parse accordingly."""
|
||||
fname = filename.lower()
|
||||
|
||||
if "package-lock" in fname or fname.endswith("package-lock.json"):
|
||||
return parse_package_lock(content), "npm"
|
||||
elif fname == "requirements.txt" or fname.endswith("/requirements.txt"):
|
||||
return parse_requirements_txt(content), "PyPI"
|
||||
elif "yarn.lock" in fname:
|
||||
return parse_yarn_lock(content), "npm"
|
||||
elif fname.endswith(".json"):
|
||||
# Try package-lock format
|
||||
comps = parse_package_lock(content)
|
||||
if comps:
|
||||
return comps, "npm"
|
||||
|
||||
# Fallback: try requirements.txt format
|
||||
comps = parse_requirements_txt(content)
|
||||
if comps:
|
||||
return comps, "PyPI"
|
||||
|
||||
return [], "unknown"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# SBOM GENERATION (CycloneDX format)
|
||||
# =============================================================================
|
||||
|
||||
def generate_sbom(components: list[dict], ecosystem: str) -> dict:
|
||||
"""Generate a CycloneDX 1.5 SBOM from parsed components."""
|
||||
sbom_components = []
|
||||
for comp in components:
|
||||
purl = f"pkg:{ecosystem.lower()}/{comp['name']}@{comp['version']}"
|
||||
sbom_components.append({
|
||||
"type": "library",
|
||||
"name": comp["name"],
|
||||
"version": comp["version"],
|
||||
"purl": purl,
|
||||
"licenses": [comp.get("license", "unknown")] if comp.get("license") != "unknown" else [],
|
||||
})
|
||||
|
||||
return {
|
||||
"bomFormat": "CycloneDX",
|
||||
"specVersion": "1.5",
|
||||
"version": 1,
|
||||
"metadata": {
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"tools": [{"name": "breakpilot-screening", "version": "1.0.0"}],
|
||||
},
|
||||
"components": sbom_components,
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# VULNERABILITY SCANNING (OSV.dev API)
|
||||
# =============================================================================
|
||||
|
||||
async def query_osv(name: str, version: str, ecosystem: str) -> list[dict]:
|
||||
"""Query OSV.dev API for vulnerabilities of a single package."""
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=10.0) as client:
|
||||
response = await client.post(
|
||||
OSV_API_URL,
|
||||
json={
|
||||
"package": {"name": name, "ecosystem": ecosystem},
|
||||
"version": version,
|
||||
},
|
||||
)
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
return data.get("vulns", [])
|
||||
except Exception as e:
|
||||
logger.warning(f"OSV query failed for {name}@{version}: {e}")
|
||||
|
||||
return []
|
||||
|
||||
|
||||
def map_osv_severity(vuln: dict) -> tuple[str, float]:
|
||||
"""Extract severity and CVSS from OSV vulnerability data."""
|
||||
severity = "MEDIUM"
|
||||
cvss = 5.0
|
||||
|
||||
# Check severity array
|
||||
for sev in vuln.get("severity", []):
|
||||
if sev.get("type") == "CVSS_V3":
|
||||
score_str = sev.get("score", "")
|
||||
# Extract base score from CVSS vector
|
||||
try:
|
||||
import re as _re
|
||||
# CVSS vectors don't contain the score directly, try database_specific
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Check database_specific for severity
|
||||
db_specific = vuln.get("database_specific", {})
|
||||
if "severity" in db_specific:
|
||||
sev_str = db_specific["severity"].upper()
|
||||
if sev_str in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
|
||||
severity = sev_str
|
||||
|
||||
# Derive CVSS from severity if not found
|
||||
cvss_map = {"CRITICAL": 9.5, "HIGH": 7.5, "MEDIUM": 5.0, "LOW": 2.5}
|
||||
cvss = cvss_map.get(severity, 5.0)
|
||||
|
||||
return severity, cvss
|
||||
|
||||
|
||||
def extract_fix_version(vuln: dict, package_name: str) -> Optional[str]:
|
||||
"""Extract the fixed-in version from OSV data."""
|
||||
for affected in vuln.get("affected", []):
|
||||
pkg = affected.get("package", {})
|
||||
if pkg.get("name", "").lower() == package_name.lower():
|
||||
for rng in affected.get("ranges", []):
|
||||
for event in rng.get("events", []):
|
||||
if "fixed" in event:
|
||||
return event["fixed"]
|
||||
return None
|
||||
|
||||
|
||||
async def scan_vulnerabilities(components: list[dict], ecosystem: str) -> list[dict]:
|
||||
"""Scan all components for vulnerabilities via OSV.dev."""
|
||||
issues = []
|
||||
|
||||
# Batch: scan up to 50 components to avoid timeouts
|
||||
scan_limit = min(len(components), 50)
|
||||
|
||||
for comp in components[:scan_limit]:
|
||||
if comp["version"] in ("latest", "unknown", "*"):
|
||||
continue
|
||||
|
||||
vulns = await query_osv(comp["name"], comp["version"], ecosystem)
|
||||
|
||||
for vuln in vulns:
|
||||
vuln_id = vuln.get("id", f"OSV-{uuid.uuid4().hex[:8]}")
|
||||
aliases = vuln.get("aliases", [])
|
||||
cve = next((a for a in aliases if a.startswith("CVE-")), None)
|
||||
severity, cvss = map_osv_severity(vuln)
|
||||
fixed_in = extract_fix_version(vuln, comp["name"])
|
||||
|
||||
issues.append({
|
||||
"id": str(uuid.uuid4()),
|
||||
"severity": severity,
|
||||
"title": vuln.get("summary", vuln_id),
|
||||
"description": vuln.get("details", "")[:500],
|
||||
"cve": cve,
|
||||
"cvss": cvss,
|
||||
"affected_component": comp["name"],
|
||||
"affected_version": comp["version"],
|
||||
"fixed_in": fixed_in,
|
||||
"remediation": f"Upgrade {comp['name']} to {fixed_in}" if fixed_in else f"Check {vuln_id} for remediation steps",
|
||||
"status": "OPEN",
|
||||
})
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ROUTES
|
||||
# =============================================================================
|
||||
|
||||
@router.post("/scan", response_model=ScreeningResponse)
|
||||
async def scan_dependencies(
|
||||
file: UploadFile = File(...),
|
||||
tenant_id: str = Form("default"),
|
||||
):
|
||||
"""Upload a dependency file, generate SBOM, and scan for vulnerabilities."""
|
||||
if not file.filename:
|
||||
raise HTTPException(status_code=400, detail="No file provided")
|
||||
|
||||
content = await file.read()
|
||||
try:
|
||||
text = content.decode("utf-8")
|
||||
except UnicodeDecodeError:
|
||||
raise HTTPException(status_code=400, detail="File must be a text-based dependency file")
|
||||
|
||||
# Parse dependencies
|
||||
components, ecosystem = detect_and_parse(file.filename, text)
|
||||
if not components:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Could not parse dependencies. Supported: package-lock.json, requirements.txt, yarn.lock",
|
||||
)
|
||||
|
||||
# Generate SBOM
|
||||
sbom = generate_sbom(components, ecosystem)
|
||||
|
||||
# Scan for vulnerabilities
|
||||
started_at = datetime.now(timezone.utc)
|
||||
issues = await scan_vulnerabilities(components, ecosystem)
|
||||
completed_at = datetime.now(timezone.utc)
|
||||
|
||||
# Count severities
|
||||
critical = len([i for i in issues if i["severity"] == "CRITICAL"])
|
||||
high = len([i for i in issues if i["severity"] == "HIGH"])
|
||||
medium = len([i for i in issues if i["severity"] == "MEDIUM"])
|
||||
low = len([i for i in issues if i["severity"] == "LOW"])
|
||||
|
||||
# Persist to database
|
||||
screening_id = str(uuid.uuid4())
|
||||
db = SessionLocal()
|
||||
try:
|
||||
db.execute(
|
||||
"""INSERT INTO compliance_screenings
|
||||
(id, tenant_id, status, sbom_format, sbom_version,
|
||||
total_components, total_issues, critical_issues, high_issues, medium_issues, low_issues,
|
||||
sbom_data, started_at, completed_at)
|
||||
VALUES (:id, :tenant_id, 'completed', 'CycloneDX', '1.5',
|
||||
:total_components, :total_issues, :critical, :high, :medium, :low,
|
||||
:sbom_data::jsonb, :started_at, :completed_at)""",
|
||||
{
|
||||
"id": screening_id,
|
||||
"tenant_id": tenant_id,
|
||||
"total_components": len(components),
|
||||
"total_issues": len(issues),
|
||||
"critical": critical,
|
||||
"high": high,
|
||||
"medium": medium,
|
||||
"low": low,
|
||||
"sbom_data": json.dumps(sbom),
|
||||
"started_at": started_at,
|
||||
"completed_at": completed_at,
|
||||
},
|
||||
)
|
||||
|
||||
# Persist security issues
|
||||
for issue in issues:
|
||||
db.execute(
|
||||
"""INSERT INTO compliance_security_issues
|
||||
(id, screening_id, severity, title, description, cve, cvss,
|
||||
affected_component, affected_version, fixed_in, remediation, status)
|
||||
VALUES (:id, :screening_id, :severity, :title, :description, :cve, :cvss,
|
||||
:component, :version, :fixed_in, :remediation, :status)""",
|
||||
{
|
||||
"id": issue["id"],
|
||||
"screening_id": screening_id,
|
||||
"severity": issue["severity"],
|
||||
"title": issue["title"][:500],
|
||||
"description": issue.get("description", "")[:1000],
|
||||
"cve": issue.get("cve"),
|
||||
"cvss": issue.get("cvss"),
|
||||
"component": issue["affected_component"],
|
||||
"version": issue.get("affected_version"),
|
||||
"fixed_in": issue.get("fixed_in"),
|
||||
"remediation": issue.get("remediation"),
|
||||
"status": issue["status"],
|
||||
},
|
||||
)
|
||||
|
||||
db.commit()
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
logger.error(f"Failed to persist screening: {e}")
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
# Build response
|
||||
sbom_components = []
|
||||
comp_vulns: dict[str, list[dict]] = {}
|
||||
for issue in issues:
|
||||
comp_name = issue["affected_component"]
|
||||
if comp_name not in comp_vulns:
|
||||
comp_vulns[comp_name] = []
|
||||
comp_vulns[comp_name].append({
|
||||
"id": issue.get("cve") or issue["id"],
|
||||
"cve": issue.get("cve"),
|
||||
"severity": issue["severity"],
|
||||
"title": issue["title"],
|
||||
"cvss": issue.get("cvss"),
|
||||
"fixedIn": issue.get("fixed_in"),
|
||||
})
|
||||
|
||||
for sc in sbom["components"]:
|
||||
sbom_components.append(SBOMComponentResponse(
|
||||
name=sc["name"],
|
||||
version=sc["version"],
|
||||
type=sc["type"],
|
||||
purl=sc["purl"],
|
||||
licenses=sc.get("licenses", []),
|
||||
vulnerabilities=comp_vulns.get(sc["name"], []),
|
||||
))
|
||||
|
||||
issue_responses = [
|
||||
SecurityIssueResponse(
|
||||
id=i["id"],
|
||||
severity=i["severity"],
|
||||
title=i["title"],
|
||||
description=i.get("description"),
|
||||
cve=i.get("cve"),
|
||||
cvss=i.get("cvss"),
|
||||
affected_component=i["affected_component"],
|
||||
affected_version=i.get("affected_version"),
|
||||
fixed_in=i.get("fixed_in"),
|
||||
remediation=i.get("remediation"),
|
||||
status=i["status"],
|
||||
)
|
||||
for i in issues
|
||||
]
|
||||
|
||||
return ScreeningResponse(
|
||||
id=screening_id,
|
||||
status="completed",
|
||||
sbom_format="CycloneDX",
|
||||
sbom_version="1.5",
|
||||
total_components=len(components),
|
||||
total_issues=len(issues),
|
||||
critical_issues=critical,
|
||||
high_issues=high,
|
||||
medium_issues=medium,
|
||||
low_issues=low,
|
||||
components=sbom_components,
|
||||
issues=issue_responses,
|
||||
started_at=started_at.isoformat(),
|
||||
completed_at=completed_at.isoformat(),
|
||||
)
|
||||
|
||||
|
||||
@router.get("/{screening_id}", response_model=ScreeningResponse)
|
||||
async def get_screening(screening_id: str):
|
||||
"""Get a screening result by ID."""
|
||||
db = SessionLocal()
|
||||
try:
|
||||
result = db.execute(
|
||||
"""SELECT id, status, sbom_format, sbom_version,
|
||||
total_components, total_issues, critical_issues, high_issues,
|
||||
medium_issues, low_issues, sbom_data, started_at, completed_at
|
||||
FROM compliance_screenings WHERE id = :id""",
|
||||
{"id": screening_id},
|
||||
)
|
||||
row = result.fetchone()
|
||||
if not row:
|
||||
raise HTTPException(status_code=404, detail="Screening not found")
|
||||
|
||||
# Fetch issues
|
||||
issues_result = db.execute(
|
||||
"""SELECT id, severity, title, description, cve, cvss,
|
||||
affected_component, affected_version, fixed_in, remediation, status
|
||||
FROM compliance_security_issues WHERE screening_id = :id""",
|
||||
{"id": screening_id},
|
||||
)
|
||||
issues_rows = issues_result.fetchall()
|
||||
|
||||
issues = [
|
||||
SecurityIssueResponse(
|
||||
id=str(r[0]), severity=r[1], title=r[2], description=r[3],
|
||||
cve=r[4], cvss=r[5], affected_component=r[6],
|
||||
affected_version=r[7], fixed_in=r[8], remediation=r[9], status=r[10],
|
||||
)
|
||||
for r in issues_rows
|
||||
]
|
||||
|
||||
# Reconstruct components from SBOM data
|
||||
sbom_data = row[10] or {}
|
||||
components = []
|
||||
comp_vulns: dict[str, list[dict]] = {}
|
||||
for issue in issues:
|
||||
if issue.affected_component not in comp_vulns:
|
||||
comp_vulns[issue.affected_component] = []
|
||||
comp_vulns[issue.affected_component].append({
|
||||
"id": issue.cve or issue.id,
|
||||
"cve": issue.cve,
|
||||
"severity": issue.severity,
|
||||
"title": issue.title,
|
||||
"cvss": issue.cvss,
|
||||
"fixedIn": issue.fixed_in,
|
||||
})
|
||||
|
||||
for sc in sbom_data.get("components", []):
|
||||
components.append(SBOMComponentResponse(
|
||||
name=sc["name"],
|
||||
version=sc["version"],
|
||||
type=sc.get("type", "library"),
|
||||
purl=sc.get("purl", ""),
|
||||
licenses=sc.get("licenses", []),
|
||||
vulnerabilities=comp_vulns.get(sc["name"], []),
|
||||
))
|
||||
|
||||
return ScreeningResponse(
|
||||
id=str(row[0]),
|
||||
status=row[1],
|
||||
sbom_format=row[2] or "CycloneDX",
|
||||
sbom_version=row[3] or "1.5",
|
||||
total_components=row[4] or 0,
|
||||
total_issues=row[5] or 0,
|
||||
critical_issues=row[6] or 0,
|
||||
high_issues=row[7] or 0,
|
||||
medium_issues=row[8] or 0,
|
||||
low_issues=row[9] or 0,
|
||||
components=components,
|
||||
issues=issues,
|
||||
started_at=str(row[11]) if row[11] else None,
|
||||
completed_at=str(row[12]) if row[12] else None,
|
||||
)
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
|
||||
@router.get("", response_model=ScreeningListResponse)
|
||||
async def list_screenings(tenant_id: str = "default"):
|
||||
"""List all screenings for a tenant."""
|
||||
db = SessionLocal()
|
||||
try:
|
||||
result = db.execute(
|
||||
"""SELECT id, status, total_components, total_issues,
|
||||
critical_issues, high_issues, medium_issues, low_issues,
|
||||
started_at, completed_at, created_at
|
||||
FROM compliance_screenings
|
||||
WHERE tenant_id = :tenant_id
|
||||
ORDER BY created_at DESC""",
|
||||
{"tenant_id": tenant_id},
|
||||
)
|
||||
rows = result.fetchall()
|
||||
screenings = [
|
||||
{
|
||||
"id": str(r[0]),
|
||||
"status": r[1],
|
||||
"total_components": r[2],
|
||||
"total_issues": r[3],
|
||||
"critical_issues": r[4],
|
||||
"high_issues": r[5],
|
||||
"medium_issues": r[6],
|
||||
"low_issues": r[7],
|
||||
"started_at": str(r[8]) if r[8] else None,
|
||||
"completed_at": str(r[9]) if r[9] else None,
|
||||
"created_at": str(r[10]),
|
||||
}
|
||||
for r in rows
|
||||
]
|
||||
return ScreeningListResponse(screenings=screenings, total=len(screenings))
|
||||
finally:
|
||||
db.close()
|
||||
@@ -24,6 +24,13 @@ from compliance.api import router as compliance_framework_router
|
||||
# Source Policy
|
||||
from compliance.api.source_policy_router import router as source_policy_router
|
||||
|
||||
# Document Import & Screening
|
||||
from compliance.api.import_routes import router as import_router
|
||||
from compliance.api.screening_routes import router as screening_router
|
||||
|
||||
# Company Profile
|
||||
from compliance.api.company_profile_routes import router as company_profile_router
|
||||
|
||||
# Middleware
|
||||
from middleware import (
|
||||
RequestIDMiddleware,
|
||||
@@ -91,6 +98,15 @@ app.include_router(compliance_framework_router, prefix="/api")
|
||||
# Source Policy (allowed sources, PII rules, audit)
|
||||
app.include_router(source_policy_router, prefix="/api")
|
||||
|
||||
# Document Import (PDF analysis, gap detection)
|
||||
app.include_router(import_router, prefix="/api")
|
||||
|
||||
# System Screening (SBOM generation, vulnerability scan)
|
||||
app.include_router(screening_router, prefix="/api")
|
||||
|
||||
# Company Profile (CRUD with audit logging)
|
||||
app.include_router(company_profile_router, prefix="/api")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
|
||||
19
backend-compliance/migrations/002_sdk_states.sql
Normal file
19
backend-compliance/migrations/002_sdk_states.sql
Normal file
@@ -0,0 +1,19 @@
|
||||
-- =============================================================================
|
||||
-- Migration 002: SDK States Table
|
||||
--
|
||||
-- Persistent storage for SDK state management.
|
||||
-- Replaces the in-memory store used during development.
|
||||
-- =============================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sdk_states (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id VARCHAR(255) NOT NULL UNIQUE,
|
||||
user_id VARCHAR(255),
|
||||
state JSONB NOT NULL,
|
||||
version INTEGER DEFAULT 1,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_sdk_states_tenant ON sdk_states(tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_sdk_states_updated ON sdk_states(updated_at);
|
||||
41
backend-compliance/migrations/003_document_import.sql
Normal file
41
backend-compliance/migrations/003_document_import.sql
Normal file
@@ -0,0 +1,41 @@
|
||||
-- =============================================================================
|
||||
-- Migration 003: Document Import Tables
|
||||
--
|
||||
-- Tables for imported compliance documents and gap analysis results.
|
||||
-- =============================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS compliance_imported_documents (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id VARCHAR(255) NOT NULL,
|
||||
filename VARCHAR(500) NOT NULL,
|
||||
file_type VARCHAR(50) NOT NULL,
|
||||
file_size INTEGER,
|
||||
detected_type VARCHAR(50),
|
||||
detection_confidence FLOAT,
|
||||
extracted_text TEXT,
|
||||
extracted_entities JSONB DEFAULT '[]',
|
||||
recommendations JSONB DEFAULT '[]',
|
||||
status VARCHAR(20) DEFAULT 'pending',
|
||||
analyzed_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_imported_docs_tenant ON compliance_imported_documents(tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_imported_docs_status ON compliance_imported_documents(status);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS compliance_gap_analyses (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id VARCHAR(255) NOT NULL,
|
||||
document_id UUID REFERENCES compliance_imported_documents(id) ON DELETE CASCADE,
|
||||
total_gaps INTEGER DEFAULT 0,
|
||||
critical_gaps INTEGER DEFAULT 0,
|
||||
high_gaps INTEGER DEFAULT 0,
|
||||
medium_gaps INTEGER DEFAULT 0,
|
||||
low_gaps INTEGER DEFAULT 0,
|
||||
gaps JSONB DEFAULT '[]',
|
||||
recommended_packages JSONB DEFAULT '[]',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_gap_analyses_tenant ON compliance_gap_analyses(tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_gap_analyses_document ON compliance_gap_analyses(document_id);
|
||||
45
backend-compliance/migrations/004_screening.sql
Normal file
45
backend-compliance/migrations/004_screening.sql
Normal file
@@ -0,0 +1,45 @@
|
||||
-- =============================================================================
|
||||
-- Migration 004: System Screening Tables
|
||||
--
|
||||
-- Tables for SBOM generation and vulnerability scanning results.
|
||||
-- =============================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS compliance_screenings (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id VARCHAR(255) NOT NULL,
|
||||
status VARCHAR(20) DEFAULT 'pending',
|
||||
sbom_format VARCHAR(50) DEFAULT 'CycloneDX',
|
||||
sbom_version VARCHAR(20) DEFAULT '1.5',
|
||||
total_components INTEGER DEFAULT 0,
|
||||
total_issues INTEGER DEFAULT 0,
|
||||
critical_issues INTEGER DEFAULT 0,
|
||||
high_issues INTEGER DEFAULT 0,
|
||||
medium_issues INTEGER DEFAULT 0,
|
||||
low_issues INTEGER DEFAULT 0,
|
||||
sbom_data JSONB,
|
||||
started_at TIMESTAMPTZ,
|
||||
completed_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_screenings_tenant ON compliance_screenings(tenant_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_screenings_status ON compliance_screenings(status);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS compliance_security_issues (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
screening_id UUID NOT NULL REFERENCES compliance_screenings(id) ON DELETE CASCADE,
|
||||
severity VARCHAR(20) NOT NULL,
|
||||
title VARCHAR(500) NOT NULL,
|
||||
description TEXT,
|
||||
cve VARCHAR(50),
|
||||
cvss FLOAT,
|
||||
affected_component VARCHAR(255),
|
||||
affected_version VARCHAR(100),
|
||||
fixed_in VARCHAR(100),
|
||||
remediation TEXT,
|
||||
status VARCHAR(20) DEFAULT 'OPEN',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_security_issues_screening ON compliance_security_issues(screening_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_security_issues_severity ON compliance_security_issues(severity);
|
||||
74
backend-compliance/migrations/005_company_profile.sql
Normal file
74
backend-compliance/migrations/005_company_profile.sql
Normal file
@@ -0,0 +1,74 @@
|
||||
-- =============================================================================
|
||||
-- Migration 005: Company Profile Table
|
||||
--
|
||||
-- Dedicated table for company profiles with audit logging.
|
||||
-- =============================================================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS compliance_company_profiles (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id VARCHAR(255) NOT NULL UNIQUE,
|
||||
|
||||
-- Basic Info
|
||||
company_name VARCHAR(500) NOT NULL DEFAULT '',
|
||||
legal_form VARCHAR(50) DEFAULT 'GmbH',
|
||||
industry VARCHAR(255) DEFAULT '',
|
||||
founded_year INTEGER,
|
||||
|
||||
-- Business Model
|
||||
business_model VARCHAR(20) DEFAULT 'B2B',
|
||||
offerings JSONB DEFAULT '[]'::jsonb,
|
||||
|
||||
-- Size & Scope
|
||||
company_size VARCHAR(20) DEFAULT 'small',
|
||||
employee_count VARCHAR(20) DEFAULT '1-9',
|
||||
annual_revenue VARCHAR(50) DEFAULT '< 2 Mio',
|
||||
|
||||
-- Locations
|
||||
headquarters_country VARCHAR(10) DEFAULT 'DE',
|
||||
headquarters_city VARCHAR(255) DEFAULT '',
|
||||
has_international_locations BOOLEAN DEFAULT FALSE,
|
||||
international_countries JSONB DEFAULT '[]'::jsonb,
|
||||
|
||||
-- Target Markets & Legal Scope
|
||||
target_markets JSONB DEFAULT '["DE"]'::jsonb,
|
||||
primary_jurisdiction VARCHAR(10) DEFAULT 'DE',
|
||||
|
||||
-- Data Processing Role
|
||||
is_data_controller BOOLEAN DEFAULT TRUE,
|
||||
is_data_processor BOOLEAN DEFAULT FALSE,
|
||||
|
||||
-- AI Usage
|
||||
uses_ai BOOLEAN DEFAULT FALSE,
|
||||
ai_use_cases JSONB DEFAULT '[]'::jsonb,
|
||||
|
||||
-- Contact Persons
|
||||
dpo_name VARCHAR(255),
|
||||
dpo_email VARCHAR(255),
|
||||
legal_contact_name VARCHAR(255),
|
||||
legal_contact_email VARCHAR(255),
|
||||
|
||||
-- Machine Builder Profile (optional)
|
||||
machine_builder JSONB,
|
||||
|
||||
-- Completion
|
||||
is_complete BOOLEAN DEFAULT FALSE,
|
||||
completed_at TIMESTAMPTZ,
|
||||
|
||||
-- Timestamps
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_company_profiles_tenant ON compliance_company_profiles(tenant_id);
|
||||
|
||||
-- Audit log for company profile changes
|
||||
CREATE TABLE IF NOT EXISTS compliance_company_profile_audit (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id VARCHAR(255) NOT NULL,
|
||||
action VARCHAR(20) NOT NULL,
|
||||
changed_fields JSONB,
|
||||
changed_by VARCHAR(255),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_company_profile_audit_tenant ON compliance_company_profile_audit(tenant_id);
|
||||
@@ -30,6 +30,9 @@ Jinja2==3.1.6
|
||||
mammoth==1.11.0
|
||||
Markdown==3.9
|
||||
|
||||
# PDF Text Extraction (document import analysis)
|
||||
PyMuPDF==1.25.3
|
||||
|
||||
# Utilities
|
||||
python-dateutil==2.9.0.post0
|
||||
|
||||
|
||||
134
backend-compliance/tests/test_company_profile_routes.py
Normal file
134
backend-compliance/tests/test_company_profile_routes.py
Normal file
@@ -0,0 +1,134 @@
|
||||
"""Tests for Company Profile routes (company_profile_routes.py)."""
|
||||
|
||||
import json
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from compliance.api.company_profile_routes import (
|
||||
CompanyProfileRequest,
|
||||
row_to_response,
|
||||
log_audit,
|
||||
)
|
||||
|
||||
|
||||
class TestCompanyProfileRequest:
|
||||
"""Tests for request model defaults."""
|
||||
|
||||
def test_default_values(self):
|
||||
req = CompanyProfileRequest()
|
||||
assert req.company_name == ""
|
||||
assert req.legal_form == "GmbH"
|
||||
assert req.business_model == "B2B"
|
||||
assert req.company_size == "small"
|
||||
assert req.headquarters_country == "DE"
|
||||
assert req.is_data_controller is True
|
||||
assert req.is_data_processor is False
|
||||
assert req.uses_ai is False
|
||||
assert req.is_complete is False
|
||||
|
||||
def test_custom_values(self):
|
||||
req = CompanyProfileRequest(
|
||||
company_name="Test GmbH",
|
||||
industry="Software",
|
||||
uses_ai=True,
|
||||
ai_use_cases=["Chatbot", "Analytics"],
|
||||
offerings=["app_web", "software_saas"],
|
||||
)
|
||||
assert req.company_name == "Test GmbH"
|
||||
assert req.uses_ai is True
|
||||
assert len(req.ai_use_cases) == 2
|
||||
assert len(req.offerings) == 2
|
||||
|
||||
def test_serialization(self):
|
||||
req = CompanyProfileRequest(company_name="Test")
|
||||
data = req.model_dump()
|
||||
assert data["company_name"] == "Test"
|
||||
assert isinstance(data["target_markets"], list)
|
||||
|
||||
|
||||
class TestRowToResponse:
|
||||
"""Tests for DB row to response conversion."""
|
||||
|
||||
def _make_row(self, **overrides):
|
||||
"""Create a mock DB row with 30 fields."""
|
||||
defaults = [
|
||||
"uuid-123", # 0: id
|
||||
"default", # 1: tenant_id
|
||||
"Test GmbH", # 2: company_name
|
||||
"GmbH", # 3: legal_form
|
||||
"IT", # 4: industry
|
||||
2020, # 5: founded_year
|
||||
"B2B", # 6: business_model
|
||||
["app_web"], # 7: offerings
|
||||
"small", # 8: company_size
|
||||
"10-49", # 9: employee_count
|
||||
"2-10 Mio", # 10: annual_revenue
|
||||
"DE", # 11: headquarters_country
|
||||
"Berlin", # 12: headquarters_city
|
||||
False, # 13: has_international_locations
|
||||
[], # 14: international_countries
|
||||
["DE", "AT"], # 15: target_markets
|
||||
"DE", # 16: primary_jurisdiction
|
||||
True, # 17: is_data_controller
|
||||
False, # 18: is_data_processor
|
||||
False, # 19: uses_ai
|
||||
[], # 20: ai_use_cases
|
||||
"Max Muster", # 21: dpo_name
|
||||
"dpo@test.de", # 22: dpo_email
|
||||
None, # 23: legal_contact_name
|
||||
None, # 24: legal_contact_email
|
||||
None, # 25: machine_builder
|
||||
True, # 26: is_complete
|
||||
"2026-01-01", # 27: completed_at
|
||||
"2026-01-01", # 28: created_at
|
||||
"2026-01-01", # 29: updated_at
|
||||
]
|
||||
return tuple(defaults)
|
||||
|
||||
def test_basic_conversion(self):
|
||||
row = self._make_row()
|
||||
response = row_to_response(row)
|
||||
assert response.id == "uuid-123"
|
||||
assert response.tenant_id == "default"
|
||||
assert response.company_name == "Test GmbH"
|
||||
assert response.is_complete is True
|
||||
|
||||
def test_none_values_handled(self):
|
||||
row = list(self._make_row())
|
||||
row[5] = None # founded_year
|
||||
row[21] = None # dpo_name
|
||||
row[25] = None # machine_builder
|
||||
row[27] = None # completed_at
|
||||
response = row_to_response(tuple(row))
|
||||
assert response.founded_year is None
|
||||
assert response.dpo_name is None
|
||||
assert response.machine_builder is None
|
||||
assert response.completed_at is None
|
||||
|
||||
def test_non_list_jsonb_handled(self):
|
||||
row = list(self._make_row())
|
||||
row[7] = None # offerings (JSONB could be None)
|
||||
row[14] = None # international_countries
|
||||
response = row_to_response(tuple(row))
|
||||
assert response.offerings == []
|
||||
assert response.international_countries == []
|
||||
|
||||
|
||||
class TestLogAudit:
|
||||
"""Tests for audit logging helper."""
|
||||
|
||||
def test_log_audit_success(self):
|
||||
db = MagicMock()
|
||||
log_audit(db, "tenant-1", "create", {"company_name": "Test"}, "admin")
|
||||
db.execute.assert_called_once()
|
||||
|
||||
def test_log_audit_with_none_fields(self):
|
||||
db = MagicMock()
|
||||
log_audit(db, "tenant-1", "update", None, None)
|
||||
db.execute.assert_called_once()
|
||||
|
||||
def test_log_audit_db_error_handled(self):
|
||||
db = MagicMock()
|
||||
db.execute.side_effect = Exception("DB error")
|
||||
# Should not raise
|
||||
log_audit(db, "tenant-1", "create", {}, "admin")
|
||||
123
backend-compliance/tests/test_import_routes.py
Normal file
123
backend-compliance/tests/test_import_routes.py
Normal file
@@ -0,0 +1,123 @@
|
||||
"""Tests for Document Import routes (import_routes.py)."""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch, AsyncMock
|
||||
|
||||
from compliance.api.import_routes import (
|
||||
detect_document_type,
|
||||
analyze_gaps,
|
||||
extract_text_from_pdf,
|
||||
)
|
||||
|
||||
|
||||
class TestDetectDocumentType:
|
||||
"""Tests for keyword-based document type detection."""
|
||||
|
||||
def test_dsfa_detection(self):
|
||||
text = "Dies ist eine Datenschutz-Folgenabschaetzung (DSFA) nach Art. 35 DSGVO"
|
||||
doc_type, confidence = detect_document_type(text)
|
||||
assert doc_type == "DSFA"
|
||||
assert confidence >= 0.5
|
||||
|
||||
def test_tom_detection(self):
|
||||
text = "Technisch-organisatorische Massnahmen (TOM) zum Schutz personenbezogener Daten"
|
||||
doc_type, confidence = detect_document_type(text)
|
||||
assert doc_type == "TOM"
|
||||
assert confidence >= 0.5
|
||||
|
||||
def test_vvt_detection(self):
|
||||
text = "Verarbeitungsverzeichnis nach Art. 30 DSGVO - VVT processing activities"
|
||||
doc_type, confidence = detect_document_type(text)
|
||||
assert doc_type == "VVT"
|
||||
assert confidence >= 0.5
|
||||
|
||||
def test_privacy_policy_detection(self):
|
||||
text = "Datenschutzerklaerung - Privacy Policy fuer unsere Nutzer"
|
||||
doc_type, confidence = detect_document_type(text)
|
||||
assert doc_type == "PRIVACY_POLICY"
|
||||
assert confidence >= 0.5
|
||||
|
||||
def test_unknown_document(self):
|
||||
text = "Lorem ipsum dolor sit amet"
|
||||
doc_type, confidence = detect_document_type(text)
|
||||
assert doc_type == "OTHER"
|
||||
assert confidence == 0.3
|
||||
|
||||
def test_empty_text(self):
|
||||
doc_type, confidence = detect_document_type("")
|
||||
assert doc_type == "OTHER"
|
||||
assert confidence == 0.3
|
||||
|
||||
def test_confidence_increases_with_more_keywords(self):
|
||||
text_single = "dsfa"
|
||||
text_multi = "dsfa dpia datenschutz-folgenabschaetzung privacy impact"
|
||||
_, conf_single = detect_document_type(text_single)
|
||||
_, conf_multi = detect_document_type(text_multi)
|
||||
assert conf_multi > conf_single
|
||||
|
||||
def test_confidence_capped_at_095(self):
|
||||
text = "dsfa dpia datenschutz-folgenabschaetzung privacy impact assessment report analysis"
|
||||
_, confidence = detect_document_type(text)
|
||||
assert confidence <= 0.95
|
||||
|
||||
|
||||
class TestAnalyzeGaps:
|
||||
"""Tests for gap analysis rules."""
|
||||
|
||||
def test_ai_gap_detected(self):
|
||||
text = "Wir setzen KI und AI in unserer Anwendung ein"
|
||||
gaps = analyze_gaps(text, "OTHER")
|
||||
# Should detect AI Act gap (missing risk classification)
|
||||
ai_gaps = [g for g in gaps if g["category"] == "AI Act Compliance"]
|
||||
assert len(ai_gaps) > 0
|
||||
assert ai_gaps[0]["severity"] == "CRITICAL"
|
||||
|
||||
def test_no_gap_when_requirement_present(self):
|
||||
text = "KI-System mit Risikoklassifizierung nach EU AI Act"
|
||||
gaps = analyze_gaps(text, "OTHER")
|
||||
ai_gaps = [g for g in gaps if g["category"] == "AI Act Compliance"]
|
||||
assert len(ai_gaps) == 0
|
||||
|
||||
def test_tom_gap_detected(self):
|
||||
text = "Cloud-basiertes SaaS-System mit KI-Funktionen"
|
||||
gaps = analyze_gaps(text, "OTHER")
|
||||
tom_gaps = [g for g in gaps if g["category"] == "TOMs"]
|
||||
assert len(tom_gaps) > 0
|
||||
|
||||
def test_no_gaps_for_irrelevant_text(self):
|
||||
text = "Ein einfacher Flyer ohne Datenbezug"
|
||||
gaps = analyze_gaps(text, "OTHER")
|
||||
assert len(gaps) == 0
|
||||
|
||||
def test_gap_has_required_fields(self):
|
||||
text = "KI-System mit automatisierten Entscheidungen"
|
||||
gaps = analyze_gaps(text, "OTHER")
|
||||
assert len(gaps) > 0
|
||||
for gap in gaps:
|
||||
assert "id" in gap
|
||||
assert "category" in gap
|
||||
assert "severity" in gap
|
||||
assert "regulation" in gap
|
||||
assert "required_action" in gap
|
||||
|
||||
|
||||
class TestExtractTextFromPdf:
|
||||
"""Tests for PDF text extraction."""
|
||||
|
||||
def test_empty_bytes_returns_empty(self):
|
||||
result = extract_text_from_pdf(b"")
|
||||
assert result == ""
|
||||
|
||||
def test_invalid_pdf_returns_empty(self):
|
||||
result = extract_text_from_pdf(b"not a pdf")
|
||||
assert result == ""
|
||||
|
||||
@patch("compliance.api.import_routes.fitz")
|
||||
def test_fitz_import_error(self, mock_fitz):
|
||||
"""When fitz is not available, returns empty string."""
|
||||
mock_fitz.open.side_effect = ImportError("No module")
|
||||
# The actual function catches ImportError internally
|
||||
result = extract_text_from_pdf(b"test")
|
||||
# Since we mocked fitz at module level it will raise differently,
|
||||
# but the function should handle it gracefully
|
||||
assert isinstance(result, str)
|
||||
191
backend-compliance/tests/test_screening_routes.py
Normal file
191
backend-compliance/tests/test_screening_routes.py
Normal file
@@ -0,0 +1,191 @@
|
||||
"""Tests for System Screening routes (screening_routes.py)."""
|
||||
|
||||
import json
|
||||
import pytest
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
from compliance.api.screening_routes import (
|
||||
parse_package_lock,
|
||||
parse_requirements_txt,
|
||||
parse_yarn_lock,
|
||||
detect_and_parse,
|
||||
generate_sbom,
|
||||
map_osv_severity,
|
||||
extract_fix_version,
|
||||
)
|
||||
|
||||
|
||||
class TestParsePackageLock:
|
||||
"""Tests for package-lock.json parsing."""
|
||||
|
||||
def test_v2_format(self):
|
||||
data = json.dumps({
|
||||
"packages": {
|
||||
"": {"name": "my-app", "version": "1.0.0"},
|
||||
"node_modules/react": {"version": "18.3.0", "license": "MIT"},
|
||||
"node_modules/lodash": {"version": "4.17.21", "license": "MIT"},
|
||||
}
|
||||
})
|
||||
components = parse_package_lock(data)
|
||||
assert len(components) == 2
|
||||
names = [c["name"] for c in components]
|
||||
assert "react" in names
|
||||
assert "lodash" in names
|
||||
|
||||
def test_v1_format(self):
|
||||
data = json.dumps({
|
||||
"dependencies": {
|
||||
"express": {"version": "4.18.2"},
|
||||
"cors": {"version": "2.8.5"},
|
||||
}
|
||||
})
|
||||
components = parse_package_lock(data)
|
||||
assert len(components) == 2
|
||||
|
||||
def test_empty_json(self):
|
||||
assert parse_package_lock("{}") == []
|
||||
|
||||
def test_invalid_json(self):
|
||||
assert parse_package_lock("not json") == []
|
||||
|
||||
def test_root_package_skipped(self):
|
||||
data = json.dumps({
|
||||
"packages": {
|
||||
"": {"name": "root", "version": "1.0.0"},
|
||||
}
|
||||
})
|
||||
components = parse_package_lock(data)
|
||||
assert len(components) == 0
|
||||
|
||||
|
||||
class TestParseRequirementsTxt:
|
||||
"""Tests for requirements.txt parsing."""
|
||||
|
||||
def test_pinned_versions(self):
|
||||
content = "fastapi==0.123.9\nuvicorn==0.38.0\npydantic==2.12.5"
|
||||
components = parse_requirements_txt(content)
|
||||
assert len(components) == 3
|
||||
assert components[0]["name"] == "fastapi"
|
||||
assert components[0]["version"] == "0.123.9"
|
||||
assert components[0]["ecosystem"] == "PyPI"
|
||||
|
||||
def test_minimum_versions(self):
|
||||
content = "idna>=3.7\ncryptography>=42.0.0"
|
||||
components = parse_requirements_txt(content)
|
||||
assert len(components) == 2
|
||||
assert components[0]["version"] == "3.7"
|
||||
|
||||
def test_comments_and_blanks_ignored(self):
|
||||
content = "# Comment\n\nfastapi==1.0.0\n# Another comment\n-r base.txt"
|
||||
components = parse_requirements_txt(content)
|
||||
assert len(components) == 1
|
||||
|
||||
def test_bare_package_name(self):
|
||||
content = "requests"
|
||||
components = parse_requirements_txt(content)
|
||||
assert len(components) == 1
|
||||
assert components[0]["version"] == "latest"
|
||||
|
||||
def test_empty_content(self):
|
||||
assert parse_requirements_txt("") == []
|
||||
|
||||
|
||||
class TestParseYarnLock:
|
||||
"""Tests for yarn.lock parsing (basic)."""
|
||||
|
||||
def test_basic_format(self):
|
||||
content = '"react@^18.0.0":\n version "18.3.0"\n"lodash@^4.17.0":\n version "4.17.21"'
|
||||
components = parse_yarn_lock(content)
|
||||
assert len(components) == 2
|
||||
|
||||
|
||||
class TestDetectAndParse:
|
||||
"""Tests for file type detection and parsing."""
|
||||
|
||||
def test_package_lock_detection(self):
|
||||
data = json.dumps({"packages": {"node_modules/x": {"version": "1.0"}}})
|
||||
components, ecosystem = detect_and_parse("package-lock.json", data)
|
||||
assert ecosystem == "npm"
|
||||
assert len(components) == 1
|
||||
|
||||
def test_requirements_detection(self):
|
||||
components, ecosystem = detect_and_parse("requirements.txt", "flask==2.0.0")
|
||||
assert ecosystem == "PyPI"
|
||||
assert len(components) == 1
|
||||
|
||||
def test_unknown_format(self):
|
||||
components, ecosystem = detect_and_parse("readme.md", "Hello World")
|
||||
assert len(components) == 0
|
||||
|
||||
|
||||
class TestGenerateSbom:
|
||||
"""Tests for CycloneDX SBOM generation."""
|
||||
|
||||
def test_sbom_structure(self):
|
||||
components = [
|
||||
{"name": "react", "version": "18.3.0", "type": "library", "ecosystem": "npm", "license": "MIT"},
|
||||
]
|
||||
sbom = generate_sbom(components, "npm")
|
||||
assert sbom["bomFormat"] == "CycloneDX"
|
||||
assert sbom["specVersion"] == "1.5"
|
||||
assert len(sbom["components"]) == 1
|
||||
assert sbom["components"][0]["purl"] == "pkg:npm/react@18.3.0"
|
||||
|
||||
def test_sbom_empty_components(self):
|
||||
sbom = generate_sbom([], "npm")
|
||||
assert sbom["components"] == []
|
||||
|
||||
def test_sbom_unknown_license_excluded(self):
|
||||
components = [
|
||||
{"name": "x", "version": "1.0", "type": "library", "ecosystem": "npm", "license": "unknown"},
|
||||
]
|
||||
sbom = generate_sbom(components, "npm")
|
||||
assert sbom["components"][0]["licenses"] == []
|
||||
|
||||
|
||||
class TestMapOsvSeverity:
|
||||
"""Tests for OSV severity mapping."""
|
||||
|
||||
def test_critical_severity(self):
|
||||
vuln = {"database_specific": {"severity": "CRITICAL"}}
|
||||
severity, cvss = map_osv_severity(vuln)
|
||||
assert severity == "CRITICAL"
|
||||
assert cvss == 9.5
|
||||
|
||||
def test_medium_default(self):
|
||||
vuln = {}
|
||||
severity, cvss = map_osv_severity(vuln)
|
||||
assert severity == "MEDIUM"
|
||||
assert cvss == 5.0
|
||||
|
||||
def test_low_severity(self):
|
||||
vuln = {"database_specific": {"severity": "LOW"}}
|
||||
severity, cvss = map_osv_severity(vuln)
|
||||
assert severity == "LOW"
|
||||
assert cvss == 2.5
|
||||
|
||||
|
||||
class TestExtractFixVersion:
|
||||
"""Tests for extracting fix version from OSV data."""
|
||||
|
||||
def test_fix_version_found(self):
|
||||
vuln = {
|
||||
"affected": [{
|
||||
"package": {"name": "lodash"},
|
||||
"ranges": [{"events": [{"introduced": "0"}, {"fixed": "4.17.21"}]}],
|
||||
}]
|
||||
}
|
||||
assert extract_fix_version(vuln, "lodash") == "4.17.21"
|
||||
|
||||
def test_no_fix_version(self):
|
||||
vuln = {"affected": [{"package": {"name": "x"}, "ranges": [{"events": [{"introduced": "0"}]}]}]}
|
||||
assert extract_fix_version(vuln, "x") is None
|
||||
|
||||
def test_wrong_package_name(self):
|
||||
vuln = {
|
||||
"affected": [{
|
||||
"package": {"name": "other"},
|
||||
"ranges": [{"events": [{"fixed": "1.0"}]}],
|
||||
}]
|
||||
}
|
||||
assert extract_fix_version(vuln, "lodash") is None
|
||||
@@ -50,6 +50,7 @@ services:
|
||||
- "3000"
|
||||
environment:
|
||||
NODE_ENV: production
|
||||
DATABASE_URL: postgresql://${POSTGRES_USER:-breakpilot}:${POSTGRES_PASSWORD:-breakpilot123}@bp-core-postgres:5432/${POSTGRES_DB:-breakpilot_db}
|
||||
BACKEND_URL: http://backend-compliance:8002
|
||||
CONSENT_SERVICE_URL: http://bp-core-consent-service:8081
|
||||
SDK_URL: http://ai-compliance-sdk:8090
|
||||
|
||||
Reference in New Issue
Block a user