feat(use-case-compiler): MC-based compliance questionnaires with scoring
Build + Deploy / build-admin-compliance (push) Successful in 2m46s
Build + Deploy / build-backend-compliance (push) Successful in 26s
Build + Deploy / build-ai-sdk (push) Successful in 52s
Build + Deploy / build-developer-portal (push) Successful in 22s
Build + Deploy / build-tts (push) Successful in 16s
Build + Deploy / build-document-crawler (push) Successful in 12s
Build + Deploy / build-dsms-gateway (push) Successful in 20s
Build + Deploy / build-dsms-node (push) Successful in 16s
CI / branch-name (push) Has been skipped
CI / guardrail-integrity (push) Has been skipped
CI / loc-budget (push) Failing after 18s
CI / secret-scan (push) Has been skipped
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / nodejs-build (push) Successful in 3m16s
CI / dep-audit (push) Has been skipped
CI / sbom-scan (push) Has been skipped
CI / test-go (push) Successful in 1m0s
CI / test-python-backend (push) Successful in 41s
CI / test-python-document-crawler (push) Successful in 29s
CI / test-python-dsms-gateway (push) Successful in 23s
CI / validate-canonical-controls (push) Successful in 16s
Build + Deploy / trigger-orca (push) Successful in 2m36s

Implements the Use-Case Compiler that turns Master Controls into
interactive compliance audits. 5 templates (Vendor Check, SAST/DAST,
DSGVO, NIS2, CRA), deterministic + LLM question generation, scoring
engine with regulation/severity breakdown, and gap detection.

- Backend: 9 API endpoints, 22 unit tests (all pass)
- Frontend: Template selector, questionnaire, result dashboard
- Migration 027: usecase_audits + usecase_answers tables

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-05-12 13:49:16 +02:00
parent 74f00bbb0f
commit 06bfbd1dca
22 changed files with 3157 additions and 1 deletion
@@ -0,0 +1,102 @@
package usecase
import (
	"strings"
	"testing"
)
// TestParseLLMResponse_ValidJSON checks that a clean JSON array of two
// question objects is parsed into two entries, with question text and
// severity carried through unchanged.
func TestParseLLMResponse_ValidJSON(t *testing.T) {
	raw := `[
{
"question": "Ist eine Datenschutz-Folgenabschaetzung durchgefuehrt?",
"pass_criteria": ["DSFA dokumentiert"],
"fail_criteria": ["Keine DSFA"],
"severity": "HIGH"
},
{
"question": "Sind Betroffenenrechte implementiert?",
"pass_criteria": ["Prozess vorhanden"],
"fail_criteria": ["Kein Prozess"],
"severity": "MEDIUM"
}
]`
	parsed := parseLLMResponse(raw)
	if n := len(parsed); n != 2 {
		t.Fatalf("Expected 2 questions, got %d", n)
	}
	first := parsed[0]
	if got := first.Question; got != "Ist eine Datenschutz-Folgenabschaetzung durchgefuehrt?" {
		t.Errorf("Unexpected question: %s", got)
	}
	if got := first.Severity; got != "HIGH" {
		t.Errorf("Expected HIGH severity, got %s", got)
	}
}
// TestParseLLMResponse_WithPreamble checks that a JSON array surrounded
// by conversational LLM text (preamble and trailing remark) is still
// extracted and parsed.
func TestParseLLMResponse_WithPreamble(t *testing.T) {
	wrapped := `Hier sind die Prueffragen:
[{"question":"Test?","pass_criteria":["OK"],"fail_criteria":["NOK"],"severity":"LOW"}]
Ich hoffe das hilft.`
	parsed := parseLLMResponse(wrapped)
	if n := len(parsed); n != 1 {
		t.Fatalf("Expected 1 question from wrapped response, got %d", n)
	}
}
// TestParseLLMResponse_InvalidJSON checks that non-JSON input yields a
// nil result rather than a partial or empty slice.
func TestParseLLMResponse_InvalidJSON(t *testing.T) {
	if got := parseLLMResponse("This is not JSON at all"); got != nil {
		t.Errorf("Expected nil for invalid JSON, got %v", got)
	}
}
// TestParseLLMResponse_EmptyQuestion checks that entries with an empty
// question string are filtered out while valid entries are kept.
func TestParseLLMResponse_EmptyQuestion(t *testing.T) {
	raw := `[
{"question":"","pass_criteria":["OK"],"fail_criteria":["NOK"],"severity":"HIGH"},
{"question":"Valid?","pass_criteria":["Yes"],"fail_criteria":["No"],"severity":"LOW"}
]`
	filtered := parseLLMResponse(raw)
	if n := len(filtered); n != 1 {
		t.Fatalf("Expected 1 valid question (empty filtered), got %d", n)
	}
	if q := filtered[0].Question; q != "Valid?" {
		t.Errorf("Unexpected question: %s", q)
	}
}
// TestBuildPrompt checks that the generated prompt is non-empty and
// embeds both the human-readable master-control name (underscores
// replaced by spaces) and the atomic control count.
func TestBuildPrompt(t *testing.T) {
	info := MCInfo{
		MasterControlID: "MC-123",
		CanonicalName:   "access_control_mfa",
		TotalControls:   12,
		RegSource:       "NIS2",
	}
	generated := buildPrompt(info, []string{"nis2", "dsgvo"})
	if generated == "" {
		t.Error("Expected non-empty prompt")
	}
	checks := []struct {
		needle string
		msg    string
	}{
		{"access control mfa", "Prompt should contain readable MC name"},
		{"12 Atomic Controls", "Prompt should contain control count"},
	}
	for _, c := range checks {
		if !contains(generated, c.needle) {
			t.Error(c.msg)
		}
	}
}
// contains reports whether sub occurs anywhere within s.
//
// The original hand-rolled boolean expression duplicated
// strings.Contains (including the empty-substring and exact-equality
// cases); delegating to the standard library is clearer and equivalent.
func contains(s, sub string) bool {
	return strings.Contains(s, sub)
}
// containsSubstring reports whether sub occurs within s, using a naive
// scan that compares sub against every candidate window of s.
func containsSubstring(s, sub string) bool {
	limit := len(s) - len(sub)
	for start := 0; start <= limit; start++ {
		if s[start:start+len(sub)] == sub {
			return true
		}
	}
	return false
}