feat: Compliance Maximizer — Regulatory Optimization Engine
Some checks failed
Build + Deploy / build-admin-compliance (push) Successful in 1m45s
Build + Deploy / build-backend-compliance (push) Successful in 4m42s
Build + Deploy / build-ai-sdk (push) Successful in 46s
Build + Deploy / build-developer-portal (push) Successful in 1m6s
Build + Deploy / build-tts (push) Successful in 1m14s
Build + Deploy / build-document-crawler (push) Successful in 31s
Build + Deploy / build-dsms-gateway (push) Successful in 24s
CI / branch-name (push) Has been skipped
CI / guardrail-integrity (push) Has been skipped
CI / loc-budget (push) Failing after 15s
CI / secret-scan (push) Has been skipped
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / nodejs-build (push) Successful in 2m27s
CI / dep-audit (push) Has been skipped
CI / sbom-scan (push) Has been skipped
CI / test-go (push) Failing after 37s
CI / test-python-backend (push) Successful in 42s
CI / test-python-document-crawler (push) Successful in 25s
CI / test-python-dsms-gateway (push) Successful in 23s
CI / validate-canonical-controls (push) Successful in 18s
Build + Deploy / trigger-orca (push) Successful in 4m35s

Neues Modul, das den regulatorischen Spielraum fuer KI-Use-Cases
deterministisch berechnet und optimale Konfigurationen vorschlaegt.

Kernfeatures:
- 13-Dimensionen Constraint-Space (DSGVO + AI Act)
- 3-Zonen-Analyse: Verboten / Eingeschraenkt / Erlaubt
- Deterministische Optimizer-Engine (kein LLM im Kern)
- 28 Constraint-Regeln aus DSGVO, AI Act, EDPB Guidelines
- 28 Tests (Golden Suite + Meta-Tests)
- REST API: /sdk/v1/maximizer/* (9 Endpoints)
- Frontend: 3-Zonen-Visualisierung, Dimension-Form, Score-Gauges

[migration-approved]

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-04-23 09:10:20 +02:00
parent 01bf1463b8
commit 1ac716261c
30 changed files with 3779 additions and 1 deletions

View File

@@ -0,0 +1,52 @@
package maximizer
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"runtime"
)
// defaultConstraintFile is the project-root-relative path of the shipped
// constraint ruleset, used by LoadConstraintRulesFromDefault.
const defaultConstraintFile = "policies/maximizer_constraints_v1.json"
// LoadConstraintRules reads a constraint ruleset from a JSON file.
// It returns an error when the file cannot be read, is not valid JSON,
// or carries an empty version string.
func LoadConstraintRules(path string) (*ConstraintRuleSet, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read constraint file %s: %w", path, err)
	}
	var ruleset ConstraintRuleSet
	if err := json.Unmarshal(raw, &ruleset); err != nil {
		return nil, fmt.Errorf("parse constraint file %s: %w", path, err)
	}
	// A missing version means the file is not a valid ruleset.
	if ruleset.Version == "" {
		return nil, fmt.Errorf("constraint file %s: missing version", path)
	}
	return &ruleset, nil
}
// LoadConstraintRulesFromDefault loads the shipped ruleset from the
// default policy file, resolved relative to the detected project root.
func LoadConstraintRulesFromDefault() (*ConstraintRuleSet, error) {
	return LoadConstraintRules(filepath.Join(findProjectRoot(), defaultConstraintFile))
}
// findProjectRoot walks up from the current source file to find the
// ai-compliance-sdk root, identified by either a policies/ directory or a
// go.mod file. It returns "." when the caller location is unavailable or
// neither marker is found within ten parent levels.
func findProjectRoot() string {
	_, filename, _, ok := runtime.Caller(0)
	if !ok {
		return "."
	}
	dir := filepath.Dir(filename)
	// Bounded walk so a misplaced checkout never loops toward the
	// filesystem root indefinitely.
	for i := 0; i < 10; i++ {
		if _, err := os.Stat(filepath.Join(dir, "policies")); err == nil {
			return dir
		}
		// The doc contract says "go.mod or policies/" — previously only
		// policies/ was checked; accept the module marker as well.
		if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
			return dir
		}
		dir = filepath.Dir(dir)
	}
	return "."
}

View File

@@ -0,0 +1,76 @@
package maximizer
// ConstraintRuleSet is the top-level container loaded from maximizer_constraints_v1.json.
// Version must be non-empty; LoadConstraintRules enforces this.
type ConstraintRuleSet struct {
	Version     string           `json:"version"`
	Regulations []string         `json:"regulations"`
	Rules       []ConstraintRule `json:"rules"`
}

// ConstraintRule maps a regulatory obligation to dimension restrictions.
type ConstraintRule struct {
	ID           string `json:"id"`
	ObligationID string `json:"obligation_id"`
	Regulation   string `json:"regulation"`
	ArticleRef   string `json:"article_ref"`
	Title        string `json:"title"`
	Description  string `json:"description"`
	// RuleType categorizes the rule: hard_prohibition, requirement,
	// classification_rule, optimizer_rule, escalation_gate.
	RuleType    string       `json:"rule_type"`
	Constraints []Constraint `json:"constraints"`
}

// Constraint is a single if-then rule on the dimension space: when every
// condition in If matches, the effects in Then apply.
type Constraint struct {
	If   ConditionSet `json:"if"`
	Then EffectSet    `json:"then"`
}

// ConditionSet maps dimension names to their required values.
// Values can be a string (exact match) or []string (any of).
type ConditionSet map[string]interface{}

// EffectSet defines what must be true when the condition matches.
type EffectSet struct {
	// Allowed=false means hard block — no optimization possible for this rule
	Allowed *bool `json:"allowed,omitempty"`
	// RequiredValues: dimension must have exactly this value
	RequiredValues map[string]string `json:"required_values,omitempty"`
	// RequiredControls: organizational/technical controls needed
	RequiredControls []string `json:"required_controls,omitempty"`
	// RequiredPatterns: architectural patterns needed
	RequiredPatterns []string `json:"required_patterns,omitempty"`
	// SetRiskClassification overrides the result's risk classification.
	SetRiskClassification string `json:"set_risk_classification,omitempty"`
}
// Matches reports whether a DimensionConfig satisfies all conditions in
// this set. An empty set matches any config.
//
// Condition values come from JSON and may be a string (exact match), an
// array of strings (any-of), or a boolean (compared against the "true"/
// "false" rendering of boolean dimensions). Previously any non-string,
// non-array value was silently skipped — a malformed or boolean condition
// therefore matched every config. Booleans are now compared properly and
// any other value type deterministically fails the match.
func (cs ConditionSet) Matches(config *DimensionConfig) bool {
	for dim, expected := range cs {
		actual := config.GetValue(dim)
		if actual == "" {
			// Unknown dimension name or unset value: cannot match.
			return false
		}
		switch v := expected.(type) {
		case string:
			if actual != v {
				return false
			}
		case []interface{}:
			// Any-of: at least one listed string must equal the actual value.
			found := false
			for _, item := range v {
				if s, ok := item.(string); ok && actual == s {
					found = true
					break
				}
			}
			if !found {
				return false
			}
		case bool:
			// Boolean dimensions render as "true"/"false" (see GetValue).
			want := "false"
			if v {
				want = "true"
			}
			if actual != want {
				return false
			}
		default:
			// Unsupported condition shape (number, object, null): treat as
			// non-matching rather than silently satisfied.
			return false
		}
	}
	return true
}

View File

@@ -0,0 +1,306 @@
package maximizer
// DimensionConfig is the normalized representation of an AI use case
// as a point in a 13-dimensional regulatory constraint space.
// Each dimension maps to regulatory obligations from DSGVO, AI Act, etc.
// Dimension names used by GetValue/SetValue and AllValues correspond
// one-to-one with the json tags below.
type DimensionConfig struct {
	AutomationLevel      AutomationLevel     `json:"automation_level"`
	DecisionBinding      DecisionBinding     `json:"decision_binding"`
	DecisionImpact       DecisionImpact      `json:"decision_impact"`
	Domain               DomainCategory      `json:"domain"`
	DataType             DataTypeSensitivity `json:"data_type"`
	HumanInLoop          HumanInLoopLevel    `json:"human_in_loop"`
	Explainability       ExplainabilityLevel `json:"explainability"`
	RiskClassification   RiskClass           `json:"risk_classification"`
	LegalBasis           LegalBasisType      `json:"legal_basis"`
	TransparencyRequired bool                `json:"transparency_required"`
	LoggingRequired      bool                `json:"logging_required"`
	ModelType            ModelType           `json:"model_type"`
	DeploymentScope      DeploymentScope     `json:"deployment_scope"`
}
// --- Dimension Enums ---
//
// Each enum below is one axis of the constraint space. The string values
// are the JSON wire format and match the entries listed in AllValues.

// AutomationLevel describes how much of the process is automated.
type AutomationLevel string

const (
	AutoNone      AutomationLevel = "none"
	AutoAssistive AutomationLevel = "assistive"
	AutoPartial   AutomationLevel = "partial"
	AutoFull      AutomationLevel = "full"
)

// DecisionBinding describes how binding the system's output is.
type DecisionBinding string

const (
	BindingNonBinding   DecisionBinding = "non_binding"
	BindingHumanReview  DecisionBinding = "human_review_required"
	BindingFullyBinding DecisionBinding = "fully_binding"
)

// DecisionImpact describes the severity of the decision's consequences.
type DecisionImpact string

const (
	ImpactLow    DecisionImpact = "low"
	ImpactMedium DecisionImpact = "medium"
	ImpactHigh   DecisionImpact = "high"
)

// DomainCategory is the application domain of the use case.
type DomainCategory string

const (
	DomainHR        DomainCategory = "hr"
	DomainFinance   DomainCategory = "finance"
	DomainEducation DomainCategory = "education"
	DomainHealth    DomainCategory = "health"
	DomainMarketing DomainCategory = "marketing"
	DomainGeneral   DomainCategory = "general"
)

// DataTypeSensitivity classifies the most sensitive data category processed.
type DataTypeSensitivity string

const (
	DataNonPersonal DataTypeSensitivity = "non_personal"
	DataPersonal    DataTypeSensitivity = "personal"
	DataSensitive   DataTypeSensitivity = "sensitive"
	DataBiometric   DataTypeSensitivity = "biometric"
)

// HumanInLoopLevel describes the level of human oversight.
type HumanInLoopLevel string

const (
	HILNone     HumanInLoopLevel = "none"
	HILOptional HumanInLoopLevel = "optional"
	HILRequired HumanInLoopLevel = "required"
)

// ExplainabilityLevel describes how explainable the system's outputs are.
type ExplainabilityLevel string

const (
	ExplainNone  ExplainabilityLevel = "none"
	ExplainBasic ExplainabilityLevel = "basic"
	ExplainHigh  ExplainabilityLevel = "high"
)

// RiskClass is the AI-Act-style risk classification of the use case.
type RiskClass string

const (
	RiskMinimal    RiskClass = "minimal"
	RiskLimited    RiskClass = "limited"
	RiskHigh       RiskClass = "high"
	RiskProhibited RiskClass = "prohibited"
)

// LegalBasisType is the DSGVO legal basis for the processing.
type LegalBasisType string

const (
	LegalConsent            LegalBasisType = "consent"
	LegalContract           LegalBasisType = "contract"
	LegalLegalObligation    LegalBasisType = "legal_obligation"
	LegalLegitimateInterest LegalBasisType = "legitimate_interest"
	LegalPublicInterest     LegalBasisType = "public_interest"
)

// ModelType classifies the kind of model used.
type ModelType string

const (
	ModelRuleBased   ModelType = "rule_based"
	ModelStatistical ModelType = "statistical"
	ModelBlackboxLLM ModelType = "blackbox_llm"
)

// DeploymentScope describes who is exposed to the system.
type DeploymentScope string

const (
	ScopeInternal DeploymentScope = "internal"
	ScopeExternal DeploymentScope = "external"
	ScopePublic   DeploymentScope = "public"
)
// --- Ordinal Orderings (higher = more regulatory risk) ---
//
// Note the direction: for safeguard dimensions (human_in_loop,
// explainability) the SAFEST value maps to 0, i.e. removing the safeguard
// increases the ordinal, consistent with "higher = more risk".

var automationOrder = map[AutomationLevel]int{
	AutoNone: 0, AutoAssistive: 1, AutoPartial: 2, AutoFull: 3,
}

var bindingOrder = map[DecisionBinding]int{
	BindingNonBinding: 0, BindingHumanReview: 1, BindingFullyBinding: 2,
}

var impactOrder = map[DecisionImpact]int{
	ImpactLow: 0, ImpactMedium: 1, ImpactHigh: 2,
}

var dataTypeOrder = map[DataTypeSensitivity]int{
	DataNonPersonal: 0, DataPersonal: 1, DataSensitive: 2, DataBiometric: 3,
}

var hilOrder = map[HumanInLoopLevel]int{
	HILRequired: 0, HILOptional: 1, HILNone: 2,
}

var explainOrder = map[ExplainabilityLevel]int{
	ExplainHigh: 0, ExplainBasic: 1, ExplainNone: 2,
}

var riskOrder = map[RiskClass]int{
	RiskMinimal: 0, RiskLimited: 1, RiskHigh: 2, RiskProhibited: 3,
}

var modelTypeOrder = map[ModelType]int{
	ModelRuleBased: 0, ModelStatistical: 1, ModelBlackboxLLM: 2,
}

var scopeOrder = map[DeploymentScope]int{
	ScopeInternal: 0, ScopeExternal: 1, ScopePublic: 2,
}
// AllValues returns the ordered list of allowed values for each dimension.
// For ordinal dimensions the lists ascend in regulatory risk, matching the
// *Order maps above. Boolean dimensions are represented as the strings
// "true"/"false", as rendered by DimensionConfig.GetValue.
var AllValues = map[string][]string{
	"automation_level":      {"none", "assistive", "partial", "full"},
	"decision_binding":      {"non_binding", "human_review_required", "fully_binding"},
	"decision_impact":       {"low", "medium", "high"},
	"domain":                {"hr", "finance", "education", "health", "marketing", "general"},
	"data_type":             {"non_personal", "personal", "sensitive", "biometric"},
	"human_in_loop":         {"required", "optional", "none"},
	"explainability":        {"high", "basic", "none"},
	"risk_classification":   {"minimal", "limited", "high", "prohibited"},
	"legal_basis":           {"consent", "contract", "legal_obligation", "legitimate_interest", "public_interest"},
	"transparency_required": {"true", "false"},
	"logging_required":      {"true", "false"},
	"model_type":            {"rule_based", "statistical", "blackbox_llm"},
	"deployment_scope":      {"internal", "external", "public"},
}
// DimensionDelta represents a single change between two configs, produced
// by DimensionConfig.Diff.
type DimensionDelta struct {
	Dimension string `json:"dimension"`
	From      string `json:"from"`
	To        string `json:"to"`
	// Impact is a human-readable (German) description of the change,
	// built by describeDeltaImpact.
	Impact string `json:"impact"`
}
// GetValue returns the string form of the named dimension, or "" when the
// dimension name is not recognized. Boolean dimensions are rendered as
// "true"/"false" to match AllValues and the rule conditions.
func (d *DimensionConfig) GetValue(dimension string) string {
	// boolStr renders the two boolean dimensions uniformly.
	boolStr := func(b bool) string {
		if b {
			return "true"
		}
		return "false"
	}
	switch dimension {
	case "automation_level":
		return string(d.AutomationLevel)
	case "decision_binding":
		return string(d.DecisionBinding)
	case "decision_impact":
		return string(d.DecisionImpact)
	case "domain":
		return string(d.Domain)
	case "data_type":
		return string(d.DataType)
	case "human_in_loop":
		return string(d.HumanInLoop)
	case "explainability":
		return string(d.Explainability)
	case "risk_classification":
		return string(d.RiskClassification)
	case "legal_basis":
		return string(d.LegalBasis)
	case "transparency_required":
		return boolStr(d.TransparencyRequired)
	case "logging_required":
		return boolStr(d.LoggingRequired)
	case "model_type":
		return string(d.ModelType)
	case "deployment_scope":
		return string(d.DeploymentScope)
	}
	// Unknown dimension name.
	return ""
}
// SetValue sets a dimension value by name. Returns false if the dimension
// is unknown. Values are not validated against AllValues; the caller is
// responsible for passing a legal value.
func (d *DimensionConfig) SetValue(dimension, value string) bool {
	switch dimension {
	// Boolean dimensions: anything other than "true" means false.
	case "transparency_required":
		d.TransparencyRequired = value == "true"
	case "logging_required":
		d.LoggingRequired = value == "true"
	// String-typed dimensions.
	case "automation_level":
		d.AutomationLevel = AutomationLevel(value)
	case "decision_binding":
		d.DecisionBinding = DecisionBinding(value)
	case "decision_impact":
		d.DecisionImpact = DecisionImpact(value)
	case "domain":
		d.Domain = DomainCategory(value)
	case "data_type":
		d.DataType = DataTypeSensitivity(value)
	case "human_in_loop":
		d.HumanInLoop = HumanInLoopLevel(value)
	case "explainability":
		d.Explainability = ExplainabilityLevel(value)
	case "risk_classification":
		d.RiskClassification = RiskClass(value)
	case "legal_basis":
		d.LegalBasis = LegalBasisType(value)
	case "model_type":
		d.ModelType = ModelType(value)
	case "deployment_scope":
		d.DeploymentScope = DeploymentScope(value)
	default:
		return false
	}
	return true
}
// Diff computes the per-dimension changes needed to turn this config into
// other. Dimensions with equal values are omitted.
func (d *DimensionConfig) Diff(other *DimensionConfig) []DimensionDelta {
	// NOTE(review): this list mirrors the package-level allDimensions in
	// the evaluator file — keep the two in sync.
	names := []string{
		"automation_level", "decision_binding", "decision_impact", "domain",
		"data_type", "human_in_loop", "explainability", "risk_classification",
		"legal_basis", "transparency_required", "logging_required",
		"model_type", "deployment_scope",
	}
	var deltas []DimensionDelta
	for _, name := range names {
		before, after := d.GetValue(name), other.GetValue(name)
		if before == after {
			continue
		}
		deltas = append(deltas, DimensionDelta{
			Dimension: name,
			From:      before,
			To:        after,
			Impact:    describeDeltaImpact(name, before, after),
		})
	}
	return deltas
}
// Clone returns an independent copy of the config. Every field is a value
// type (strings and bools), so a plain value copy is a complete copy.
func (d *DimensionConfig) Clone() DimensionConfig {
	dup := *d
	return dup
}
// describeDeltaImpact builds the human-readable (German) description of a
// single dimension change. Dimensions without a dedicated label fall back
// to the raw dimension name.
func describeDeltaImpact(dimension, from, to string) string {
	labels := map[string]string{
		"automation_level":      "Automatisierungsgrad",
		"decision_binding":      "Entscheidungsbindung",
		"human_in_loop":         "Menschliche Kontrolle",
		"explainability":        "Erklaerbarkeit",
		"data_type":             "Datensensitivitaet",
		"transparency_required": "Transparenzpflicht",
		"logging_required":      "Protokollierungspflicht",
	}
	label, ok := labels[dimension]
	if !ok {
		label = dimension
	}
	return label + ": " + from + " → " + to
}

View File

@@ -0,0 +1,201 @@
package maximizer
import (
"testing"
"github.com/breakpilot/ai-compliance-sdk/internal/ucca"
)
// TestGetValueSetValueRoundtrip verifies that GetValue and SetValue are
// inverses for every dimension on a fully-populated config.
func TestGetValueSetValueRoundtrip(t *testing.T) {
	full := DimensionConfig{
		AutomationLevel:      AutoFull,
		DecisionBinding:      BindingFullyBinding,
		DecisionImpact:       ImpactHigh,
		Domain:               DomainHR,
		DataType:             DataPersonal,
		HumanInLoop:          HILNone,
		Explainability:       ExplainNone,
		RiskClassification:   RiskHigh,
		LegalBasis:           LegalContract,
		TransparencyRequired: true,
		LoggingRequired:      false,
		ModelType:            ModelBlackboxLLM,
		DeploymentScope:      ScopeExternal,
	}
	for _, name := range allDimensions {
		got := full.GetValue(name)
		if got == "" {
			t.Errorf("GetValue(%q) returned empty", name)
		}
		var fresh DimensionConfig
		if !fresh.SetValue(name, got) {
			t.Errorf("SetValue(%q, %q) returned false", name, got)
		}
		if back := fresh.GetValue(name); back != got {
			t.Errorf("SetValue roundtrip failed for %q: got %q, want %q", name, back, got)
		}
	}
}
// TestGetValueUnknownDimension verifies both accessors reject unknown
// dimension names (GetValue returns "", SetValue returns false).
func TestGetValueUnknownDimension(t *testing.T) {
	var cfg DimensionConfig
	if got := cfg.GetValue("nonexistent"); got != "" {
		t.Errorf("expected empty for unknown dimension, got %q", got)
	}
	if cfg.SetValue("nonexistent", "x") {
		t.Error("expected false for unknown dimension")
	}
}
// TestDiffIdentical verifies that diffing a config against itself yields
// no deltas.
func TestDiffIdentical(t *testing.T) {
	cfg := DimensionConfig{
		AutomationLevel: AutoAssistive,
		DecisionImpact:  ImpactLow,
		Domain:          DomainGeneral,
	}
	if deltas := cfg.Diff(&cfg); len(deltas) != 0 {
		t.Errorf("expected 0 deltas for identical configs, got %d", len(deltas))
	}
}
// TestDiffDetectsChanges verifies that every changed dimension appears in
// the delta list.
func TestDiffDetectsChanges(t *testing.T) {
	before := DimensionConfig{
		AutomationLevel: AutoFull,
		HumanInLoop:     HILNone,
		DecisionBinding: BindingFullyBinding,
	}
	after := DimensionConfig{
		AutomationLevel: AutoAssistive,
		HumanInLoop:     HILRequired,
		DecisionBinding: BindingHumanReview,
	}
	seen := make(map[string]bool)
	for _, delta := range before.Diff(&after) {
		seen[delta.Dimension] = true
	}
	for _, want := range []string{"automation_level", "human_in_loop", "decision_binding"} {
		if !seen[want] {
			t.Errorf("expected %q in deltas", want)
		}
	}
}
// TestClone verifies that mutating a clone leaves the original untouched.
func TestClone(t *testing.T) {
	src := DimensionConfig{
		AutomationLevel: AutoFull,
		Domain:          DomainHR,
	}
	dup := src.Clone()
	dup.AutomationLevel = AutoAssistive
	if src.AutomationLevel != AutoFull {
		t.Error("clone modified original")
	}
}
// TestMapIntakeToDimensions verifies the intake→dimension mapping for a
// fully-automated HR use case with Art. 9 data, legal effects, and model
// training. Expected values cover the derived dimensions: sensitivity,
// impact, model type, human-in-loop, and binding.
func TestMapIntakeToDimensions(t *testing.T) {
	intake := &ucca.UseCaseIntake{
		Domain:     "hr",
		Automation: ucca.AutomationFullyAutomated,
		DataTypes: ucca.DataTypes{
			PersonalData: true,
			Article9Data: true,
		},
		Purpose: ucca.Purpose{
			DecisionMaking: true,
		},
		Outputs: ucca.Outputs{
			LegalEffects: true,
		},
		ModelUsage: ucca.ModelUsage{
			Training: true,
		},
	}
	config := MapIntakeToDimensions(intake)
	tests := []struct {
		dimension string
		expected  string
	}{
		{"automation_level", "full"},
		{"domain", "hr"},
		// Article9Data maps to "sensitive" (deriveDataType).
		{"data_type", "sensitive"},
		// LegalEffects maps to "high" impact (deriveImpact).
		{"decision_impact", "high"},
		{"model_type", "blackbox_llm"},
		{"human_in_loop", "none"},
		// LegalEffects + full automation => fully binding (deriveBinding).
		{"decision_binding", "fully_binding"},
	}
	for _, tc := range tests {
		got := config.GetValue(tc.dimension)
		if got != tc.expected {
			t.Errorf("MapIntakeToDimensions: %s = %q, want %q", tc.dimension, got, tc.expected)
		}
	}
}
// TestMapIntakeToDimensionsBiometricWins verifies the "highest sensitivity
// wins" rule: with personal, Art. 9, and biometric data all flagged, the
// derived data_type must be biometric.
func TestMapIntakeToDimensionsBiometricWins(t *testing.T) {
	intake := &ucca.UseCaseIntake{
		DataTypes: ucca.DataTypes{
			PersonalData:  true,
			Article9Data:  true,
			BiometricData: true,
		},
	}
	config := MapIntakeToDimensions(intake)
	if config.DataType != DataBiometric {
		t.Errorf("expected biometric (highest sensitivity), got %s", config.DataType)
	}
}
// TestMapDimensionsToIntakePreservesOriginal verifies that mapping a
// DimensionConfig back to an intake keeps fields the config does not
// cover (text, title, hosting) while applying the changed automation.
func TestMapDimensionsToIntakePreservesOriginal(t *testing.T) {
	original := &ucca.UseCaseIntake{
		UseCaseText: "Test use case",
		Domain:      "hr",
		Title:       "My Assessment",
		Automation:  ucca.AutomationFullyAutomated,
		DataTypes: ucca.DataTypes{
			PersonalData: true,
		},
		Hosting: ucca.Hosting{
			Region: "eu",
		},
	}
	// Config downgrades automation from fully automated to assistive.
	config := &DimensionConfig{
		AutomationLevel: AutoAssistive,
		DataType:        DataPersonal,
		Domain:          DomainHR,
	}
	result := MapDimensionsToIntake(config, original)
	if result.UseCaseText != "Test use case" {
		t.Error("MapDimensionsToIntake did not preserve UseCaseText")
	}
	if result.Title != "My Assessment" {
		t.Error("MapDimensionsToIntake did not preserve Title")
	}
	if result.Hosting.Region != "eu" {
		t.Error("MapDimensionsToIntake did not preserve Hosting")
	}
	if result.Automation != ucca.AutomationAssistive {
		t.Errorf("expected assistive automation, got %s", result.Automation)
	}
}
// TestAllValuesComplete verifies that AllValues covers every dimension in
// allDimensions with a non-empty value list.
func TestAllValuesComplete(t *testing.T) {
	for _, name := range allDimensions {
		values, present := AllValues[name]
		if !present {
			t.Errorf("AllValues missing dimension %q", name)
		}
		if len(values) == 0 {
			t.Errorf("AllValues[%q] is empty", name)
		}
	}
}

View File

@@ -0,0 +1,218 @@
package maximizer
// Zone classifies a dimension value's regulatory status in the 3-zone model.
type Zone string

const (
	ZoneForbidden  Zone = "FORBIDDEN"  // hard block, no safeguard possible
	ZoneRestricted Zone = "RESTRICTED" // allowed only with safeguards
	ZoneSafe       Zone = "SAFE"       // no triggered restriction
)

// ZoneInfo classifies a single dimension value within the constraint space.
type ZoneInfo struct {
	Dimension       string   `json:"dimension"`
	CurrentValue    string   `json:"current_value"`
	Zone            Zone     `json:"zone"`
	AllowedValues   []string `json:"allowed_values,omitempty"`
	ForbiddenValues []string `json:"forbidden_values,omitempty"`
	Safeguards      []string `json:"safeguards,omitempty"`
	// Reason is the title of the rule that set this zone; when several
	// rules touch the same dimension, the last-evaluated rule wins.
	Reason         string   `json:"reason"`
	ObligationRefs []string `json:"obligation_refs"`
}

// Violation is a hard block triggered by a constraint rule (Allowed=false).
type Violation struct {
	RuleID       string `json:"rule_id"`
	ObligationID string `json:"obligation_id"`
	ArticleRef   string `json:"article_ref"`
	Title        string `json:"title"`
	Description  string `json:"description"`
	Dimension    string `json:"dimension,omitempty"`
}

// Restriction is a safeguard requirement (yellow zone): the listed
// dimension values must be adopted to satisfy the rule.
type Restriction struct {
	RuleID       string            `json:"rule_id"`
	ObligationID string            `json:"obligation_id"`
	ArticleRef   string            `json:"article_ref"`
	Title        string            `json:"title"`
	Required     map[string]string `json:"required"`
}

// TriggeredConstraint records which constraint rule was triggered and why.
type TriggeredConstraint struct {
	RuleID       string `json:"rule_id"`
	ObligationID string `json:"obligation_id"`
	Regulation   string `json:"regulation"`
	ArticleRef   string `json:"article_ref"`
	Title        string `json:"title"`
	RuleType     string `json:"rule_type"`
}

// EvaluationResult is the complete 3-zone analysis of a DimensionConfig.
// IsCompliant is false when any violation or unmet restriction exists.
type EvaluationResult struct {
	IsCompliant        bool                  `json:"is_compliant"`
	Violations         []Violation           `json:"violations"`
	Restrictions       []Restriction         `json:"restrictions"`
	ZoneMap            map[string]ZoneInfo   `json:"zone_map"`
	RequiredControls   []string              `json:"required_controls"`
	RequiredPatterns   []string              `json:"required_patterns"`
	TriggeredRules     []TriggeredConstraint `json:"triggered_rules"`
	RiskClassification string                `json:"risk_classification,omitempty"`
}
// Evaluator evaluates dimension configs against a loaded constraint ruleset.
type Evaluator struct {
	rules *ConstraintRuleSet
}

// NewEvaluator creates an evaluator from a loaded constraint ruleset.
// The ruleset is not copied; callers must not mutate it afterwards.
func NewEvaluator(rules *ConstraintRuleSet) *Evaluator {
	return &Evaluator{rules: rules}
}
// Evaluate checks a config against all constraints and produces a 3-zone
// result. Every dimension starts in the SAFE zone; triggered rules may
// escalate individual dimensions to RESTRICTED or FORBIDDEN.
//
// All slice fields are initialized to empty (non-nil) slices so that the
// result marshals to JSON as [] rather than null — previously only
// RequiredControls/RequiredPatterns got this treatment while Violations,
// Restrictions, and TriggeredRules serialized as null when empty.
func (e *Evaluator) Evaluate(config *DimensionConfig) *EvaluationResult {
	result := &EvaluationResult{
		IsCompliant:      true,
		Violations:       []Violation{},
		Restrictions:     []Restriction{},
		ZoneMap:          make(map[string]ZoneInfo, len(allDimensions)),
		RequiredControls: []string{},
		RequiredPatterns: []string{},
		TriggeredRules:   []TriggeredConstraint{},
	}
	// Initialize all dimensions as SAFE; rules only ever escalate.
	for _, dim := range allDimensions {
		result.ZoneMap[dim] = ZoneInfo{
			Dimension:    dim,
			CurrentValue: config.GetValue(dim),
			Zone:         ZoneSafe,
		}
	}
	// Evaluate each rule in ruleset order (later rules may overwrite a
	// dimension's Reason — see ZoneInfo).
	for i := range e.rules.Rules {
		e.evaluateRule(config, &e.rules.Rules[i], result)
	}
	// Fall back to the config's own classification when no rule set one.
	if result.RiskClassification == "" {
		result.RiskClassification = string(config.RiskClassification)
	}
	return result
}
// evaluateRule applies every constraint of a single rule to the config,
// recording triggered rules, violations, restrictions, controls, patterns,
// and risk-classification overrides on result.
func (e *Evaluator) evaluateRule(config *DimensionConfig, rule *ConstraintRule, result *EvaluationResult) {
	for _, constraint := range rule.Constraints {
		if !constraint.If.Matches(config) {
			continue
		}
		// Rule triggered: record it regardless of which effects apply.
		result.TriggeredRules = append(result.TriggeredRules, TriggeredConstraint{
			RuleID:       rule.ID,
			ObligationID: rule.ObligationID,
			Regulation:   rule.Regulation,
			ArticleRef:   rule.ArticleRef,
			Title:        rule.Title,
			RuleType:     rule.RuleType,
		})
		// Hard block? Allowed=false means no safeguard can lift the ban,
		// so the remaining (softer) effects of this constraint are skipped.
		if constraint.Then.Allowed != nil && !*constraint.Then.Allowed {
			result.IsCompliant = false
			result.Violations = append(result.Violations, Violation{
				RuleID:       rule.ID,
				ObligationID: rule.ObligationID,
				ArticleRef:   rule.ArticleRef,
				Title:        rule.Title,
				Description:  rule.Description,
			})
			e.markForbiddenDimensions(config, constraint.If, rule, result)
			continue
		}
		// Required values (yellow zone)?
		if len(constraint.Then.RequiredValues) > 0 {
			e.applyRequiredValues(config, constraint.Then.RequiredValues, rule, result)
		}
		// Risk classification override (last matching rule wins).
		if constraint.Then.SetRiskClassification != "" {
			result.RiskClassification = constraint.Then.SetRiskClassification
		}
		// Collect controls and patterns, de-duplicated.
		result.RequiredControls = appendUnique(result.RequiredControls, constraint.Then.RequiredControls...)
		result.RequiredPatterns = appendUnique(result.RequiredPatterns, constraint.Then.RequiredPatterns...)
	}
}
// markForbiddenDimensions flags every dimension referenced by the matched
// condition as FORBIDDEN and records the rule that caused the hard block.
func (e *Evaluator) markForbiddenDimensions(
	config *DimensionConfig, cond ConditionSet, rule *ConstraintRule, result *EvaluationResult,
) {
	for name := range cond {
		info := result.ZoneMap[name]
		info.Zone = ZoneForbidden
		info.Reason = rule.Title
		info.ObligationRefs = appendUnique(info.ObligationRefs, rule.ArticleRef)
		info.ForbiddenValues = appendUnique(info.ForbiddenValues, config.GetValue(name))
		result.ZoneMap[name] = info
	}
}
// applyRequiredValues checks whether the required dimension values are met.
// Each unmet dimension is marked RESTRICTED (never downgrading a FORBIDDEN
// zone); if anything is unmet, the result becomes non-compliant and a
// Restriction listing the unmet values is recorded.
func (e *Evaluator) applyRequiredValues(
	config *DimensionConfig, required map[string]string, rule *ConstraintRule, result *EvaluationResult,
) {
	unmet := make(map[string]string)
	for dim, requiredVal := range required {
		actual := config.GetValue(dim)
		if actual != requiredVal {
			unmet[dim] = requiredVal
			// Mark as RESTRICTED (upgrade from SAFE, but don't downgrade from FORBIDDEN)
			zi := result.ZoneMap[dim]
			if zi.Zone != ZoneForbidden {
				zi.Zone = ZoneRestricted
				zi.Reason = rule.Title
				zi.AllowedValues = appendUnique(zi.AllowedValues, requiredVal)
				zi.Safeguards = appendUnique(zi.Safeguards, rule.ArticleRef)
				zi.ObligationRefs = appendUnique(zi.ObligationRefs, rule.ArticleRef)
				result.ZoneMap[dim] = zi
			}
		}
	}
	// One Restriction per rule, bundling all unmet dimensions.
	if len(unmet) > 0 {
		result.IsCompliant = false
		result.Restrictions = append(result.Restrictions, Restriction{
			RuleID:       rule.ID,
			ObligationID: rule.ObligationID,
			ArticleRef:   rule.ArticleRef,
			Title:        rule.Title,
			Required:     unmet,
		})
	}
}
// allDimensions lists every dimension name in the constraint space, in the
// same order as the DimensionConfig fields and their json tags.
var allDimensions = []string{
	"automation_level", "decision_binding", "decision_impact", "domain",
	"data_type", "human_in_loop", "explainability", "risk_classification",
	"legal_basis", "transparency_required", "logging_required",
	"model_type", "deployment_scope",
}
// appendUnique appends each non-empty item to slice, skipping values that
// are already present (in the slice or among earlier items), and returns
// the possibly-grown slice. Order of first occurrence is preserved.
func appendUnique(slice []string, items ...string) []string {
	present := make(map[string]bool, len(slice)+len(items))
	for _, existing := range slice {
		present[existing] = true
	}
	for _, candidate := range items {
		if candidate == "" || present[candidate] {
			continue
		}
		present[candidate] = true
		slice = append(slice, candidate)
	}
	return slice
}

View File

@@ -0,0 +1,229 @@
package maximizer
import (
"path/filepath"
"runtime"
"testing"
)
// loadTestRules loads the real shipped ruleset for use in tests, resolving
// the policies/ directory relative to this test file's location on disk.
// It fails the test immediately if the file cannot be loaded.
func loadTestRules(t *testing.T) *ConstraintRuleSet {
	t.Helper()
	_, filename, _, ok := runtime.Caller(0)
	if !ok {
		t.Fatal("cannot determine test file location")
	}
	// Walk up from internal/maximizer/ to ai-compliance-sdk/
	dir := filepath.Dir(filename) // internal/maximizer
	dir = filepath.Dir(dir)       // internal
	dir = filepath.Dir(dir)       // ai-compliance-sdk
	path := filepath.Join(dir, "policies", "maximizer_constraints_v1.json")
	rules, err := LoadConstraintRules(path)
	if err != nil {
		t.Fatalf("LoadConstraintRules: %v", err)
	}
	return rules
}
// TestLoadConstraintRules sanity-checks the shipped ruleset: correct
// version string and a minimum rule count.
func TestLoadConstraintRules(t *testing.T) {
	ruleset := loadTestRules(t)
	if got := ruleset.Version; got != "1.0.0" {
		t.Errorf("expected version 1.0.0, got %s", got)
	}
	if count := len(ruleset.Rules); count < 20 {
		t.Errorf("expected at least 20 rules, got %d", count)
	}
}
// TestEvalCompliantConfig verifies that a deliberately low-risk config
// (assistive, human review, internal, rule-based, non-personal data)
// evaluates as compliant with every dimension in the SAFE zone.
func TestEvalCompliantConfig(t *testing.T) {
	rules := loadTestRules(t)
	eval := NewEvaluator(rules)
	config := &DimensionConfig{
		AutomationLevel:      AutoAssistive,
		DecisionBinding:      BindingHumanReview,
		DecisionImpact:       ImpactLow,
		Domain:               DomainGeneral,
		DataType:             DataNonPersonal,
		HumanInLoop:          HILRequired,
		Explainability:       ExplainBasic,
		RiskClassification:   RiskMinimal,
		LegalBasis:           LegalContract,
		TransparencyRequired: false,
		LoggingRequired:      false,
		ModelType:            ModelRuleBased,
		DeploymentScope:      ScopeInternal,
	}
	result := eval.Evaluate(config)
	if !result.IsCompliant {
		t.Errorf("expected compliant, got violations: %+v", result.Violations)
	}
	// All dimensions should be SAFE
	for dim, zi := range result.ZoneMap {
		if zi.Zone != ZoneSafe {
			t.Errorf("dimension %s: expected SAFE, got %s", dim, zi.Zone)
		}
	}
}
// TestEvalHRFullAutomationBlocked verifies that fully-automated, fully
// binding HR decisions on personal data are a hard block: non-compliant,
// at least one violation, and automation_level in the FORBIDDEN zone.
func TestEvalHRFullAutomationBlocked(t *testing.T) {
	rules := loadTestRules(t)
	eval := NewEvaluator(rules)
	config := &DimensionConfig{
		AutomationLevel:      AutoFull,
		DecisionBinding:      BindingFullyBinding,
		DecisionImpact:       ImpactHigh,
		Domain:               DomainHR,
		DataType:             DataPersonal,
		HumanInLoop:          HILNone,
		Explainability:       ExplainNone,
		RiskClassification:   RiskMinimal,
		LegalBasis:           LegalContract,
		TransparencyRequired: false,
		LoggingRequired:      false,
		ModelType:            ModelBlackboxLLM,
		DeploymentScope:      ScopeExternal,
	}
	result := eval.Evaluate(config)
	if result.IsCompliant {
		t.Error("expected non-compliant for HR full automation")
	}
	if len(result.Violations) == 0 {
		t.Error("expected at least one violation")
	}
	// automation_level should be FORBIDDEN
	zi := result.ZoneMap["automation_level"]
	if zi.Zone != ZoneForbidden {
		t.Errorf("automation_level: expected FORBIDDEN, got %s", zi.Zone)
	}
}
func TestEvalProhibitedClassification(t *testing.T) {
rules := loadTestRules(t)
eval := NewEvaluator(rules)
config := &DimensionConfig{
RiskClassification: RiskProhibited,
DeploymentScope: ScopePublic,
}
result := eval.Evaluate(config)
if result.IsCompliant {
t.Error("expected non-compliant for prohibited classification")
}
found := false
for _, v := range result.Violations {
if v.RuleID == "MC-AIA-PROHIBITED-001" {
found = true
}
}
if !found {
t.Error("expected MC-AIA-PROHIBITED-001 violation")
}
}
func TestEvalSensitiveDataRequiresConsent(t *testing.T) {
rules := loadTestRules(t)
eval := NewEvaluator(rules)
config := &DimensionConfig{
DataType: DataSensitive,
LegalBasis: LegalLegitimateInterest, // wrong basis for sensitive
}
result := eval.Evaluate(config)
if result.IsCompliant {
t.Error("expected non-compliant: sensitive data without consent")
}
// Should require consent
found := false
for _, r := range result.Restrictions {
if val, ok := r.Required["legal_basis"]; ok && val == "consent" {
found = true
}
}
if !found {
t.Error("expected restriction requiring legal_basis=consent")
}
}
// TestEvalHighRiskRequiresLogging verifies that a high-risk classification
// without logging, transparency, human-in-loop, or explainability is
// non-compliant and puts logging_required into the RESTRICTED zone.
func TestEvalHighRiskRequiresLogging(t *testing.T) {
	rules := loadTestRules(t)
	eval := NewEvaluator(rules)
	config := &DimensionConfig{
		RiskClassification:   RiskHigh,
		LoggingRequired:      false,
		TransparencyRequired: false,
		HumanInLoop:          HILNone,
		Explainability:       ExplainNone,
	}
	result := eval.Evaluate(config)
	if result.IsCompliant {
		t.Error("expected non-compliant: high risk without logging/transparency/hil")
	}
	// Check logging_required is RESTRICTED
	zi := result.ZoneMap["logging_required"]
	if zi.Zone != ZoneRestricted {
		t.Errorf("logging_required: expected RESTRICTED, got %s", zi.Zone)
	}
}
func TestEvalTriggeredRulesHaveObligationRefs(t *testing.T) {
rules := loadTestRules(t)
eval := NewEvaluator(rules)
config := &DimensionConfig{
AutomationLevel: AutoFull,
DecisionImpact: ImpactHigh,
Domain: DomainHR,
DataType: DataPersonal,
}
result := eval.Evaluate(config)
for _, tr := range result.TriggeredRules {
if tr.RuleID == "" {
t.Error("triggered rule missing RuleID")
}
if tr.ObligationID == "" {
t.Error("triggered rule missing ObligationID")
}
if tr.ArticleRef == "" {
t.Error("triggered rule missing ArticleRef")
}
}
}
// TestConditionSetMatchesExact table-tests ConditionSet.Matches with exact
// string conditions: full match, partial mismatch, wrong value, and the
// empty set (which matches any config).
func TestConditionSetMatchesExact(t *testing.T) {
	config := &DimensionConfig{
		Domain:         DomainHR,
		DecisionImpact: ImpactHigh,
	}
	tests := []struct {
		name    string
		cond    ConditionSet
		matches bool
	}{
		{"exact match", ConditionSet{"domain": "hr", "decision_impact": "high"}, true},
		{"partial match fails", ConditionSet{"domain": "hr", "decision_impact": "low"}, false},
		{"unknown value", ConditionSet{"domain": "finance"}, false},
		{"empty condition", ConditionSet{}, true},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.cond.Matches(config)
			if got != tc.matches {
				t.Errorf("expected %v, got %v", tc.matches, got)
			}
		})
	}
}

View File

@@ -0,0 +1,189 @@
package maximizer
import "github.com/breakpilot/ai-compliance-sdk/internal/ucca"
// MapIntakeToDimensions converts a UseCaseIntake to a normalized DimensionConfig.
// Highest sensitivity wins for multi-value fields (see deriveDataType).
// Dimensions the intake does not express get conservative defaults:
// basic explainability, minimal risk (to be set by the evaluator),
// contract legal basis, and no transparency/logging obligations.
func MapIntakeToDimensions(intake *ucca.UseCaseIntake) *DimensionConfig {
	config := &DimensionConfig{
		AutomationLevel:      mapAutomation(intake.Automation),
		DecisionBinding:      deriveBinding(intake),
		DecisionImpact:       deriveImpact(intake),
		Domain:               mapDomain(intake.Domain),
		DataType:             deriveDataType(intake.DataTypes),
		HumanInLoop:          deriveHIL(intake.Automation),
		Explainability:       ExplainBasic, // default
		RiskClassification:   RiskMinimal,  // will be set by evaluator
		LegalBasis:           LegalContract, // default
		TransparencyRequired: false,
		LoggingRequired:      false,
		ModelType:            deriveModelType(intake.ModelUsage),
		DeploymentScope:      deriveScope(intake),
	}
	return config
}
// MapDimensionsToIntake converts a DimensionConfig back to a UseCaseIntake,
// preserving unchanged fields from the original intake. Only automation,
// data types, and domain are written back; all other intake fields are
// carried over untouched via the shallow copy.
// NOTE(review): AutoNone maps to AutomationAssistive — the round trip is
// lossy for "none"; confirm this is intended.
func MapDimensionsToIntake(config *DimensionConfig, original *ucca.UseCaseIntake) *ucca.UseCaseIntake {
	result := *original // shallow copy
	// Map automation level
	switch config.AutomationLevel {
	case AutoNone:
		result.Automation = ucca.AutomationAssistive
	case AutoAssistive:
		result.Automation = ucca.AutomationAssistive
	case AutoPartial:
		result.Automation = ucca.AutomationSemiAutomated
	case AutoFull:
		result.Automation = ucca.AutomationFullyAutomated
	}
	// Map data type back
	result.DataTypes = mapDataTypeBack(config.DataType, original.DataTypes)
	// Map domain back
	result.Domain = mapDomainBack(config.Domain, original.Domain)
	return &result
}
// mapAutomation translates the intake automation level into the maximizer's
// scale; unrecognized values map to AutoNone.
func mapAutomation(a ucca.AutomationLevel) AutomationLevel {
	table := map[ucca.AutomationLevel]AutomationLevel{
		ucca.AutomationAssistive:      AutoAssistive,
		ucca.AutomationSemiAutomated:  AutoPartial,
		ucca.AutomationFullyAutomated: AutoFull,
	}
	if lvl, ok := table[a]; ok {
		return lvl
	}
	return AutoNone
}
// deriveBinding infers decision binding: outputs with legal effects or access
// decisions are binding — fully so only under full automation.
func deriveBinding(intake *ucca.UseCaseIntake) DecisionBinding {
	consequential := intake.Outputs.LegalEffects || intake.Outputs.AccessDecisions
	if !consequential {
		return BindingNonBinding
	}
	if intake.Automation == ucca.AutomationFullyAutomated {
		return BindingFullyBinding
	}
	return BindingHumanReview
}
// deriveImpact grades decision impact: legal/access effects are high; scoring,
// ranking or decision-making purposes are medium; everything else is low.
func deriveImpact(intake *ucca.UseCaseIntake) DecisionImpact {
	switch {
	case intake.Outputs.LegalEffects || intake.Outputs.AccessDecisions:
		return ImpactHigh
	case intake.Outputs.RankingsOrScores || intake.Purpose.EvaluationScoring || intake.Purpose.DecisionMaking:
		return ImpactMedium
	default:
		return ImpactLow
	}
}
// mapDomain normalizes the free-form intake domain to one of the maximizer's
// domain categories; unknown domains fall back to DomainGeneral.
func mapDomain(d ucca.Domain) DomainCategory {
	aliases := map[ucca.Domain]DomainCategory{
		"hr": DomainHR, "human_resources": DomainHR,
		"finance": DomainFinance, "banking": DomainFinance,
		"insurance": DomainFinance, "investment": DomainFinance,
		"education": DomainEducation, "school": DomainEducation,
		"university": DomainEducation,
		"health": DomainHealth, "healthcare": DomainHealth,
		"medical": DomainHealth,
		"marketing": DomainMarketing, "advertising": DomainMarketing,
	}
	if cat, ok := aliases[d]; ok {
		return cat
	}
	return DomainGeneral
}
// deriveDataType collapses the intake's data-type flags to a single
// sensitivity level. Highest sensitivity wins:
// biometric > Art. 9 sensitive > personal > non-personal.
func deriveDataType(dt ucca.DataTypes) DataTypeSensitivity {
	switch {
	case dt.BiometricData:
		return DataBiometric
	case dt.Article9Data:
		return DataSensitive
	case dt.PersonalData, dt.EmployeeData, dt.CustomerData,
		dt.FinancialData, dt.MinorData, dt.LocationData,
		dt.Images, dt.Audio:
		return DataPersonal
	default:
		return DataNonPersonal
	}
}
// deriveHIL derives the human-in-the-loop level from the automation level.
// Assistive and unknown levels both default to a required human.
func deriveHIL(a ucca.AutomationLevel) HumanInLoopLevel {
	if a == ucca.AutomationSemiAutomated {
		return HILOptional
	}
	if a == ucca.AutomationFullyAutomated {
		return HILNone
	}
	return HILRequired
}
// deriveModelType infers the model-type dimension from the ModelUsage flags.
// NOTE(review): pure RAG (no training, no fine-tuning) is treated as
// ModelRuleBased, while any training/fine-tuning maps to ModelBlackboxLLM —
// confirm these equivalences are intentional; all other usage patterns are
// classified as statistical.
func deriveModelType(mu ucca.ModelUsage) ModelType {
	if mu.RAG && !mu.Training && !mu.Finetune {
		return ModelRuleBased
	}
	if mu.Training || mu.Finetune {
		return ModelBlackboxLLM
	}
	return ModelStatistical
}
// deriveScope infers deployment scope: public service or data export means
// public; customer support or marketing means external; otherwise internal.
func deriveScope(intake *ucca.UseCaseIntake) DeploymentScope {
	switch {
	case intake.Purpose.PublicService || intake.Outputs.DataExport:
		return ScopePublic
	case intake.Purpose.CustomerSupport || intake.Purpose.Marketing:
		return ScopeExternal
	default:
		return ScopeInternal
	}
}
// mapDataTypeBack projects a sensitivity level back onto the intake's data
// flags. The three flags form a strictly increasing ladder (personal ⊂
// sensitive ⊂ biometric), so each flag can be derived from the target level
// directly; other flags in the original are left untouched.
func mapDataTypeBack(dt DataTypeSensitivity, original ucca.DataTypes) ucca.DataTypes {
	result := original
	switch dt {
	case DataNonPersonal, DataPersonal, DataSensitive, DataBiometric:
		result.PersonalData = dt != DataNonPersonal
		result.Article9Data = dt == DataSensitive || dt == DataBiometric
		result.BiometricData = dt == DataBiometric
	}
	return result
}
// mapDomainBack maps a domain category to its canonical intake domain string;
// DomainGeneral (and any unknown category) keeps the original domain.
func mapDomainBack(dc DomainCategory, original ucca.Domain) ucca.Domain {
	canonical := map[DomainCategory]ucca.Domain{
		DomainHR:        "hr",
		DomainFinance:   "finance",
		DomainEducation: "education",
		DomainHealth:    "health",
		DomainMarketing: "marketing",
	}
	if d, ok := canonical[dc]; ok {
		return d
	}
	return original
}

View File

@@ -0,0 +1,291 @@
package maximizer
import (
	"sort"
	"strings"
)
const maxVariants = 5 // upper bound on ranked variants returned per optimization

// OptimizedVariant is a single compliant configuration with scoring.
type OptimizedVariant struct {
	Config         DimensionConfig   `json:"config"`
	Evaluation     *EvaluationResult `json:"evaluation"`
	Deltas         []DimensionDelta  `json:"deltas"`
	DeltaCount     int               `json:"delta_count"`
	SafetyScore    int               `json:"safety_score"`    // 0-100, see ComputeSafetyScore
	UtilityScore   int               `json:"utility_score"`   // 0-100, see ComputeUtilityScore
	CompositeScore float64           `json:"composite_score"` // weighted blend used for ranking
	Rationale      string            `json:"rationale"`       // human-readable explanation (German)
}

// OptimizationResult contains the original evaluation and ranked compliant variants.
type OptimizationResult struct {
	OriginalConfig    DimensionConfig    `json:"original_config"`
	OriginalCompliant bool               `json:"original_compliant"`
	OriginalEval      *EvaluationResult  `json:"original_evaluation"`
	Variants          []OptimizedVariant `json:"variants"`        // sorted best-first by composite score
	MaxSafeConfig     *OptimizedVariant  `json:"max_safe_config"` // best variant; nil when none exists
}

// Optimizer finds the maximum compliant configuration variant.
type Optimizer struct {
	evaluator *Evaluator   // rule-evaluation backend
	weights   ScoreWeights // safety/utility blend used for ranking
}

// NewOptimizer creates an optimizer backed by the given evaluator,
// using DefaultWeights for scoring.
func NewOptimizer(evaluator *Evaluator) *Optimizer {
	return &Optimizer{evaluator: evaluator, weights: DefaultWeights}
}
// Optimize takes a desired (possibly non-compliant) config and returns
// ranked compliant alternatives.
//
// Flow: evaluate the desired config; if it is already compliant it is
// returned as the single variant. A "prohibited" risk classification
// short-circuits with an empty variant list (nothing can be optimized).
// Otherwise candidate variants are generated and the best-scoring one
// becomes MaxSafeConfig.
func (o *Optimizer) Optimize(desired *DimensionConfig) *OptimizationResult {
	eval := o.evaluator.Evaluate(desired)
	result := &OptimizationResult{
		OriginalConfig:    *desired,
		OriginalCompliant: eval.IsCompliant,
		OriginalEval:      eval,
	}
	if eval.IsCompliant {
		variant := o.scoreVariant(desired, desired, eval)
		variant.Rationale = "Konfiguration ist bereits konform"
		result.Variants = []OptimizedVariant{variant}
		// Point at the slice element so the result stays self-contained.
		result.MaxSafeConfig = &result.Variants[0]
		return result
	}
	// Check for hard prohibitions that cannot be optimized.
	// Explicit empty slice (not nil) so JSON encodes [] rather than null.
	if o.hasProhibitedClassification(desired) {
		result.Variants = []OptimizedVariant{}
		return result
	}
	candidates := o.generateCandidates(desired, eval)
	result.Variants = candidates
	if len(candidates) > 0 {
		// Candidates arrive sorted by composite score; index 0 is the best.
		result.MaxSafeConfig = &result.Variants[0]
	}
	return result
}
// hasProhibitedClassification reports whether the config carries the
// "prohibited" risk class, which no dimension change can remediate.
func (o *Optimizer) hasProhibitedClassification(config *DimensionConfig) bool {
	prohibited := config.RiskClassification == RiskProhibited
	return prohibited
}
// generateCandidates builds compliant variants by fixing violations.
// Three independent strategies are attempted; a strategy's result is only
// kept if it re-evaluates as compliant. Survivors are deduplicated, sorted
// by composite score (descending) and capped at maxVariants.
func (o *Optimizer) generateCandidates(desired *DimensionConfig, eval *EvaluationResult) []OptimizedVariant {
	// Strategy 1: Fix all violations in one pass (greedy nearest fix)
	greedy := o.greedyFix(desired, eval)
	var candidates []OptimizedVariant
	if greedy != nil {
		greedyEval := o.evaluator.Evaluate(&greedy.Config)
		if greedyEval.IsCompliant {
			v := o.scoreVariant(desired, &greedy.Config, greedyEval)
			v.Rationale = "Minimale Anpassung — naechster konformer Zustand"
			candidates = append(candidates, v)
		}
	}
	// Strategy 2: Conservative variant (maximum safety)
	conservative := o.conservativeFix(desired, eval)
	if conservative != nil {
		consEval := o.evaluator.Evaluate(&conservative.Config)
		if consEval.IsCompliant {
			v := o.scoreVariant(desired, &conservative.Config, consEval)
			v.Rationale = "Konservative Variante — maximale regulatorische Sicherheit"
			candidates = append(candidates, v)
		}
	}
	// Strategy 3: Fix restricted dimensions too (belt-and-suspenders)
	enhanced := o.enhancedFix(desired, eval)
	if enhanced != nil {
		enhEval := o.evaluator.Evaluate(&enhanced.Config)
		if enhEval.IsCompliant {
			v := o.scoreVariant(desired, &enhanced.Config, enhEval)
			v.Rationale = "Erweiterte Variante — alle Einschraenkungen vorab behoben"
			candidates = append(candidates, v)
		}
	}
	// Deduplicate (strategies may converge to the same config) and rank.
	candidates = deduplicateVariants(candidates)
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].CompositeScore > candidates[j].CompositeScore
	})
	if len(candidates) > maxVariants {
		candidates = candidates[:maxVariants]
	}
	return candidates
}
// greedyFix applies the minimum change per violated dimension.
//
// First pass: step every FORBIDDEN dimension one notch safer and apply all
// values demanded by active restrictions. Then up to 3 re-evaluation passes
// repeat the same fixes to converge. The returned variant is NOT guaranteed
// compliant — the caller re-evaluates it before use.
func (o *Optimizer) greedyFix(desired *DimensionConfig, eval *EvaluationResult) *OptimizedVariant {
	fixed := desired.Clone()
	// Fix FORBIDDEN zones
	for dim, zi := range eval.ZoneMap {
		if zi.Zone != ZoneForbidden {
			continue
		}
		o.fixDimension(&fixed, dim, eval)
	}
	// Fix RESTRICTED zones (required values not met)
	for _, restriction := range eval.Restrictions {
		for dim, requiredVal := range restriction.Required {
			fixed.SetValue(dim, requiredVal)
		}
	}
	// Re-evaluate and iterate (max 3 passes to converge)
	for i := 0; i < 3; i++ {
		reEval := o.evaluator.Evaluate(&fixed)
		if reEval.IsCompliant {
			break
		}
		for dim, zi := range reEval.ZoneMap {
			if zi.Zone == ZoneForbidden {
				o.fixDimension(&fixed, dim, reEval)
			}
		}
		for _, restriction := range reEval.Restrictions {
			for dim, requiredVal := range restriction.Required {
				fixed.SetValue(dim, requiredVal)
			}
		}
	}
	return &OptimizedVariant{Config: fixed}
}
// conservativeFix chooses the safest allowed value for each violated dimension.
//
// Every non-SAFE dimension is reset to index 0 of AllValues — assumed to be
// the safest option (NOTE(review): relies on all dimension value lists being
// ordered safest-first; confirm against AllValues). Restriction requirements
// are applied afterwards and may override. No convergence loop: the caller
// re-evaluates the result.
func (o *Optimizer) conservativeFix(desired *DimensionConfig, eval *EvaluationResult) *OptimizedVariant {
	fixed := desired.Clone()
	for dim, zi := range eval.ZoneMap {
		if zi.Zone == ZoneSafe {
			continue
		}
		// Use the safest (lowest ordinal risk) value
		vals := AllValues[dim]
		if len(vals) > 0 {
			fixed.SetValue(dim, vals[0]) // index 0 = safest
		}
	}
	// Apply all required values
	for _, restriction := range eval.Restrictions {
		for dim, val := range restriction.Required {
			fixed.SetValue(dim, val)
		}
	}
	return &OptimizedVariant{Config: fixed}
}
// enhancedFix fixes violations AND proactively resolves restrictions.
//
// Non-SAFE dimensions are set to the first explicitly allowed value when the
// zone info provides one, otherwise stepped one notch safer. Restriction
// requirements are applied, then up to 3 re-evaluation passes re-apply
// requirements to converge. The caller re-evaluates the result.
func (o *Optimizer) enhancedFix(desired *DimensionConfig, eval *EvaluationResult) *OptimizedVariant {
	fixed := desired.Clone()
	// Fix all non-SAFE dimensions
	for dim, zi := range eval.ZoneMap {
		if zi.Zone == ZoneSafe {
			continue
		}
		if len(zi.AllowedValues) > 0 {
			fixed.SetValue(dim, zi.AllowedValues[0])
		} else {
			o.fixDimension(&fixed, dim, eval)
		}
	}
	// Apply required values
	for _, restriction := range eval.Restrictions {
		for dim, val := range restriction.Required {
			fixed.SetValue(dim, val)
		}
	}
	// Re-evaluate to converge
	for i := 0; i < 3; i++ {
		reEval := o.evaluator.Evaluate(&fixed)
		if reEval.IsCompliant {
			break
		}
		for _, restriction := range reEval.Restrictions {
			for dim, val := range restriction.Required {
				fixed.SetValue(dim, val)
			}
		}
	}
	return &OptimizedVariant{Config: fixed}
}
// fixDimension steps the dimension to the nearest safer value: one index
// lower in AllValues (lower index = safer). Unknown current values are reset
// to the safest option; dimensions without a value list are left alone.
func (o *Optimizer) fixDimension(config *DimensionConfig, dim string, eval *EvaluationResult) {
	vals := AllValues[dim]
	if len(vals) == 0 {
		return
	}
	switch idx := indexOf(vals, config.GetValue(dim)); {
	case idx < 0:
		// Current value not in the list: reset to the safest option.
		config.SetValue(dim, vals[0])
	case idx > 0:
		// Step one position toward the safer end.
		config.SetValue(dim, vals[idx-1])
	}
}
// scoreVariant packages a variant config with its evaluation, the deltas
// against the original, and the safety/utility/composite scores.
func (o *Optimizer) scoreVariant(original, variant *DimensionConfig, eval *EvaluationResult) OptimizedVariant {
	deltas := original.Diff(variant)
	safetyScore := ComputeSafetyScore(eval)
	utilityScore := ComputeUtilityScore(original, variant)
	v := OptimizedVariant{
		Config:       *variant,
		Evaluation:   eval,
		Deltas:       deltas,
		DeltaCount:   len(deltas),
		SafetyScore:  safetyScore,
		UtilityScore: utilityScore,
	}
	v.CompositeScore = ComputeCompositeScore(safetyScore, utilityScore, o.weights)
	return v
}
// indexOf reports the position of val in slice, or -1 when absent.
func indexOf(slice []string, val string) int {
	idx := -1
	for i, s := range slice {
		if s == val {
			idx = i
			break
		}
	}
	return idx
}
// deduplicateVariants drops variants whose full dimension configuration was
// already seen, keeping the first occurrence and preserving order.
func deduplicateVariants(variants []OptimizedVariant) []OptimizedVariant {
	var unique []OptimizedVariant
	seen := make(map[string]struct{}, len(variants))
	for _, v := range variants {
		k := configKey(&v.Config)
		if _, dup := seen[k]; dup {
			continue
		}
		seen[k] = struct{}{}
		unique = append(unique, v)
	}
	return unique
}
// configKey serializes every dimension value into a stable "dim=value;"
// string used to detect duplicate variant configurations. allDimensions has
// a fixed order, so equal configs always produce equal keys.
// Uses strings.Builder instead of string concatenation in a loop.
func configKey(c *DimensionConfig) string {
	var b strings.Builder
	for _, dim := range allDimensions {
		b.WriteString(dim)
		b.WriteByte('=')
		b.WriteString(c.GetValue(dim))
		b.WriteByte(';')
	}
	return b.String()
}

View File

@@ -0,0 +1,300 @@
package maximizer
import "testing"
// newTestOptimizer builds an Optimizer over the shared test rule set.
func newTestOptimizer(t *testing.T) *Optimizer {
	t.Helper()
	rules := loadTestRules(t)
	eval := NewEvaluator(rules)
	return NewOptimizer(eval)
}
// --- Golden Test Cases ---
// TestGC01_HRFullAutomationBlocked: a fully automated, fully binding,
// high-impact HR decision with no human in the loop must be non-compliant,
// and the optimizer must produce a compliant variant that reinstates human
// oversight.
func TestGC01_HRFullAutomationBlocked(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		AutomationLevel:    AutoFull,
		DecisionBinding:    BindingFullyBinding,
		DecisionImpact:     ImpactHigh,
		Domain:             DomainHR,
		DataType:           DataPersonal,
		HumanInLoop:        HILNone,
		Explainability:     ExplainNone,
		RiskClassification: RiskMinimal,
		LegalBasis:         LegalContract,
		ModelType:          ModelBlackboxLLM,
		DeploymentScope:    ScopeExternal,
	}
	result := opt.Optimize(config)
	if result.OriginalCompliant {
		t.Fatal("expected original to be non-compliant")
	}
	if result.MaxSafeConfig == nil {
		t.Fatal("expected an optimized variant")
	}
	max := result.MaxSafeConfig
	if max.Config.AutomationLevel == AutoFull {
		t.Error("optimizer must change automation_level from full")
	}
	if max.Config.HumanInLoop != HILRequired {
		t.Errorf("expected human_in_loop=required, got %s", max.Config.HumanInLoop)
	}
	if max.Config.DecisionBinding == BindingFullyBinding {
		t.Error("expected decision_binding to change from fully_binding")
	}
	// Verify the optimized config is actually compliant
	if !max.Evaluation.IsCompliant {
		t.Errorf("MaxSafeConfig is not compliant: violations=%+v", max.Evaluation.Violations)
	}
}
// TestGC02_HRRankingWithHumanReviewAllowed: the same HR scenario with human
// review, required HIL, transparency and logging must yield a variant
// (allowed, possibly with conditions).
func TestGC02_HRRankingWithHumanReviewAllowed(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		AutomationLevel:      AutoAssistive,
		DecisionBinding:      BindingHumanReview,
		DecisionImpact:       ImpactHigh,
		Domain:               DomainHR,
		DataType:             DataPersonal,
		HumanInLoop:          HILRequired,
		Explainability:       ExplainBasic,
		RiskClassification:   RiskMinimal,
		LegalBasis:           LegalContract,
		TransparencyRequired: true,
		LoggingRequired:      true,
		ModelType:            ModelBlackboxLLM,
		DeploymentScope:      ScopeExternal,
	}
	result := opt.Optimize(config)
	// Should be allowed with conditions (requirements from high-risk classification)
	if result.MaxSafeConfig == nil {
		t.Fatal("expected a variant")
	}
}

// TestGC05_SensitiveDataWithoutLegalBasis: Art. 9 sensitive data under
// legitimate interest must be non-compliant; the optimizer must switch the
// legal basis to consent.
func TestGC05_SensitiveDataWithoutLegalBasis(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		DataType:        DataSensitive,
		LegalBasis:      LegalLegitimateInterest,
		DecisionImpact:  ImpactHigh,
		Domain:          DomainHR,
		AutomationLevel: AutoAssistive,
		HumanInLoop:     HILRequired,
		DecisionBinding: BindingHumanReview,
	}
	result := opt.Optimize(config)
	if result.OriginalCompliant {
		t.Error("expected non-compliant: sensitive data with legitimate_interest")
	}
	if result.MaxSafeConfig == nil {
		t.Fatal("expected optimized variant")
	}
	if result.MaxSafeConfig.Config.LegalBasis != LegalConsent {
		t.Errorf("expected legal_basis=consent, got %s", result.MaxSafeConfig.Config.LegalBasis)
	}
}
// TestGC16_ProhibitedPracticeBlocked: a prohibited risk classification must
// be non-compliant AND yield zero variants — prohibition is not optimizable.
func TestGC16_ProhibitedPracticeBlocked(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		RiskClassification: RiskProhibited,
		DeploymentScope:    ScopePublic,
	}
	result := opt.Optimize(config)
	if result.OriginalCompliant {
		t.Error("expected non-compliant for prohibited")
	}
	// Prohibited = no optimization possible
	if len(result.Variants) > 0 {
		t.Error("expected no variants for prohibited classification")
	}
}

// TestGC18_OptimizerMinimalChange: dimensions that are already acceptable
// (domain, explainability, model type) must survive optimization unchanged.
func TestGC18_OptimizerMinimalChange(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		AutomationLevel:    AutoFull,
		DecisionBinding:    BindingFullyBinding,
		DecisionImpact:     ImpactHigh,
		Domain:             DomainHR,
		DataType:           DataPersonal,
		HumanInLoop:        HILNone,
		Explainability:     ExplainBasic,
		RiskClassification: RiskMinimal,
		LegalBasis:         LegalContract,
		ModelType:          ModelStatistical,
		DeploymentScope:    ScopeInternal,
	}
	result := opt.Optimize(config)
	if result.MaxSafeConfig == nil {
		t.Fatal("expected optimized variant")
	}
	max := result.MaxSafeConfig
	// Domain must NOT change
	if max.Config.Domain != DomainHR {
		t.Errorf("optimizer must not change domain: got %s", max.Config.Domain)
	}
	// Explainability was already basic, should stay
	if max.Config.Explainability != ExplainBasic {
		t.Errorf("optimizer should keep explainability=basic, got %s", max.Config.Explainability)
	}
	// Model type should not change unnecessarily
	if max.Config.ModelType != ModelStatistical {
		t.Errorf("optimizer should not change model_type unnecessarily, got %s", max.Config.ModelType)
	}
}
// TestGC20_AlreadyCompliantNoChanges: a fully compliant config must come back
// with zero deltas and full utility (100).
func TestGC20_AlreadyCompliantNoChanges(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		AutomationLevel:      AutoAssistive,
		DecisionBinding:      BindingNonBinding,
		DecisionImpact:       ImpactLow,
		Domain:               DomainGeneral,
		DataType:             DataNonPersonal,
		HumanInLoop:          HILRequired,
		Explainability:       ExplainBasic,
		RiskClassification:   RiskMinimal,
		LegalBasis:           LegalContract,
		TransparencyRequired: false,
		LoggingRequired:      false,
		ModelType:            ModelRuleBased,
		DeploymentScope:      ScopeInternal,
	}
	result := opt.Optimize(config)
	if !result.OriginalCompliant {
		t.Error("expected compliant")
	}
	if result.MaxSafeConfig == nil {
		t.Fatal("expected variant")
	}
	if result.MaxSafeConfig.DeltaCount != 0 {
		t.Errorf("expected 0 deltas for compliant config, got %d", result.MaxSafeConfig.DeltaCount)
	}
	if result.MaxSafeConfig.UtilityScore != 100 {
		t.Errorf("expected utility 100, got %d", result.MaxSafeConfig.UtilityScore)
	}
}
// --- Meta Tests ---
// TestMT01_Determinism: optimizing the same config twice must yield the same
// compliance verdict, variant count and top composite score (the engine is
// deterministic, no LLM in the core).
func TestMT01_Determinism(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		AutomationLevel: AutoFull,
		DecisionImpact:  ImpactHigh,
		Domain:          DomainHR,
		DataType:        DataPersonal,
		HumanInLoop:     HILNone,
	}
	r1 := opt.Optimize(config)
	r2 := opt.Optimize(config)
	if r1.OriginalCompliant != r2.OriginalCompliant {
		t.Error("determinism failed: different compliance result")
	}
	if len(r1.Variants) != len(r2.Variants) {
		t.Errorf("determinism failed: %d vs %d variants", len(r1.Variants), len(r2.Variants))
	}
	if r1.MaxSafeConfig != nil && r2.MaxSafeConfig != nil {
		if r1.MaxSafeConfig.CompositeScore != r2.MaxSafeConfig.CompositeScore {
			t.Error("determinism failed: different composite scores")
		}
	}
}

// TestMT03_ViolationsReferenceObligations: every violation and every
// triggered rule must carry a non-empty obligation reference.
func TestMT03_ViolationsReferenceObligations(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		AutomationLevel: AutoFull,
		DecisionImpact:  ImpactHigh,
		DataType:        DataSensitive,
	}
	result := opt.Optimize(config)
	for _, v := range result.OriginalEval.Violations {
		if v.ObligationID == "" {
			t.Errorf("violation %s missing obligation reference", v.RuleID)
		}
	}
	for _, tr := range result.OriginalEval.TriggeredRules {
		if tr.ObligationID == "" {
			t.Errorf("triggered rule %s missing obligation reference", tr.RuleID)
		}
	}
}
// TestMT05_OptimizerMinimality: when only the legal basis violates, the
// optimizer may touch only compliance-related dimensions; any other delta is
// a minimality failure.
func TestMT05_OptimizerMinimality(t *testing.T) {
	opt := newTestOptimizer(t)
	// Config that only violates one dimension
	config := &DimensionConfig{
		AutomationLevel:      AutoAssistive,
		DecisionBinding:      BindingHumanReview,
		DecisionImpact:       ImpactLow,
		Domain:               DomainGeneral,
		DataType:             DataSensitive, // only violation: needs consent
		HumanInLoop:          HILRequired,
		Explainability:       ExplainBasic,
		RiskClassification:   RiskMinimal,
		LegalBasis:           LegalLegitimateInterest, // must change to consent
		TransparencyRequired: false,
		LoggingRequired:      false,
		ModelType:            ModelRuleBased,
		DeploymentScope:      ScopeInternal,
	}
	result := opt.Optimize(config)
	if result.MaxSafeConfig == nil {
		t.Fatal("expected optimized variant")
	}
	// Check that only compliance-related dimensions changed
	for _, d := range result.MaxSafeConfig.Deltas {
		switch d.Dimension {
		case "legal_basis", "transparency_required", "logging_required", "data_type":
			// Expected: legal_basis→consent, transparency, logging for sensitive data
			// data_type→personal is from optimizer meta-rule (reduce unnecessary sensitivity)
		default:
			t.Errorf("unexpected dimension change: %s (%s → %s)", d.Dimension, d.From, d.To)
		}
	}
}

// TestOptimizeProducesRankedVariants: when multiple variants are generated,
// they must be sorted by composite score in descending order. Skips (not
// fails) if the rule set only yields a single variant.
func TestOptimizeProducesRankedVariants(t *testing.T) {
	opt := newTestOptimizer(t)
	config := &DimensionConfig{
		AutomationLevel: AutoFull,
		DecisionImpact:  ImpactHigh,
		Domain:          DomainHR,
		DataType:        DataPersonal,
		HumanInLoop:     HILNone,
		Explainability:  ExplainNone,
		ModelType:       ModelBlackboxLLM,
		DeploymentScope: ScopeExternal,
	}
	result := opt.Optimize(config)
	if len(result.Variants) < 2 {
		t.Skipf("only %d variants generated", len(result.Variants))
	}
	// Verify descending composite score order
	for i := 1; i < len(result.Variants); i++ {
		if result.Variants[i].CompositeScore > result.Variants[i-1].CompositeScore {
			t.Errorf("variants not sorted: [%d]=%.1f > [%d]=%.1f",
				i, result.Variants[i].CompositeScore,
				i-1, result.Variants[i-1].CompositeScore)
		}
	}
}

View File

@@ -0,0 +1,84 @@
package maximizer
// ScoreWeights controls the balance between safety and business utility.
type ScoreWeights struct {
	Safety  float64 `json:"safety"`
	Utility float64 `json:"utility"`
}

// DefaultWeights prioritizes business utility slightly over safety margin
// since the optimizer already ensures compliance.
var DefaultWeights = ScoreWeights{Safety: 0.4, Utility: 0.6}

// dimensionBusinessWeight indicates how much business value each dimension
// contributes. Higher = more costly to change for the business.
// Dimensions absent from this map fall back to a default weight of 3
// (see ComputeUtilityScore).
var dimensionBusinessWeight = map[string]int{
	"automation_level":      15,
	"decision_binding":      12,
	"deployment_scope":      10,
	"model_type":            8,
	"decision_impact":       7,
	"explainability":        5,
	"data_type":             5,
	"human_in_loop":         5,
	"legal_basis":           4,
	"domain":                3,
	"risk_classification":   3,
	"transparency_required": 2,
	"logging_required":      2,
}
// ComputeSafetyScore returns 0-100 where 100 = completely safe (every
// dimension in the SAFE zone). Each RESTRICTED or FORBIDDEN dimension — and
// any dimension missing from the zone map — lowers the score proportionally.
// A nil evaluation scores 0.
func ComputeSafetyScore(eval *EvaluationResult) int {
	if eval == nil {
		return 0
	}
	total := len(allDimensions)
	if total == 0 {
		return 100
	}
	safe := 0
	for _, zi := range eval.ZoneMap {
		if zi.Zone == ZoneSafe {
			safe++
		}
	}
	return safe * 100 / total
}
// ComputeUtilityScore returns 0-100 where 100 = no changes from original.
// Each changed dimension subtracts its business weight (default 3 when
// unlisted) relative to the sum of all weights; nil inputs score 0.
func ComputeUtilityScore(original, variant *DimensionConfig) int {
	if original == nil || variant == nil {
		return 0
	}
	deltas := original.Diff(variant)
	if len(deltas) == 0 {
		return 100
	}
	var maxCost, cost int
	for _, w := range dimensionBusinessWeight {
		maxCost += w
	}
	for _, d := range deltas {
		if w := dimensionBusinessWeight[d.Dimension]; w != 0 {
			cost += w
		} else {
			cost += 3 // default weight for unlisted dimensions
		}
	}
	if cost >= maxCost {
		return 0
	}
	return 100 - cost*100/maxCost
}
// ComputeCompositeScore combines safety and utility into a single ranking
// score as a weighted sum.
func ComputeCompositeScore(safety, utility int, weights ScoreWeights) float64 {
	weighted := float64(safety)*weights.Safety + float64(utility)*weights.Utility
	return weighted
}

View File

@@ -0,0 +1,88 @@
package maximizer
import "testing"
// TestSafetyScoreAllSafe: a zone map with every dimension SAFE scores 100.
func TestSafetyScoreAllSafe(t *testing.T) {
	zm := make(map[string]ZoneInfo)
	for _, dim := range allDimensions {
		zm[dim] = ZoneInfo{Zone: ZoneSafe}
	}
	eval := &EvaluationResult{ZoneMap: zm}
	score := ComputeSafetyScore(eval)
	if score != 100 {
		t.Errorf("expected 100, got %d", score)
	}
}

// TestSafetyScoreWithRestrictions: three non-SAFE dimensions must reduce the
// score by exactly their proportional share.
func TestSafetyScoreWithRestrictions(t *testing.T) {
	zm := make(map[string]ZoneInfo)
	for _, dim := range allDimensions {
		zm[dim] = ZoneInfo{Zone: ZoneSafe}
	}
	// Mark 3 as restricted
	zm["automation_level"] = ZoneInfo{Zone: ZoneRestricted}
	zm["human_in_loop"] = ZoneInfo{Zone: ZoneRestricted}
	zm["logging_required"] = ZoneInfo{Zone: ZoneForbidden}
	eval := &EvaluationResult{ZoneMap: zm}
	score := ComputeSafetyScore(eval)
	safe := len(allDimensions) - 3
	expected := (safe * 100) / len(allDimensions)
	if score != expected {
		t.Errorf("expected %d, got %d", expected, score)
	}
}

// TestSafetyScoreNil: a nil evaluation scores 0.
func TestSafetyScoreNil(t *testing.T) {
	if s := ComputeSafetyScore(nil); s != 0 {
		t.Errorf("expected 0 for nil, got %d", s)
	}
}
// TestUtilityScoreNoChanges: identical configs yield full utility (100).
func TestUtilityScoreNoChanges(t *testing.T) {
	config := &DimensionConfig{AutomationLevel: AutoFull}
	score := ComputeUtilityScore(config, config)
	if score != 100 {
		t.Errorf("expected 100 for identical configs, got %d", score)
	}
}

// TestUtilityScoreWithChanges: changing two weighted dimensions lands the
// score strictly between 0 and 100.
func TestUtilityScoreWithChanges(t *testing.T) {
	original := &DimensionConfig{
		AutomationLevel: AutoFull,
		HumanInLoop:     HILNone,
	}
	variant := &DimensionConfig{
		AutomationLevel: AutoAssistive,
		HumanInLoop:     HILRequired,
	}
	score := ComputeUtilityScore(original, variant)
	if score >= 100 {
		t.Errorf("expected < 100 with changes, got %d", score)
	}
	if score <= 0 {
		t.Errorf("expected > 0 for moderate changes, got %d", score)
	}
}

// TestUtilityScoreNil: nil configs score 0.
func TestUtilityScoreNil(t *testing.T) {
	if s := ComputeUtilityScore(nil, nil); s != 0 {
		t.Errorf("expected 0 for nil, got %d", s)
	}
}

// TestCompositeScore: default weights blend 80/60 into exactly 68.
func TestCompositeScore(t *testing.T) {
	score := ComputeCompositeScore(80, 60, DefaultWeights)
	expected := 0.4*80.0 + 0.6*60.0 // 32 + 36 = 68
	if score != expected {
		t.Errorf("expected %.1f, got %.1f", expected, score)
	}
}

// TestCompositeScoreCustomWeights: a pure-safety weighting passes safety through.
func TestCompositeScoreCustomWeights(t *testing.T) {
	score := ComputeCompositeScore(100, 0, ScoreWeights{Safety: 1.0, Utility: 0.0})
	if score != 100.0 {
		t.Errorf("expected 100, got %.1f", score)
	}
}

View File

@@ -0,0 +1,144 @@
package maximizer
import (
"context"
"fmt"
"github.com/breakpilot/ai-compliance-sdk/internal/ucca"
"github.com/google/uuid"
)
// Service contains the business logic for the Compliance Maximizer.
type Service struct {
	store     *Store      // persistence for optimization results
	evaluator *Evaluator  // 3-zone rule evaluation
	optimizer *Optimizer  // compliant-variant generation
	uccaStore *ucca.Store // source of existing UCCA assessments
	rules     *ConstraintRuleSet
}

// NewService creates a maximizer service. The evaluator and optimizer are
// both built from the same rule set, so evaluation and optimization agree.
func NewService(store *Store, uccaStore *ucca.Store, rules *ConstraintRuleSet) *Service {
	eval := NewEvaluator(rules)
	opt := NewOptimizer(eval)
	return &Service{
		store:     store,
		evaluator: eval,
		optimizer: opt,
		uccaStore: uccaStore,
		rules:     rules,
	}
}
// OptimizeInput is the request to optimize a dimension config.
type OptimizeInput struct {
	Config   DimensionConfig `json:"config"`
	Title    string          `json:"title"`
	TenantID uuid.UUID       `json:"-"` // excluded from JSON; populated by the caller
	UserID   uuid.UUID       `json:"-"` // excluded from JSON; populated by the caller
}

// OptimizeFromIntakeInput wraps a UCCA intake for optimization.
type OptimizeFromIntakeInput struct {
	Intake   ucca.UseCaseIntake `json:"intake"`
	Title    string             `json:"title"`
	TenantID uuid.UUID          `json:"-"` // excluded from JSON; populated by the caller
	UserID   uuid.UUID          `json:"-"` // excluded from JSON; populated by the caller
}
// Optimize runs the optimizer on the supplied dimension config and persists
// the outcome as a new Optimization record for the tenant.
func (s *Service) Optimize(ctx context.Context, in *OptimizeInput) (*Optimization, error) {
	res := s.optimizer.Optimize(&in.Config)
	opt := &Optimization{
		TenantID:           in.TenantID,
		Title:              in.Title,
		InputConfig:        in.Config,
		IsCompliant:        res.OriginalCompliant,
		OriginalEvaluation: *res.OriginalEval,
		Variants:           res.Variants,
		ZoneMap:            res.OriginalEval.ZoneMap,
		ConstraintVersion:  s.rules.Version,
		CreatedBy:          in.UserID,
		MaxSafeConfig:      res.MaxSafeConfig, // nil when no compliant variant exists
	}
	if err := s.store.CreateOptimization(ctx, opt); err != nil {
		return nil, fmt.Errorf("optimize: %w", err)
	}
	return opt, nil
}
// OptimizeFromIntake maps a UCCA intake onto the dimension space and then
// delegates to Optimize for evaluation and persistence.
func (s *Service) OptimizeFromIntake(ctx context.Context, in *OptimizeFromIntakeInput) (*Optimization, error) {
	mapped := MapIntakeToDimensions(&in.Intake)
	req := OptimizeInput{
		Config:   *mapped,
		Title:    in.Title,
		TenantID: in.TenantID,
		UserID:   in.UserID,
	}
	return s.Optimize(ctx, &req)
}
// OptimizeFromAssessment loads an existing UCCA assessment, maps its intake
// to the dimension space, optimizes it, and persists the result linked to
// the assessment via AssessmentID. Title is inherited from the assessment.
func (s *Service) OptimizeFromAssessment(ctx context.Context, assessmentID, tenantID, userID uuid.UUID) (*Optimization, error) {
	assessment, err := s.uccaStore.GetAssessment(ctx, assessmentID)
	if err != nil {
		return nil, fmt.Errorf("load assessment %s: %w", assessmentID, err)
	}
	config := MapIntakeToDimensions(&assessment.Intake)
	result := s.optimizer.Optimize(config)
	o := &Optimization{
		TenantID:           tenantID,
		AssessmentID:       &assessmentID,
		Title:              assessment.Title,
		InputConfig:        *config,
		IsCompliant:        result.OriginalCompliant,
		OriginalEvaluation: *result.OriginalEval,
		Variants:           result.Variants,
		ZoneMap:            result.OriginalEval.ZoneMap,
		ConstraintVersion:  s.rules.Version,
		CreatedBy:          userID,
	}
	if result.MaxSafeConfig != nil {
		o.MaxSafeConfig = result.MaxSafeConfig
	}
	if err := s.store.CreateOptimization(ctx, o); err != nil {
		return nil, fmt.Errorf("optimize from assessment: %w", err)
	}
	return o, nil
}
// Evaluate only evaluates without persisting (3-zone analysis).
func (s *Service) Evaluate(config *DimensionConfig) *EvaluationResult {
	return s.evaluator.Evaluate(config)
}

// GetOptimization retrieves a stored optimization.
func (s *Service) GetOptimization(ctx context.Context, id uuid.UUID) (*Optimization, error) {
	return s.store.GetOptimization(ctx, id)
}

// ListOptimizations returns optimizations for a tenant.
func (s *Service) ListOptimizations(ctx context.Context, tenantID uuid.UUID, f *OptimizationFilters) ([]Optimization, int, error) {
	return s.store.ListOptimizations(ctx, tenantID, f)
}

// DeleteOptimization removes an optimization.
func (s *Service) DeleteOptimization(ctx context.Context, id uuid.UUID) error {
	return s.store.DeleteOptimization(ctx, id)
}

// GetDimensionSchema returns the dimension schema for the frontend.
// NOTE(review): this hands out the package-level AllValues map directly;
// callers must treat it as read-only or shared state will be corrupted.
func (s *Service) GetDimensionSchema() map[string][]string {
	return AllValues
}

// GetConstraintRules returns the loaded rules for transparency.
// The shared rule set pointer is returned; callers must not mutate it.
func (s *Service) GetConstraintRules() *ConstraintRuleSet {
	return s.rules
}

View File

@@ -0,0 +1,209 @@
package maximizer
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
)
// Optimization is the DB entity for a maximizer optimization result.
// The config/evaluation/variant fields are stored as JSONB columns.
type Optimization struct {
	ID                 uuid.UUID           `json:"id"`
	TenantID           uuid.UUID           `json:"tenant_id"`
	AssessmentID       *uuid.UUID          `json:"assessment_id,omitempty"` // set when derived from a UCCA assessment
	Title              string              `json:"title"`
	Status             string              `json:"status"` // defaulted to "completed" on insert
	InputConfig        DimensionConfig     `json:"input_config"`
	IsCompliant        bool                `json:"is_compliant"`
	OriginalEvaluation EvaluationResult    `json:"original_evaluation"`
	MaxSafeConfig      *OptimizedVariant   `json:"max_safe_config,omitempty"` // nil when no compliant variant exists
	Variants           []OptimizedVariant  `json:"variants"`
	ZoneMap            map[string]ZoneInfo `json:"zone_map"`
	ConstraintVersion  string              `json:"constraint_version"` // rule-set version used for the evaluation
	CreatedAt          time.Time           `json:"created_at"`
	UpdatedAt          time.Time           `json:"updated_at"`
	CreatedBy          uuid.UUID           `json:"created_by"`
}
// Store handles maximizer data persistence.
type Store struct {
	pool *pgxpool.Pool // shared Postgres connection pool
}

// NewStore creates a new maximizer store.
func NewStore(pool *pgxpool.Pool) *Store {
	return &Store{pool: pool}
}
// CreateOptimization persists a new optimization result.
//
// Side effects on o: assigns a fresh ID, sets CreatedAt/UpdatedAt to the
// current UTC time, and defaults Status ("completed") and ConstraintVersion
// ("1.0.0") when empty.
func (s *Store) CreateOptimization(ctx context.Context, o *Optimization) error {
	o.ID = uuid.New()
	o.CreatedAt = time.Now().UTC()
	o.UpdatedAt = o.CreatedAt
	if o.Status == "" {
		o.Status = "completed"
	}
	if o.ConstraintVersion == "" {
		o.ConstraintVersion = "1.0.0"
	}
	// Marshal the JSONB columns up front; a marshal failure must abort the
	// insert instead of silently persisting empty payloads (errors were
	// previously discarded with _).
	inputConfig, err := json.Marshal(o.InputConfig)
	if err != nil {
		return fmt.Errorf("marshal input_config: %w", err)
	}
	originalEval, err := json.Marshal(o.OriginalEvaluation)
	if err != nil {
		return fmt.Errorf("marshal original_evaluation: %w", err)
	}
	maxSafe, err := json.Marshal(o.MaxSafeConfig)
	if err != nil {
		return fmt.Errorf("marshal max_safe_config: %w", err)
	}
	variants, err := json.Marshal(o.Variants)
	if err != nil {
		return fmt.Errorf("marshal variants: %w", err)
	}
	zoneMap, err := json.Marshal(o.ZoneMap)
	if err != nil {
		return fmt.Errorf("marshal zone_map: %w", err)
	}
	_, err = s.pool.Exec(ctx, `
	INSERT INTO maximizer_optimizations (
		id, tenant_id, assessment_id, title, status,
		input_config, is_compliant, original_evaluation,
		max_safe_config, variants, zone_map,
		constraint_version, created_at, updated_at, created_by
	) VALUES (
		$1, $2, $3, $4, $5,
		$6, $7, $8,
		$9, $10, $11,
		$12, $13, $14, $15
	)`,
		o.ID, o.TenantID, o.AssessmentID, o.Title, o.Status,
		inputConfig, o.IsCompliant, originalEval,
		maxSafe, variants, zoneMap,
		o.ConstraintVersion, o.CreatedAt, o.UpdatedAt, o.CreatedBy,
	)
	if err != nil {
		return fmt.Errorf("create optimization: %w", err)
	}
	return nil
}
// GetOptimization retrieves a single optimization by ID.
func (s *Store) GetOptimization(ctx context.Context, id uuid.UUID) (*Optimization, error) {
	const q = `
	SELECT id, tenant_id, assessment_id, title, status,
	       input_config, is_compliant, original_evaluation,
	       max_safe_config, variants, zone_map,
	       constraint_version, created_at, updated_at, created_by
	FROM maximizer_optimizations WHERE id = $1`
	return s.scanOptimization(s.pool.QueryRow(ctx, q, id))
}
// OptimizationFilters for list queries.
type OptimizationFilters struct {
	IsCompliant *bool  // nil = no compliance filter
	Search      string // case-insensitive substring match on title
	Limit       int    // page size; <= 0 defaults to 20
	Offset      int    // row offset for pagination
}
// ListOptimizations returns a page of optimizations for a tenant plus the
// total (unpaged) match count. Results are ordered newest-first.
//
// Fix: rows.Err() is now checked after iteration — pgx can terminate
// rows.Next() early on a connection error, which was previously swallowed
// and returned as a silently truncated result set.
func (s *Store) ListOptimizations(ctx context.Context, tenantID uuid.UUID, f *OptimizationFilters) ([]Optimization, int, error) {
	if f == nil {
		f = &OptimizationFilters{}
	}
	if f.Limit <= 0 {
		f.Limit = 20
	}
	// Build the WHERE clause with positional placeholders; idx tracks the
	// next placeholder number.
	where := "WHERE tenant_id = $1"
	args := []interface{}{tenantID}
	idx := 2
	if f.IsCompliant != nil {
		where += fmt.Sprintf(" AND is_compliant = $%d", idx)
		args = append(args, *f.IsCompliant)
		idx++
	}
	if f.Search != "" {
		where += fmt.Sprintf(" AND title ILIKE $%d", idx)
		args = append(args, "%"+f.Search+"%")
		idx++
	}
	// Count matching rows before applying LIMIT/OFFSET.
	var total int
	countQuery := "SELECT COUNT(*) FROM maximizer_optimizations " + where
	if err := s.pool.QueryRow(ctx, countQuery, args...).Scan(&total); err != nil {
		return nil, 0, fmt.Errorf("count optimizations: %w", err)
	}
	// Fetch the requested page.
	query := fmt.Sprintf(`
	SELECT id, tenant_id, assessment_id, title, status,
	       input_config, is_compliant, original_evaluation,
	       max_safe_config, variants, zone_map,
	       constraint_version, created_at, updated_at, created_by
	FROM maximizer_optimizations %s
	ORDER BY created_at DESC
	LIMIT $%d OFFSET $%d`, where, idx, idx+1)
	args = append(args, f.Limit, f.Offset)
	rows, err := s.pool.Query(ctx, query, args...)
	if err != nil {
		return nil, 0, fmt.Errorf("list optimizations: %w", err)
	}
	defer rows.Close()
	var results []Optimization
	for rows.Next() {
		o, err := s.scanOptimizationRows(rows)
		if err != nil {
			return nil, 0, err
		}
		results = append(results, *o)
	}
	if err := rows.Err(); err != nil {
		return nil, 0, fmt.Errorf("iterate optimizations: %w", err)
	}
	return results, total, nil
}
// DeleteOptimization removes an optimization.
func (s *Store) DeleteOptimization(ctx context.Context, id uuid.UUID) error {
	if _, err := s.pool.Exec(ctx, `DELETE FROM maximizer_optimizations WHERE id = $1`, id); err != nil {
		return fmt.Errorf("delete optimization: %w", err)
	}
	return nil
}
// scanOptimization scans a single pgx row into an Optimization and
// decodes the JSONB columns into their typed fields. A JSON decode
// failure (corrupted stored data) is returned as an error rather than
// silently ignored; NULL JSONB columns leave the zero value in place.
func (s *Store) scanOptimization(row pgx.Row) (*Optimization, error) {
	var o Optimization
	var inputConfig, originalEval, maxSafe, variants, zoneMap []byte
	err := row.Scan(
		&o.ID, &o.TenantID, &o.AssessmentID, &o.Title, &o.Status,
		&inputConfig, &o.IsCompliant, &originalEval,
		&maxSafe, &variants, &zoneMap,
		&o.ConstraintVersion, &o.CreatedAt, &o.UpdatedAt, &o.CreatedBy,
	)
	if err != nil {
		return nil, fmt.Errorf("scan optimization: %w", err)
	}
	for _, col := range []struct {
		name string
		raw  []byte
		dst  interface{}
	}{
		{"input_config", inputConfig, &o.InputConfig},
		{"original_evaluation", originalEval, &o.OriginalEvaluation},
		{"max_safe_config", maxSafe, &o.MaxSafeConfig},
		{"variants", variants, &o.Variants},
		{"zone_map", zoneMap, &o.ZoneMap},
	} {
		if len(col.raw) == 0 {
			continue // NULL column: keep the field's zero value
		}
		if err := json.Unmarshal(col.raw, col.dst); err != nil {
			return nil, fmt.Errorf("decode %s: %w", col.name, err)
		}
	}
	return &o, nil
}
// scanOptimizationRows scans the current row of a pgx.Rows cursor into
// an Optimization, decoding the JSONB columns into their typed fields.
// A JSON decode failure (corrupted stored data) is returned as an error
// rather than silently ignored; NULL JSONB columns leave the zero value.
func (s *Store) scanOptimizationRows(rows pgx.Rows) (*Optimization, error) {
	var o Optimization
	var inputConfig, originalEval, maxSafe, variants, zoneMap []byte
	err := rows.Scan(
		&o.ID, &o.TenantID, &o.AssessmentID, &o.Title, &o.Status,
		&inputConfig, &o.IsCompliant, &originalEval,
		&maxSafe, &variants, &zoneMap,
		&o.ConstraintVersion, &o.CreatedAt, &o.UpdatedAt, &o.CreatedBy,
	)
	if err != nil {
		return nil, fmt.Errorf("scan optimization row: %w", err)
	}
	for _, col := range []struct {
		name string
		raw  []byte
		dst  interface{}
	}{
		{"input_config", inputConfig, &o.InputConfig},
		{"original_evaluation", originalEval, &o.OriginalEvaluation},
		{"max_safe_config", maxSafe, &o.MaxSafeConfig},
		{"variants", variants, &o.Variants},
		{"zone_map", zoneMap, &o.ZoneMap},
	} {
		if len(col.raw) == 0 {
			continue // NULL column: keep the field's zero value
		}
		if err := json.Unmarshal(col.raw, col.dst); err != nil {
			return nil, fmt.Errorf("decode %s: %w", col.name, err)
		}
	}
	return &o, nil
}