refactor(backend/api): split schemas.py into per-domain modules (1899 -> 39 LOC shim)
Phase 1 Step 3 of PHASE1_RUNBOOK.md. compliance/api/schemas.py is
decomposed into 16 per-domain Pydantic schema modules under
compliance/schemas/:
common.py ( 79) — 6 API enums + PaginationMeta
regulation.py ( 52)
requirement.py ( 80)
control.py (119) — Control + Mapping
evidence.py ( 66)
risk.py ( 79)
ai_system.py ( 63)
dashboard.py (195) — Dashboard, Export, Executive Dashboard
service_module.py (121)
bsi.py ( 58) — BSI + PDF extraction
audit_session.py (172)
report.py ( 53)
isms_governance.py (343) — Scope, Context, Policy, Objective, SoA
isms_audit.py (431) — Finding, CAPA, Review, Internal Audit, Readiness, Trail, ISO27001
vvt.py (168)
tom.py ( 71)
compliance/api/schemas.py becomes a 39-line re-export shim so existing
imports (`from compliance.api.schemas import RegulationResponse`) keep
working unchanged. New code should import from the domain module
directly (`from compliance.schemas.regulation import RegulationResponse`).
Deferred-from-sweep: all 28 `class Config` blocks in the original file
were converted to `model_config = ConfigDict(...)` during the split, so
the PydanticDeprecatedSince20 warnings that originated in schemas.py are
now gone.
Cross-domain references handled via targeted imports (e.g. dashboard.py
imports EvidenceResponse from evidence, RiskResponse from risk). common
API enums + PaginationMeta are imported by every domain module.
Verified:
- 173/173 pytest compliance/tests/ tests/contracts/ pass
- OpenAPI 360 paths / 484 operations unchanged (contract test green)
- All new files under the 500-line hard cap (largest: isms_audit.py
at 431, isms_governance.py at 343, dashboard.py at 195)
- No file in compliance/schemas/ or compliance/api/schemas.py
exceeds the hard cap
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
195
backend-compliance/compliance/schemas/dashboard.py
Normal file
195
backend-compliance/compliance/schemas/dashboard.py
Normal file
@@ -0,0 +1,195 @@
|
||||
"""
|
||||
Dashboard, Export, Executive Dashboard Pydantic schemas — extracted from compliance/api/schemas.py.
|
||||
|
||||
Phase 1 Step 3: the monolithic ``compliance.api.schemas`` module is being
|
||||
split per domain under ``compliance.schemas``. This module is re-exported
|
||||
from ``compliance.api.schemas`` for backwards compatibility.
|
||||
"""
|
||||
|
||||
from datetime import datetime, date
|
||||
from typing import Optional, List, Any, Dict
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from compliance.schemas.common import (
|
||||
PaginationMeta, RegulationType, ControlType, ControlDomain,
|
||||
ControlStatus, RiskLevel, EvidenceStatus,
|
||||
)
|
||||
from compliance.schemas.evidence import EvidenceResponse
|
||||
from compliance.schemas.risk import RiskResponse
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Dashboard & Export Schemas
|
||||
# ============================================================================
|
||||
|
||||
class DashboardResponse(BaseModel):
    """Aggregated compliance dashboard payload.

    Rolls up totals and per-status / per-domain breakdowns across
    regulations, requirements, controls, evidence and risks, plus an
    overall score and a recent-activity feed.
    """

    compliance_score: float  # overall score; presumably 0-100 — confirm against the producing endpoint
    total_regulations: int
    total_requirements: int
    total_controls: int
    controls_by_status: Dict[str, int]  # status name -> count
    controls_by_domain: Dict[str, Dict[str, int]]  # domain -> {status name -> count}
    total_evidence: int
    evidence_by_status: Dict[str, int]  # status name -> count
    total_risks: int
    risks_by_level: Dict[str, int]  # risk level -> count
    recent_activity: List[Dict[str, Any]]  # free-form entries; shape defined by the producer, not validated here
|
||||
|
||||
|
||||
class ExportRequest(BaseModel):
    """Parameters for requesting a compliance data export.

    All filters are optional; omitting them requests an unfiltered
    export of the chosen ``export_type``.
    """

    export_type: str = "full"  # "full", "controls_only", "evidence_only"
    included_regulations: Optional[List[str]] = None  # restrict export to these regulations
    included_domains: Optional[List[str]] = None  # restrict export to these control domains
    date_range_start: Optional[date] = None  # inclusivity not enforced here — defined by the export service
    date_range_end: Optional[date] = None
|
||||
|
||||
|
||||
class ExportResponse(BaseModel):
    """A single export job: request metadata, lifecycle state and results.

    ``file_*`` and summary fields stay ``None`` until the job completes;
    ``error_message`` is presumably populated on failure — confirm with
    the export service.
    """

    id: str
    export_type: str  # mirrors ExportRequest.export_type
    export_name: Optional[str] = None
    status: str  # lifecycle state string; value set defined by the export service
    requested_by: str
    requested_at: datetime
    completed_at: Optional[datetime] = None  # set once the job finishes
    file_path: Optional[str] = None
    file_hash: Optional[str] = None  # presumably a content digest for integrity checks — confirm algorithm
    file_size_bytes: Optional[int] = None
    total_controls: Optional[int] = None
    total_evidence: Optional[int] = None
    compliance_score: Optional[float] = None
    error_message: Optional[str] = None

    # Pydantic v2: allow construction directly from ORM/attribute objects.
    model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
class ExportListResponse(BaseModel):
    """Unpaginated list of export jobs with a total count."""

    exports: List[ExportResponse]
    total: int  # total number of exports; may exceed len(exports) if the producer truncates — confirm
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Seeding Schemas
|
||||
# ============================================================================
|
||||
|
||||
class SeedRequest(BaseModel):
    """Request body for the database seeding endpoint."""

    force: bool = False  # presumably re-seed even if data already exists — confirm with the seeding handler
|
||||
|
||||
|
||||
class SeedResponse(BaseModel):
    """Outcome of a seeding run."""

    success: bool
    message: str  # human-readable outcome description
    counts: Dict[str, int]  # entity name -> number of records seeded
|
||||
|
||||
|
||||
class PaginatedEvidenceResponse(BaseModel):
    """Paginated response for evidence.

    Wraps one page of :class:`EvidenceResponse` items together with
    the shared :class:`PaginationMeta` envelope.
    """

    data: List[EvidenceResponse]
    pagination: PaginationMeta
|
||||
|
||||
|
||||
class PaginatedRiskResponse(BaseModel):
    """Paginated response for risks.

    Wraps one page of :class:`RiskResponse` items together with the
    shared :class:`PaginationMeta` envelope.
    """

    data: List[RiskResponse]
    pagination: PaginationMeta
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Executive Dashboard Schemas (Phase 3 - Sprint 1)
|
||||
# ============================================================================
|
||||
|
||||
class TrendDataPoint(BaseModel):
    """A single data point for trend charts."""

    # NOTE(review): the field name ``date`` shadows the imported
    # ``datetime.date`` inside this class namespace; harmless here since
    # the annotation is ``str``, but a type annotation of ``date`` in a
    # later field of this class would silently refer to the field.
    date: str  # ISO date string
    score: float
    label: Optional[str] = None  # Formatted date for display (e.g., "Jan 26")
|
||||
|
||||
|
||||
class RiskSummary(BaseModel):
    """Summary of a risk for executive display.

    A trimmed-down projection of the full risk record (see
    ``compliance.schemas.risk.RiskResponse``) carrying only what the
    executive dashboard renders.
    """

    id: str  # internal record id
    risk_id: str  # human-facing risk identifier — distinct from ``id``
    title: str
    risk_level: str  # "low", "medium", "high", "critical"
    owner: Optional[str] = None
    status: str
    category: str
    impact: int  # scale not defined here — presumably a small ordinal (e.g. 1-5); confirm with producer
    likelihood: int  # same scale assumption as ``impact``
|
||||
|
||||
|
||||
class DeadlineItem(BaseModel):
    """An upcoming deadline for executive display."""

    id: str
    title: str
    deadline: str  # ISO date string
    days_remaining: int  # may be negative for overdue items — confirm with producer
    # NOTE(review): ``type`` shadows the builtin within the class
    # namespace; harmless for a Pydantic field.
    type: str  # "control_review", "evidence_expiry", "audit"
    status: str  # "on_track", "at_risk", "overdue"
    owner: Optional[str] = None
|
||||
|
||||
|
||||
class TeamWorkloadItem(BaseModel):
    """Workload distribution for a team or person."""

    name: str  # team or person name
    pending_tasks: int
    in_progress_tasks: int
    completed_tasks: int
    total_tasks: int  # presumably the sum of the three buckets above — not enforced here
    completion_rate: float  # presumably completed/total in [0, 1] — confirm with producer
|
||||
|
||||
|
||||
class ExecutiveDashboardResponse(BaseModel):
    """
    Executive Dashboard Response

    Provides a high-level overview for managers and executives:
    - Traffic light status (green/yellow/red)
    - Overall compliance score
    - 12-month trend data
    - Top 5 risks
    - Upcoming deadlines
    - Team workload distribution
    """

    traffic_light_status: str  # "green", "yellow", "red"
    overall_score: float
    score_trend: List[TrendDataPoint]  # chronological trend points for the score chart
    previous_score: Optional[float] = None  # score from the prior period, when available
    score_change: Optional[float] = None  # Positive = improvement

    # Counts
    total_regulations: int
    total_requirements: int
    total_controls: int
    open_risks: int

    # Top items
    top_risks: List[RiskSummary]  # docstring above says top 5 — length not enforced here
    upcoming_deadlines: List[DeadlineItem]

    # Workload
    team_workload: List[TeamWorkloadItem]

    # Last updated
    last_updated: str  # presumably an ISO timestamp string — confirm with producer
|
||||
|
||||
|
||||
class ComplianceSnapshotCreate(BaseModel):
    """Request to create a compliance snapshot.

    The snapshot contents are computed server-side; the caller only
    supplies optional free-text notes.
    """

    notes: Optional[str] = None
|
||||
|
||||
|
||||
class ComplianceSnapshotResponse(BaseModel):
    """Response for a compliance snapshot.

    A point-in-time capture of compliance scoring, broken down by
    regulation and by control domain.
    """

    id: str
    snapshot_date: str  # presumably an ISO date string — confirm with producer
    overall_score: float
    scores_by_regulation: Dict[str, float]  # regulation key -> score
    scores_by_domain: Dict[str, float]  # domain key -> score
    total_controls: int
    passed_controls: int
    failed_controls: int  # passed + failed need not equal total (e.g. untested controls) — confirm
    notes: Optional[str] = None  # free text supplied at creation time (ComplianceSnapshotCreate.notes)
    created_at: str  # presumably an ISO timestamp string — confirm with producer
|
||||
|
||||
Reference in New Issue
Block a user