"""
|
|
FastAPI routes for LLM Generation Audit Trail.
|
|
|
|
Endpoints:
|
|
- POST /llm-audit: Record an LLM generation event
|
|
- GET /llm-audit: List audit records with filters
|
|
"""
|
|
|
|
import logging
|
|
import uuid as uuid_module
|
|
from datetime import datetime
|
|
from typing import Optional
|
|
|
|
from fastapi import APIRouter, Depends, Query
|
|
from pydantic import BaseModel
|
|
from sqlalchemy.orm import Session
|
|
|
|
from classroom_engine.database import get_db
|
|
from ..db.models import LLMGenerationAuditDB
|
|
|
|
logger = logging.getLogger(__name__)
|
|
router = APIRouter(tags=["compliance-llm-audit"])
# ============================================================================
# Schemas
# ============================================================================
class LLMAuditCreate(BaseModel):
    """Request payload for recording a single LLM generation event.

    ``truth_status`` defaults to ``"generated"``; the create route coerces
    unrecognized values server-side rather than rejecting the request.
    ``metadata`` is persisted on the DB row under ``extra_metadata``.
    """

    entity_type: str
    entity_id: Optional[str] = None
    generation_mode: str
    truth_status: str = "generated"
    may_be_used_as_evidence: bool = False
    llm_model: Optional[str] = None
    llm_provider: Optional[str] = None
    prompt_hash: Optional[str] = None
    input_summary: Optional[str] = None   # truncated to 500 chars on persist
    output_summary: Optional[str] = None  # truncated to 500 chars on persist
    metadata: Optional[dict] = None
    tenant_id: Optional[str] = None
class LLMAuditResponse(BaseModel):
    """Serialized view of one LLM generation audit record."""

    id: str
    tenant_id: Optional[str] = None
    entity_type: str
    entity_id: Optional[str] = None
    generation_mode: str
    truth_status: str
    may_be_used_as_evidence: bool
    llm_model: Optional[str] = None
    llm_provider: Optional[str] = None
    prompt_hash: Optional[str] = None
    input_summary: Optional[str] = None
    output_summary: Optional[str] = None
    metadata: Optional[dict] = None
    created_at: datetime

    class Config:
        # Allow construction from ORM objects. The routes in this module
        # still build instances field-by-field because the ``metadata``
        # field maps from the DB column ``extra_metadata``.
        from_attributes = True
# ============================================================================
# Routes
# ============================================================================
@router.post("/llm-audit", response_model=LLMAuditResponse)
|
|
async def create_llm_audit(
|
|
data: LLMAuditCreate,
|
|
db: Session = Depends(get_db),
|
|
):
|
|
"""Record an LLM generation event for audit trail."""
|
|
from ..db.models import EvidenceTruthStatusEnum
|
|
|
|
# Validate truth_status
|
|
try:
|
|
truth_enum = EvidenceTruthStatusEnum(data.truth_status)
|
|
except ValueError:
|
|
truth_enum = EvidenceTruthStatusEnum.GENERATED
|
|
|
|
record = LLMGenerationAuditDB(
|
|
id=str(uuid_module.uuid4()),
|
|
tenant_id=data.tenant_id,
|
|
entity_type=data.entity_type,
|
|
entity_id=data.entity_id,
|
|
generation_mode=data.generation_mode,
|
|
truth_status=truth_enum,
|
|
may_be_used_as_evidence=data.may_be_used_as_evidence,
|
|
llm_model=data.llm_model,
|
|
llm_provider=data.llm_provider,
|
|
prompt_hash=data.prompt_hash,
|
|
input_summary=data.input_summary[:500] if data.input_summary else None,
|
|
output_summary=data.output_summary[:500] if data.output_summary else None,
|
|
extra_metadata=data.metadata or {},
|
|
)
|
|
db.add(record)
|
|
db.commit()
|
|
db.refresh(record)
|
|
|
|
return LLMAuditResponse(
|
|
id=record.id,
|
|
tenant_id=record.tenant_id,
|
|
entity_type=record.entity_type,
|
|
entity_id=record.entity_id,
|
|
generation_mode=record.generation_mode,
|
|
truth_status=record.truth_status.value if record.truth_status else "generated",
|
|
may_be_used_as_evidence=record.may_be_used_as_evidence,
|
|
llm_model=record.llm_model,
|
|
llm_provider=record.llm_provider,
|
|
prompt_hash=record.prompt_hash,
|
|
input_summary=record.input_summary,
|
|
output_summary=record.output_summary,
|
|
metadata=record.extra_metadata,
|
|
created_at=record.created_at,
|
|
)
|
|
|
|
|
|
@router.get("/llm-audit")
|
|
async def list_llm_audit(
|
|
entity_type: Optional[str] = Query(None),
|
|
entity_id: Optional[str] = Query(None),
|
|
page: int = Query(1, ge=1),
|
|
limit: int = Query(50, ge=1, le=200),
|
|
db: Session = Depends(get_db),
|
|
):
|
|
"""List LLM generation audit records with optional filters."""
|
|
query = db.query(LLMGenerationAuditDB)
|
|
|
|
if entity_type:
|
|
query = query.filter(LLMGenerationAuditDB.entity_type == entity_type)
|
|
if entity_id:
|
|
query = query.filter(LLMGenerationAuditDB.entity_id == entity_id)
|
|
|
|
total = query.count()
|
|
offset = (page - 1) * limit
|
|
records = query.order_by(LLMGenerationAuditDB.created_at.desc()).offset(offset).limit(limit).all()
|
|
|
|
return {
|
|
"records": [
|
|
LLMAuditResponse(
|
|
id=r.id,
|
|
tenant_id=r.tenant_id,
|
|
entity_type=r.entity_type,
|
|
entity_id=r.entity_id,
|
|
generation_mode=r.generation_mode,
|
|
truth_status=r.truth_status.value if r.truth_status else "generated",
|
|
may_be_used_as_evidence=r.may_be_used_as_evidence,
|
|
llm_model=r.llm_model,
|
|
llm_provider=r.llm_provider,
|
|
prompt_hash=r.prompt_hash,
|
|
input_summary=r.input_summary,
|
|
output_summary=r.output_summary,
|
|
metadata=r.extra_metadata,
|
|
created_at=r.created_at,
|
|
)
|
|
for r in records
|
|
],
|
|
"total": total,
|
|
"page": page,
|
|
"limit": limit,
|
|
}
|