Files
breakpilot-compliance/backend-compliance/compliance/api/agent_scan_routes.py
T
Benjamin Admin 7c7513525e feat: Document-centric scan results + DSI deduplication
DSI Dedup (consent-tester):
- Only H1/H2 headings count as documents (not H3/H4 sub-sections)
- Sub-sections (Cookies, Betroffenenrechte, Social Media) are part of
  parent document's full text, not separate documents
- Reduces IHK result from 30 to ~11 real documents

Backend (agent_scan_routes):
- ScanFinding gets doc_title field linking each finding to its document
- doc_title set when creating DSI findings for document attribution

Frontend (ScanResult.tsx):
- 3 sections: Services table, Document cards, General findings
- Documents: expandable cards with completeness bar (green/yellow/red)
- Findings grouped under their parent document
- Each card shows: title, word count, findings count, % completeness
- Findings without doc_title go to "Allgemeine Findings" section

Email Summary (agent_scan_helpers):
- Findings listed under their parent document
- General findings in separate section
- No more flat mixed list

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-05-05 09:56:29 +02:00

498 lines
20 KiB
Python

"""
Agent Website Scan Routes — deep scan endpoint that performs multi-page
website analysis with SOLL/IST service comparison.
POST /api/compliance/agent/scan
"""
import logging
import os
import re
from datetime import datetime, timezone
import httpx
from fastapi import APIRouter
from pydantic import BaseModel
from compliance.services.website_scanner import scan_website, DetectedService
from compliance.services.dse_service_extractor import extract_dse_services, compare_services
from compliance.services.smtp_sender import send_email
from compliance.services.dse_parser import parse_dse
from compliance.services.dse_matcher import build_text_references, TextReference
from compliance.services.mandatory_content_checker import (
check_mandatory_documents, check_dse_mandatory_content, MandatoryFinding,
)
from compliance.services.legal_basis_validator import validate_legal_bases
from compliance.api.agent_scan_helpers import (
add_corrections, build_scan_summary, fetch_dse_text, fetch_dse_html,
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/compliance/agent", tags=["agent"])
# Base URL of the AI SDK sidecar; override via AI_SDK_URL for local dev.
SDK_URL = os.environ.get("AI_SDK_URL", "http://bp-compliance-ai-sdk:8090")
# NOTE(review): hard-coded tenant/user IDs — presumably a single-tenant
# deployment default; confirm before any multi-tenant use.
TENANT_ID = "9282a473-5c95-4b3a-bf78-0ecc0ec71d3e"
USER_ID = "00000000-0000-0000-0000-000000000001"
# Default headers for calls to the AI SDK service.
SDK_HEADERS = {
    "Content-Type": "application/json",
    "X-Tenant-ID": TENANT_ID,
    "X-User-ID": USER_ID,
}
class ScanRequest(BaseModel):
    """Request body for POST /compliance/agent/scan."""
    url: str                                  # Root URL of the website to scan.
    mode: str = "post_launch"                 # "post_launch" = live site (higher severities); anything else = pre-launch review.
    recipient: str = "dsb@breakpilot.local"   # Email address that receives the scan report.
class ServiceInfo(BaseModel):
    """A third-party service detected on the site and its SOLL/IST status."""
    name: str               # Display name, e.g. service product name.
    category: str           # Registry category (e.g. "chatbot"); "other" when unknown.
    provider: str           # Operating company.
    country: str            # Provider country, used for transfer assessment.
    eu_adequate: bool       # False triggers a third-country transfer finding.
    requires_consent: bool  # Whether the service needs consent before loading.
    legal_ref: str          # Legal reference string from the service registry.
    in_dse: bool            # True if the privacy policy mentions this service.
    status: str  # "ok", "undocumented", "outdated"
class TextReferenceModel(BaseModel):
    """Location of (and suggested correction for) a passage in a legal document."""
    found: bool = False                            # Whether a matching passage was located.
    source_url: str = ""                           # Page the passage was found on.
    document_type: str = "Datenschutzerklaerung"   # Document kind the reference points into.
    section_heading: str = ""                      # Heading of the containing section.
    section_number: str = ""                       # Numbering of the containing section.
    parent_section: str = ""                       # Heading of the parent section, if nested.
    paragraph_index: int = 0                       # Paragraph position within the section.
    original_text: str = ""                        # Verbatim passage being flagged.
    issue: str = ""                                # Issue tag, e.g. "incorrect".
    correction_type: str = ""                      # How to apply the fix, e.g. "replace".
    correction_text: str = ""                      # Suggested replacement text.
    insert_after: str = ""                         # Anchor text for insert-style corrections.
class ScanFinding(BaseModel):
    """A single compliance finding produced by the scan."""
    code: str              # Stable finding code, e.g. "DSE-MISSING-<ID>".
    severity: str          # "HIGH" / "MEDIUM" / "LOW".
    text: str              # Human-readable description (German).
    correction: str = ""   # Optional generated correction text (pre-launch mode).
    doc_title: str = ""    # Title of the parent document; "" means general finding.
    text_reference: TextReferenceModel | None = None  # Optional in-document location.
class DiscoveredDocument(BaseModel):
    """A legal document found during DSI discovery."""
    title: str                 # Document title as discovered on the site.
    url: str                   # Location of the document.
    doc_type: str              # Classified type from classify_document_type().
    language: str = ""         # Detected language, if reported by discovery.
    word_count: int = 0        # Length of the document's full text.
    completeness_pct: int = 0  # 0-100, parsed from the checker's SCORE finding.
    findings_count: int = 0    # Number of non-SCORE findings for this document.
class ScanResponse(BaseModel):
    """Full result of a completed website scan."""
    url: str                           # Scanned root URL.
    pages_scanned: int                 # Count of pages visited.
    pages_list: list[str] = []         # URLs of visited pages.
    services: list[ServiceInfo]        # SOLL/IST service rows.
    findings: list[ScanFinding]        # All findings (service, mandatory, DSI, legal basis).
    discovered_documents: list[DiscoveredDocument] = []  # Legal documents from DSI discovery.
    ai_detected: bool                  # True if AI mentions were found on any page.
    chatbot_detected: bool             # True if a chatbot-category service was detected.
    chatbot_provider: str              # Name of the detected chatbot service, if any.
    missing_pages: dict                # URL -> HTTP status for expected-but-missing pages.
    summary: str                       # Human-readable report (also sent by email).
    email_status: str                  # Send status; "failed" when no status reported.
    scanned_at: str                    # UTC ISO-8601 completion timestamp.
import asyncio
import uuid as _uuid
# In-memory scan job store (survives until container restart)
# NOTE(review): entries are never evicted, so memory grows with each scan;
# acceptable for an ephemeral container, but confirm if scans are frequent.
_scan_jobs: dict[str, dict] = {}
class ScanStartResponse(BaseModel):
    """Immediate response of POST /scan: the id to poll."""
    scan_id: str            # Short id for GET /scan/{scan_id}.
    status: str = "running" # Initial job state.
    message: str = ""       # Human-readable hint for the caller.
class ScanStatusResponse(BaseModel):
    """Polling response for GET /scan/{scan_id}."""
    scan_id: str                       # Echoed scan id.
    status: str  # "running", "completed", "failed"
    progress: str = ""                 # Last progress message from the pipeline.
    result: ScanResponse | None = None # Populated once status == "completed".
    error: str = ""                    # Truncated error text when status == "failed".
@router.post("/scan")
async def scan_website_endpoint(req: ScanRequest):
    """Start async website scan. Returns scan_id immediately.

    Poll GET /scan/{scan_id} for status and results.
    """
    scan_id = str(_uuid.uuid4())[:8]
    _scan_jobs[scan_id] = {"status": "running", "progress": "Scan gestartet...", "result": None, "error": ""}
    # Launch scan in background. Keep a strong reference to the task in the
    # job record: the event loop only holds a weak reference to tasks, so a
    # bare create_task() result can be garbage-collected mid-scan.
    task = asyncio.create_task(_run_scan(scan_id, req))
    _scan_jobs[scan_id]["task"] = task
    return ScanStartResponse(scan_id=scan_id, status="running", message="Scan gestartet. Ergebnisse unter GET /scan/{scan_id}")
@router.get("/scan/{scan_id}")
async def get_scan_status(scan_id: str):
    """Look up a scan job by id and report its current state.

    The full ScanResponse is included once the job has completed; an
    unknown id yields a plain not_found payload.
    """
    job_record = _scan_jobs.get(scan_id)
    if not job_record:
        return {"scan_id": scan_id, "status": "not_found", "error": "Scan-ID nicht gefunden"}
    return ScanStatusResponse(
        scan_id=scan_id,
        status=job_record["status"],
        progress=job_record.get("progress", ""),
        result=job_record.get("result"),
        error=job_record.get("error", ""),
    )
async def _run_scan(scan_id: str, req: ScanRequest):
    """Background scan task — records outcome or failure in _scan_jobs."""
    try:
        outcome = await _execute_scan(req, scan_id)
        _scan_jobs[scan_id].update(status="completed", result=outcome, progress="Fertig")
    except Exception as exc:
        # Surface any pipeline failure to the polling endpoint (truncated).
        logger.error("Scan %s failed: %s", scan_id, exc)
        _scan_jobs[scan_id].update(status="failed", error=str(exc)[:500])
async def _execute_scan(req: ScanRequest, scan_id: str = "") -> ScanResponse:
    """Execute the full scan pipeline (called as background task).

    Pipeline: website crawl (Playwright service, httpx fallback), DSI
    document discovery, DSE text/HTML extraction, SOLL/IST service
    comparison, finding generation (mandatory content, legal bases, DSI),
    optional correction generation (pre-launch), summary build, and email
    notification.  Progress messages are written to _scan_jobs[scan_id]
    when a scan_id is given.
    """
    # Live sites get stricter severities in _build_findings.
    is_live = req.mode == "post_launch"
    def _progress(msg: str):
        # Best-effort progress reporting; no-op for unknown/absent scan ids.
        if scan_id and scan_id in _scan_jobs:
            _scan_jobs[scan_id]["progress"] = msg
    _progress("Schritt 1/7: Website wird gescannt...")
    # Step 1: Scan website — try Playwright first (JS-rendered), fallback to httpx
    playwright_htmls: dict[str, str] = {}
    try:
        async with httpx.AsyncClient(timeout=300.0) as pw_client:
            pw_resp = await pw_client.post(
                "http://bp-compliance-consent-tester:8094/website-scan",
                json={"url": req.url, "max_pages": 50, "click_nav": True},
            )
            if pw_resp.status_code == 200:
                pw_data = pw_resp.json()
                playwright_htmls = pw_data.get("page_htmls", {})
                logger.info("Playwright scan: %d pages, %d scripts",
                    pw_data.get("pages_count", 0), len(pw_data.get("external_scripts", [])))
    except Exception as e:
        # Playwright sidecar is optional; httpx crawl below covers the gap.
        logger.warning("Playwright scanner unavailable, falling back to httpx: %s", e)
    # Use Playwright results if available, otherwise fall back to httpx scanner
    if playwright_htmls:
        # Build ScanResult from Playwright data (local import avoids pulling
        # private detector helpers at module load time).
        from compliance.services.website_scanner import ScanResult, DetectedService, _detect_services, _detect_ai_mentions
        from compliance.services.service_registry import SERVICE_REGISTRY
        scan = ScanResult()
        scan.pages_scanned = list(playwright_htmls.keys())
        for page_url, html in playwright_htmls.items():
            _detect_services(html, page_url, scan)
            _detect_ai_mentions(html, page_url, scan)
        # Deduplicate detected services by id, keeping first occurrence.
        seen = set()
        unique = []
        for svc in scan.detected_services:
            if svc.id not in seen:
                seen.add(svc.id)
                unique.append(svc)
        scan.detected_services = unique
        scan.chatbot_detected = any(s.category == "chatbot" for s in scan.detected_services)
        if scan.chatbot_detected:
            scan.chatbot_provider = next(s.name for s in scan.detected_services if s.category == "chatbot")
    else:
        scan = await scan_website(req.url)
    logger.info("Scanned %d pages, found %d services", len(scan.pages_scanned), len(scan.detected_services))
    _progress(f"Schritt 2/7: Rechtliche Dokumente suchen... ({len(scan.pages_scanned)} Seiten gescannt)")
    # Step 1b: DSI Discovery — find all legal documents on the website
    discovered_docs: list[DiscoveredDocument] = []
    dsi_findings: list[ScanFinding] = []
    try:
        async with httpx.AsyncClient(timeout=300.0) as dsi_client:
            dsi_resp = await dsi_client.post(
                "http://bp-compliance-consent-tester:8094/dsi-discovery",
                json={"url": req.url, "max_documents": 30},
            )
            if dsi_resp.status_code == 200:
                dsi_data = dsi_resp.json()
                logger.info("DSI discovery: %d documents found", dsi_data.get("total_found", 0))
                # Check each document against its legal requirements
                from compliance.services.dsi_document_checker import (
                    check_document_completeness, classify_document_type,
                )
                for doc in dsi_data.get("documents", []):
                    doc_type = classify_document_type(doc["title"], doc["url"])
                    doc_text = doc.get("full_text", "") or doc.get("text_preview", "")
                    doc_findings = check_document_completeness(
                        doc_text, doc_type, doc["title"], doc["url"],
                    )
                    # Completeness percentage is parsed out of the checker's
                    # synthetic "...SCORE..." finding text (e.g. "73%").
                    score_finding = next((f for f in doc_findings if "SCORE" in f.get("code", "")), None)
                    completeness = 0
                    if score_finding:
                        import re as _re2
                        pct_match = _re2.search(r"(\d+)%", score_finding.get("text", ""))
                        if pct_match:
                            completeness = int(pct_match.group(1))
                    discovered_docs.append(DiscoveredDocument(
                        title=doc["title"], url=doc["url"],
                        doc_type=doc_type, language=doc.get("language", ""),
                        word_count=doc.get("word_count", 0),
                        completeness_pct=completeness,
                        findings_count=len([f for f in doc_findings if "SCORE" not in f.get("code", "")]),
                    ))
                    # Non-SCORE findings become ScanFindings attributed to
                    # their parent document via doc_title.
                    for df in doc_findings:
                        if "SCORE" not in df.get("code", ""):
                            dsi_findings.append(ScanFinding(
                                code=df["code"], severity=df["severity"], text=df["text"],
                                doc_title=doc["title"],
                            ))
    except Exception as e:
        # Discovery is best-effort; the scan proceeds without documents.
        logger.warning("DSI discovery failed: %s %s", type(e).__name__, e)
    _progress(f"Schritt 3/7: Datenschutzerklaerung analysieren... ({len(discovered_docs)} Dokumente gefunden)")
    # Step 2: Fetch privacy policy text
    # Priority: 1) Playwright HTMLs, 2) DSI Discovery full_text, 3) httpx fallback
    dse_text = ""
    for page_url, html in playwright_htmls.items():
        if re.search(r"datenschutz|privacy|dsgvo", page_url, re.IGNORECASE):
            # Strip scripts/styles and all tags, then collapse whitespace.
            clean = re.sub(r"<(script|style)[^>]*>.*?</\1>", "", html, flags=re.DOTALL | re.IGNORECASE)
            clean = re.sub(r"<[^>]+>", " ", clean)
            clean = re.sub(r"\s+", " ", clean).strip()
            dse_text = clean[:8000]
            break
    # Fallback: use DSI discovery texts (combined from all DSE documents found)
    if not dse_text and discovered_docs:
        try:
            # NOTE(review): `'dsi_resp' in dir()` checks local names and
            # re-parses the response; fragile — dsi_resp can exist even when
            # the discovery call failed after the POST. Consider reusing the
            # already-parsed dsi_data instead. TODO confirm and refactor.
            dsi_data_local = dsi_resp.json() if 'dsi_resp' in dir() else {}
            for doc in dsi_data_local.get("documents", []):
                if doc.get("doc_type", "") in ("dse", "privacy", "datenschutz") or \
                        "datenschutz" in doc.get("title", "").lower():
                    ft = doc.get("full_text", "")
                    # Prefer the longest available DSE text.
                    if ft and len(ft) > len(dse_text):
                        dse_text = ft[:8000]
        except Exception:
            pass
    if not dse_text:
        dse_text = await fetch_dse_text(req.url, scan.pages_scanned)
    # Step 3: Extract services mentioned in DSE via LLM + text fallback
    dse_services = await extract_dse_services(dse_text) if dse_text else []
    logger.info("DSE mentions %d services (LLM)", len(dse_services))
    # Fallback: if LLM extraction failed, search DSE text directly for service names
    if not dse_services and dse_text:
        dse_lower = dse_text.lower()
        detected_dicts_for_check = [_service_to_dict(s) for s in scan.detected_services]
        for svc in detected_dicts_for_check:
            name = svc.get("name", "").lower()
            # Check if service name appears in DSE text (len > 3 avoids
            # false positives on very short names).
            if name and len(name) > 3 and name in dse_lower:
                dse_services.append({"name": svc["name"], "purpose": "", "country": svc.get("country", ""), "legal_basis": ""})
        if dse_services:
            logger.info("DSE text fallback found %d services", len(dse_services))
    # Step 4: Parse DSE into structured sections (prefer Playwright HTML)
    dse_html = ""
    for page_url, html in playwright_htmls.items():
        if re.search(r"datenschutz|privacy|dsgvo", page_url, re.IGNORECASE):
            dse_html = html
            break
    if not dse_html:
        dse_html = await fetch_dse_html(req.url, scan.pages_scanned)
    dse_sections = parse_dse(dse_html, req.url) if dse_html else []
    logger.info("Parsed %d DSE sections", len(dse_sections))
    _progress("Schritt 4/7: SOLL/IST Vergleich...")
    # Step 5: SOLL/IST comparison
    detected_dicts = [_service_to_dict(s) for s in scan.detected_services]
    comparison = compare_services(detected_dicts, dse_services)
    # Step 6: Build TextReferences for each detected service
    text_refs = build_text_references(detected_dicts, dse_services, dse_sections, req.url)
    # Step 7: Generate findings with text references
    services_info, findings = _build_findings(comparison, scan, is_live, text_refs)
    # Step 8: Check mandatory content (documents + DSE sections)
    mandatory_findings = check_mandatory_documents(scan.pages_scanned, scan.missing_pages)
    mandatory_findings += check_dse_mandatory_content(dse_sections, dse_text)
    for mf in mandatory_findings:
        findings.append(ScanFinding(
            code=mf.code, severity=mf.severity,
            text=f"{mf.text}" + (f"{mf.suggestion}" if mf.suggestion else ""),
        ))
    # Step 8b: Validate legal bases (lit. a-f) in DSE
    if dse_text:
        lit_findings = validate_legal_bases(dse_text)
        for lf in lit_findings:
            findings.append(ScanFinding(
                code=f"LIT-{lf.purpose.upper()}",
                severity=lf.severity,
                text=lf.text,
                # Only attach a text reference when the validator located the
                # offending passage.
                text_reference=TextReferenceModel(
                    found=True, source_url=req.url,
                    original_text=lf.original_text,
                    issue="incorrect", correction_type="replace",
                    correction_text=f"Korrekte Rechtsgrundlage: {lf.correct_basis} ({lf.legal_ref})",
                ) if lf.original_text else None,
            ))
    # Step 8c: Add DSI document findings
    findings.extend(dsi_findings)
    _progress(f"Schritt 5/7: Korrekturen generieren... ({len(findings)} Findings)")
    # Step 9: Generate corrections for pre-launch mode
    if not is_live and findings:
        await add_corrections(findings, dse_text)
    _progress("Schritt 6/7: Report erstellen...")
    # Step 10: Build summary
    summary = build_scan_summary(req.url, scan, comparison, findings, is_live, discovered_docs)
    _progress("Schritt 7/7: E-Mail senden...")
    # Step 11: Send notification
    mode_label = "INTERNE PRUEFUNG" if not is_live else "LIVE-WEBSITE"
    email_result = send_email(
        recipient=req.recipient,
        subject=f"[{mode_label}] Website-Scan: {req.url[:50]}",
        body_html=f"<pre>{summary}</pre>",
    )
    return ScanResponse(
        url=req.url,
        pages_scanned=len(scan.pages_scanned),
        pages_list=scan.pages_scanned,
        services=services_info,
        findings=findings,
        discovered_documents=discovered_docs,
        ai_detected=len(scan.ai_mentions) > 0,
        chatbot_detected=scan.chatbot_detected,
        chatbot_provider=scan.chatbot_provider,
        missing_pages=scan.missing_pages,
        summary=summary,
        email_status=email_result.get("status", "failed"),
        scanned_at=datetime.now(timezone.utc).isoformat(),
    )
def _service_to_dict(svc: DetectedService) -> dict:
    """Flatten a DetectedService into the plain dict shape the comparators use."""
    fields = (
        "id", "name", "category", "provider", "country",
        "eu_adequate", "requires_consent", "legal_ref",
    )
    return {field: getattr(svc, field) for field in fields}
def _build_findings(
    comparison: dict, scan, is_live: bool, text_refs: dict | None = None,
) -> tuple[list[ServiceInfo], list[ScanFinding]]:
    """Translate the SOLL/IST comparison into service rows and findings.

    Emits one ServiceInfo per service (undocumented / documented /
    outdated) plus ScanFindings for every gap, and a missing-Impressum
    finding based on scan.missing_pages.
    """
    service_rows: list[ServiceInfo] = []
    issues: list[ScanFinding] = []
    ref_map = text_refs or {}

    _REF_FIELDS = (
        "found", "source_url", "document_type", "section_heading",
        "section_number", "parent_section", "paragraph_index",
        "original_text", "issue", "correction_type", "correction_text",
        "insert_after",
    )

    def _to_model(svc_id: str) -> TextReferenceModel | None:
        # Convert the matcher's TextReference into the API model, field by field.
        raw = ref_map.get(svc_id)
        if not raw:
            return None
        return TextReferenceModel(**{name: getattr(raw, name) for name in _REF_FIELDS})

    # Services embedded on the website but absent from the privacy policy.
    for svc in comparison["undocumented"]:
        service_rows.append(ServiceInfo(
            name=svc["name"], category=svc.get("category", "other"),
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=svc.get("eu_adequate", False),
            requires_consent=svc.get("requires_consent", False),
            legal_ref=svc.get("legal_ref", ""), in_dse=False, status="undocumented",
        ))
        issues.append(ScanFinding(
            code=f"DSE-MISSING-{svc['id'].upper()}",
            # Live sites are an actual Art. 13 violation, not just a draft gap.
            severity="HIGH" if is_live else "MEDIUM",
            text=f"{svc['name']} ({svc.get('provider', '')}, {svc.get('country', '')}) "
                 f"ist auf der Website eingebunden aber NICHT in der Datenschutzerklaerung "
                 f"dokumentiert (Art. 13 DSGVO).",
            text_reference=_to_model(svc.get("id", "")),
        ))

    # Services present on the website AND in the DSE — OK, but flag transfers.
    for item in comparison["documented"]:
        svc = item["detected"]
        service_rows.append(ServiceInfo(
            name=svc["name"], category=svc.get("category", "other"),
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=svc.get("eu_adequate", False),
            requires_consent=svc.get("requires_consent", False),
            legal_ref=svc.get("legal_ref", ""), in_dse=True, status="ok",
        ))
        if svc.get("eu_adequate", False):
            continue
        issues.append(ScanFinding(
            code=f"TRANSFER-{svc['id'].upper()}",
            severity="MEDIUM",
            text=f"{svc['name']} ({svc.get('country', '')}) — Drittlandtransfer. "
                 f"Pruefen ob SCCs oder Angemessenheitsbeschluss dokumentiert sind.",
        ))

    # Services still documented in the DSE but no longer found on the website.
    for svc in comparison["outdated"]:
        service_rows.append(ServiceInfo(
            name=svc["name"], category="other",
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=True, requires_consent=False,
            legal_ref="", in_dse=True, status="outdated",
        ))
        issues.append(ScanFinding(
            code=f"DSE-OUTDATED-{svc['name'].upper().replace(' ', '_')[:20]}",
            severity="LOW",
            text=f"{svc['name']} in Datenschutzerklaerung erwaehnt aber auf der Website "
                 f"nicht mehr gefunden. Eintrag bei naechster Aktualisierung entfernen.",
        ))

    # Legally required pages that returned an error status (e.g. 404 Impressum).
    for page_url, status_code in scan.missing_pages.items():
        if "impressum" not in page_url.lower():
            continue
        issues.append(ScanFinding(
            code="MISSING-IMPRESSUM",
            severity="HIGH",
            text=f"Impressum-Seite gibt HTTP {status_code} zurueck (§5 TMG Verstoss).",
        ))

    return service_rows, issues