02ff96f74e
Build + Deploy / build-admin-compliance (push) Successful in 2m7s
Build + Deploy / build-backend-compliance (push) Failing after 5m21s
Build + Deploy / build-ai-sdk (push) Successful in 53s
Build + Deploy / build-developer-portal (push) Successful in 1m18s
Build + Deploy / build-tts (push) Successful in 1m42s
Build + Deploy / build-document-crawler (push) Successful in 45s
Build + Deploy / build-dsms-gateway (push) Successful in 27s
Build + Deploy / build-dsms-node (push) Successful in 19s
CI / branch-name (push) Has been skipped
Build + Deploy / trigger-orca (push) Has been skipped
CI / guardrail-integrity (push) Has been skipped
CI / loc-budget (push) Failing after 19s
CI / secret-scan (push) Has been skipped
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / nodejs-build (push) Successful in 3m6s
CI / dep-audit (push) Has been skipped
CI / sbom-scan (push) Has been skipped
CI / test-go (push) Successful in 55s
CI / test-python-backend (push) Successful in 44s
CI / test-python-document-crawler (push) Successful in 30s
CI / test-python-dsms-gateway (push) Successful in 26s
CI / validate-canonical-controls (push) Successful in 18s
9 files had conflict markers from the branch merge. All resolved keeping the feature branch version. Also split agent_scan_routes.py (534→367 LOC) by extracting Pydantic models to agent_scan_models.py. [guardrail-change] Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
368 lines
16 KiB
Python
368 lines
16 KiB
Python
"""
|
|
Agent Website Scan Routes — deep scan endpoint that performs multi-page
|
|
website analysis with SOLL/IST service comparison.
|
|
|
|
POST /api/compliance/agent/scan
|
|
"""
|
|
|
|
import logging
|
|
import os
|
|
import re
|
|
from datetime import datetime, timezone
|
|
|
|
import httpx
|
|
from fastapi import APIRouter
|
|
|
|
from compliance.services.website_scanner import scan_website, DetectedService
|
|
from compliance.services.dse_service_extractor import extract_dse_services, compare_services
|
|
from compliance.services.smtp_sender import send_email
|
|
from compliance.services.dse_parser import parse_dse
|
|
from compliance.services.dse_matcher import build_text_references, TextReference
|
|
from compliance.services.mandatory_content_checker import (
|
|
check_mandatory_documents, check_dse_mandatory_content, MandatoryFinding,
|
|
)
|
|
from compliance.services.legal_basis_validator import validate_legal_bases
|
|
from compliance.api.agent_scan_helpers import add_corrections, build_scan_summary
|
|
from compliance.api.agent_scan_models import (
|
|
ScanRequest, ServiceInfo, TextReferenceModel, ScanFinding,
|
|
DiscoveredDocument, ScanResponse, ScanStartResponse, ScanStatusResponse,
|
|
)
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
router = APIRouter(prefix="/compliance/agent", tags=["agent"])
|
|
|
|
SDK_URL = os.environ.get("AI_SDK_URL", "http://bp-compliance-ai-sdk:8090")
|
|
TENANT_ID = "9282a473-5c95-4b3a-bf78-0ecc0ec71d3e"
|
|
USER_ID = "00000000-0000-0000-0000-000000000001"
|
|
SDK_HEADERS = {
|
|
"Content-Type": "application/json",
|
|
"X-Tenant-ID": TENANT_ID,
|
|
"X-User-ID": USER_ID,
|
|
}
|
|
|
|
|
|
import asyncio
|
|
import uuid as _uuid
|
|
|
|
# In-memory scan job store (survives until container restart)
|
|
_scan_jobs: dict[str, dict] = {}
|
|
|
|
|
|
@router.post("/scan")
|
|
async def scan_website_endpoint(req: ScanRequest):
|
|
"""Start async website scan. Returns scan_id immediately.
|
|
Poll GET /scan/{scan_id} for status and results."""
|
|
scan_id = str(_uuid.uuid4())[:8]
|
|
_scan_jobs[scan_id] = {"status": "running", "progress": "Scan gestartet...", "result": None, "error": ""}
|
|
|
|
# Launch scan in background
|
|
asyncio.create_task(_run_scan(scan_id, req))
|
|
|
|
return ScanStartResponse(scan_id=scan_id, status="running", message="Scan gestartet. Ergebnisse unter GET /scan/{scan_id}")
|
|
|
|
|
|
@router.get("/scan/{scan_id}")
|
|
async def get_scan_status(scan_id: str):
|
|
"""Poll scan status. Returns result when completed."""
|
|
job = _scan_jobs.get(scan_id)
|
|
if not job:
|
|
return {"scan_id": scan_id, "status": "not_found", "error": "Scan-ID nicht gefunden"}
|
|
return ScanStatusResponse(
|
|
scan_id=scan_id,
|
|
status=job["status"],
|
|
progress=job.get("progress", ""),
|
|
result=job.get("result"),
|
|
error=job.get("error", ""),
|
|
)
|
|
|
|
|
|
async def _run_scan(scan_id: str, req: ScanRequest):
    """Background scan task — updates _scan_jobs with progress."""
    try:
        outcome = await _execute_scan(req, scan_id)
        # Success: publish the result and mark the job finished.
        _scan_jobs[scan_id].update(
            status="completed",
            result=outcome,
            progress="Fertig",
        )
    except Exception as exc:
        # Any pipeline failure is recorded on the job (error text truncated).
        logger.error("Scan %s failed: %s", scan_id, exc)
        _scan_jobs[scan_id].update(status="failed", error=str(exc)[:500])

async def _execute_scan(req: ScanRequest, scan_id: str = "") -> ScanResponse:
|
|
"""Execute the full scan pipeline (called as background task)."""
|
|
is_live = req.mode == "post_launch"
|
|
def _progress(msg: str):
|
|
if scan_id and scan_id in _scan_jobs:
|
|
_scan_jobs[scan_id]["progress"] = msg
|
|
|
|
_progress("Schritt 1/7: Website wird gescannt...")
|
|
# Step 1: Scan website — try Playwright first (JS-rendered), fallback to httpx
|
|
playwright_htmls: dict[str, str] = {}
|
|
try:
|
|
async with httpx.AsyncClient(timeout=300.0) as pw_client:
|
|
pw_resp = await pw_client.post(
|
|
"http://bp-compliance-consent-tester:8094/website-scan",
|
|
json={"url": req.url, "max_pages": 50, "click_nav": True},
|
|
)
|
|
if pw_resp.status_code == 200:
|
|
pw_data = pw_resp.json()
|
|
playwright_htmls = pw_data.get("page_htmls", {})
|
|
logger.info("Playwright scan: %d pages, %d scripts",
|
|
pw_data.get("pages_count", 0), len(pw_data.get("external_scripts", [])))
|
|
except Exception as e:
|
|
logger.warning("Playwright scanner unavailable, falling back to httpx: %s", e)
|
|
|
|
# Use Playwright results if available, otherwise fall back to httpx scanner
|
|
if playwright_htmls:
|
|
from compliance.services.website_scanner import ScanResult, _detect_services, _detect_ai_mentions
|
|
scan = ScanResult()
|
|
scan.pages_scanned = list(playwright_htmls.keys())
|
|
for page_url, html in playwright_htmls.items():
|
|
_detect_services(html, page_url, scan)
|
|
_detect_ai_mentions(html, page_url, scan)
|
|
seen = set()
|
|
unique = []
|
|
for svc in scan.detected_services:
|
|
if svc.id not in seen:
|
|
seen.add(svc.id)
|
|
unique.append(svc)
|
|
scan.detected_services = unique
|
|
scan.chatbot_detected = any(s.category == "chatbot" for s in scan.detected_services)
|
|
if scan.chatbot_detected:
|
|
scan.chatbot_provider = next(s.name for s in scan.detected_services if s.category == "chatbot")
|
|
else:
|
|
scan = await scan_website(req.url)
|
|
|
|
logger.info("Scanned %d pages, found %d services", len(scan.pages_scanned), len(scan.detected_services))
|
|
|
|
# Step 1b: DSI Discovery — find all legal documents on the website
|
|
discovered_docs: list[DiscoveredDocument] = []
|
|
dsi_findings: list[ScanFinding] = []
|
|
try:
|
|
async with httpx.AsyncClient(timeout=180.0) as dsi_client:
|
|
dsi_resp = await dsi_client.post(
|
|
"http://bp-compliance-consent-tester:8094/dsi-discovery",
|
|
json={"url": req.url, "max_documents": 20},
|
|
)
|
|
if dsi_resp.status_code == 200:
|
|
dsi_data = dsi_resp.json()
|
|
logger.info("DSI discovery: %d documents found", dsi_data.get("total_found", 0))
|
|
from compliance.services.dsi_document_checker import check_document_completeness, classify_document_type
|
|
for doc in dsi_data.get("documents", []):
|
|
doc_type = classify_document_type(doc["title"], doc["url"])
|
|
doc_findings = check_document_completeness(
|
|
doc.get("text_preview", ""), doc_type, doc["title"], doc["url"],
|
|
)
|
|
score_finding = next((f for f in doc_findings if "SCORE" in f.get("code", "")), None)
|
|
completeness = 0
|
|
if score_finding:
|
|
pct_match = re.search(r"(\d+)%", score_finding.get("text", ""))
|
|
if pct_match:
|
|
completeness = int(pct_match.group(1))
|
|
discovered_docs.append(DiscoveredDocument(
|
|
title=doc["title"], url=doc["url"],
|
|
doc_type=doc_type, language=doc.get("language", ""),
|
|
word_count=doc.get("word_count", 0),
|
|
completeness_pct=completeness,
|
|
findings_count=len([f for f in doc_findings if "SCORE" not in f.get("code", "")]),
|
|
))
|
|
for df in doc_findings:
|
|
if "SCORE" not in df.get("code", ""):
|
|
dsi_findings.append(ScanFinding(code=df["code"], severity=df["severity"], text=df["text"]))
|
|
except Exception as e:
|
|
logger.warning("DSI discovery failed: %s", e)
|
|
|
|
# Step 2: Fetch privacy policy text (from Playwright HTMLs or httpx)
|
|
dse_text = ""
|
|
for page_url, html in playwright_htmls.items():
|
|
if re.search(r"datenschutz|privacy|dsgvo", page_url, re.IGNORECASE):
|
|
clean = re.sub(r"<(script|style)[^>]*>.*?</\1>", "", html, flags=re.DOTALL | re.IGNORECASE)
|
|
clean = re.sub(r"<[^>]+>", " ", clean)
|
|
clean = re.sub(r"\s+", " ", clean).strip()
|
|
dse_text = clean[:4000]
|
|
break
|
|
if not dse_text:
|
|
dse_text = await _fetch_dse_text(req.url, scan.pages_scanned)
|
|
|
|
# Step 3: Extract services mentioned in DSE via LLM + text fallback
|
|
dse_services = await extract_dse_services(dse_text) if dse_text else []
|
|
logger.info("DSE mentions %d services (LLM)", len(dse_services))
|
|
if not dse_services and dse_text:
|
|
dse_lower = dse_text.lower()
|
|
detected_dicts_for_check = [_service_to_dict(s) for s in scan.detected_services]
|
|
for svc in detected_dicts_for_check:
|
|
name = svc.get("name", "").lower()
|
|
if name and len(name) > 3 and name in dse_lower:
|
|
dse_services.append({"name": svc["name"], "purpose": "", "country": svc.get("country", ""), "legal_basis": ""})
|
|
if dse_services:
|
|
logger.info("DSE text fallback found %d services", len(dse_services))
|
|
|
|
# Step 4: Parse DSE into structured sections
|
|
dse_html = ""
|
|
for page_url, html in playwright_htmls.items():
|
|
if re.search(r"datenschutz|privacy|dsgvo", page_url, re.IGNORECASE):
|
|
dse_html = html
|
|
break
|
|
if not dse_html:
|
|
dse_html = await _fetch_dse_html(req.url, scan.pages_scanned)
|
|
dse_sections = parse_dse(dse_html, req.url) if dse_html else []
|
|
|
|
# Step 5-8: Comparison, findings, mandatory checks, legal basis validation
|
|
detected_dicts = [_service_to_dict(s) for s in scan.detected_services]
|
|
comparison = compare_services(detected_dicts, dse_services)
|
|
text_refs = build_text_references(detected_dicts, dse_services, dse_sections, req.url)
|
|
services_info, findings = _build_findings(comparison, scan, is_live, text_refs)
|
|
mandatory_findings = check_mandatory_documents(scan.pages_scanned, scan.missing_pages)
|
|
mandatory_findings += check_dse_mandatory_content(dse_sections, dse_text)
|
|
for mf in mandatory_findings:
|
|
findings.append(ScanFinding(
|
|
code=mf.code, severity=mf.severity,
|
|
text=f"{mf.text}" + (f" — {mf.suggestion}" if mf.suggestion else ""),
|
|
))
|
|
if dse_text:
|
|
for lf in validate_legal_bases(dse_text):
|
|
findings.append(ScanFinding(
|
|
code=f"LIT-{lf.purpose.upper()}", severity=lf.severity, text=lf.text,
|
|
text_reference=TextReferenceModel(
|
|
found=True, source_url=req.url, original_text=lf.original_text,
|
|
issue="incorrect", correction_type="replace",
|
|
correction_text=f"Korrekte Rechtsgrundlage: {lf.correct_basis} ({lf.legal_ref})",
|
|
) if lf.original_text else None,
|
|
))
|
|
findings.extend(dsi_findings)
|
|
if not is_live and findings:
|
|
await add_corrections(findings, dse_text)
|
|
|
|
_progress("Schritt 6/7: Report erstellen...")
|
|
summary = build_scan_summary(req.url, scan, comparison, findings, is_live, discovered_docs)
|
|
|
|
_progress("Schritt 7/7: E-Mail senden...")
|
|
mode_label = "INTERNE PRUEFUNG" if not is_live else "LIVE-WEBSITE"
|
|
email_result = send_email(
|
|
recipient=req.recipient,
|
|
subject=f"[{mode_label}] Website-Scan: {req.url[:50]}",
|
|
body_html=f"<pre>{summary}</pre>",
|
|
)
|
|
|
|
return ScanResponse(
|
|
url=req.url, pages_scanned=len(scan.pages_scanned), pages_list=scan.pages_scanned,
|
|
services=services_info, findings=findings, discovered_documents=discovered_docs,
|
|
ai_detected=len(scan.ai_mentions) > 0, chatbot_detected=scan.chatbot_detected,
|
|
chatbot_provider=scan.chatbot_provider, missing_pages=scan.missing_pages,
|
|
summary=summary, email_status=email_result.get("status", "failed"),
|
|
scanned_at=datetime.now(timezone.utc).isoformat(),
|
|
)
|
|
|
|
|
|
async def _fetch_dse_text(url: str, scanned_pages: list[str]) -> str:
    """Fetch DSE text from the privacy policy page.

    Picks the first scanned page whose URL looks like a privacy policy
    (datenschutz/privacy/dsgvo), falling back to the site root URL.

    Args:
        url: Site root, used when no privacy-policy page was scanned.
        scanned_pages: URLs visited by the website scanner.

    Returns:
        Plain text of the page capped at 4000 chars, or "" on any error
        (callers treat "" as "no DSE text available").
    """
    dse_url = next((p for p in scanned_pages if re.search(r"datenschutz|privacy|dsgvo", p, re.IGNORECASE)), url)
    try:
        async with httpx.AsyncClient(timeout=15.0, follow_redirects=True) as client:
            resp = await client.get(dse_url, headers={"User-Agent": "BreakPilot-Compliance-Agent/1.0"})
            # Strip <script>/<style> blocks before removing tags so JS/CSS
            # source does not pollute the extracted text — same cleaning as
            # the Playwright-based path in _execute_scan.
            clean = re.sub(r"<(script|style)[^>]*>.*?</\1>", "", resp.text, flags=re.DOTALL | re.IGNORECASE)
            clean = re.sub(r"<[^>]+>", " ", clean)
            return re.sub(r"\s+", " ", clean).strip()[:4000]
    except Exception:
        # Best-effort fetch: any network/parse error yields an empty result.
        return ""

async def _fetch_dse_html(url: str, scanned_pages: list[str]) -> str:
    """Fetch the raw HTML of the privacy policy page."""
    # Prefer the first scanned page that looks like a privacy policy;
    # otherwise fall back to the site root URL.
    pattern = re.compile(r"datenschutz|privacy|dsgvo", re.IGNORECASE)
    dse_url = url
    for page in scanned_pages:
        if pattern.search(page):
            dse_url = page
            break
    try:
        async with httpx.AsyncClient(timeout=15.0, follow_redirects=True) as client:
            response = await client.get(dse_url, headers={"User-Agent": "BreakPilot-Compliance-Agent/1.0"})
            return response.text
    except Exception:
        # Best-effort: an empty string signals "no HTML available".
        return ""

def _service_to_dict(svc: DetectedService) -> dict:
    """Project a DetectedService onto the plain-dict shape the comparison helpers expect."""
    fields = (
        "id", "name", "category", "provider",
        "country", "eu_adequate", "requires_consent", "legal_ref",
    )
    return {field: getattr(svc, field) for field in fields}

def _build_findings(
    comparison: dict, scan, is_live: bool, text_refs: dict | None = None,
) -> tuple[list[ServiceInfo], list[ScanFinding]]:
    """Build service info list and findings from comparison.

    Args:
        comparison: compare_services() result with "undocumented",
            "documented" and "outdated" buckets of service dicts.
        scan: Website scan result — only missing_pages is read here.
        is_live: True for post-launch scans; raises the severity of
            DSE-MISSING findings from MEDIUM to HIGH.
        text_refs: Optional map of service id -> TextReference locating
            the service inside the privacy policy (DSE).

    Returns:
        Tuple of (services, findings) lists for the ScanResponse.
    """
    services = []
    findings = []
    text_refs = text_refs or {}

    def _get_ref(svc_id: str) -> TextReferenceModel | None:
        # Convert an internal TextReference into its API model, or None when
        # no reference exists for this service id.
        ref = text_refs.get(svc_id)
        if not ref:
            return None
        return TextReferenceModel(
            found=ref.found, source_url=ref.source_url,
            document_type=ref.document_type, section_heading=ref.section_heading,
            section_number=ref.section_number, parent_section=ref.parent_section,
            paragraph_index=ref.paragraph_index, original_text=ref.original_text,
            issue=ref.issue, correction_type=ref.correction_type,
            correction_text=ref.correction_text, insert_after=ref.insert_after,
        )

    # Services embedded on the website but absent from the privacy policy —
    # an Art. 13 GDPR gap. HIGH on a live site, MEDIUM pre-launch.
    for svc in comparison["undocumented"]:
        services.append(ServiceInfo(
            name=svc["name"], category=svc.get("category", "other"),
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=svc.get("eu_adequate", False),
            requires_consent=svc.get("requires_consent", False),
            legal_ref=svc.get("legal_ref", ""), in_dse=False, status="undocumented",
        ))
        ref = _get_ref(svc.get("id", ""))
        findings.append(ScanFinding(
            code=f"DSE-MISSING-{svc['id'].upper()}",
            severity="HIGH" if is_live else "MEDIUM",
            text=f"{svc['name']} ({svc.get('provider', '')}, {svc.get('country', '')}) "
                 f"ist auf der Website eingebunden aber NICHT in der Datenschutzerklaerung "
                 f"dokumentiert (Art. 13 DSGVO).",
            text_reference=ref,
        ))

    # Documented services are "ok"; those without EU adequacy still get a
    # MEDIUM third-country-transfer finding (check SCCs / adequacy decision).
    for item in comparison["documented"]:
        svc = item["detected"]
        services.append(ServiceInfo(
            name=svc["name"], category=svc.get("category", "other"),
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=svc.get("eu_adequate", False),
            requires_consent=svc.get("requires_consent", False),
            legal_ref=svc.get("legal_ref", ""), in_dse=True, status="ok",
        ))
        if not svc.get("eu_adequate", False):
            findings.append(ScanFinding(
                code=f"TRANSFER-{svc['id'].upper()}", severity="MEDIUM",
                text=f"{svc['name']} ({svc.get('country', '')}) — Drittlandtransfer. "
                     f"Pruefen ob SCCs oder Angemessenheitsbeschluss dokumentiert sind.",
            ))

    # Services still mentioned in the DSE but no longer found on the website —
    # stale entries, LOW-severity housekeeping.
    for svc in comparison["outdated"]:
        services.append(ServiceInfo(
            name=svc["name"], category="other",
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=True, requires_consent=False,
            legal_ref="", in_dse=True, status="outdated",
        ))
        findings.append(ScanFinding(
            # Outdated entries come from the DSE, so there is no service id —
            # derive a code from the (sanitized, truncated) name instead.
            code=f"DSE-OUTDATED-{svc['name'].upper().replace(' ', '_')[:20]}",
            severity="LOW",
            text=f"{svc['name']} in Datenschutzerklaerung erwaehnt aber auf der Website "
                 f"nicht mehr gefunden. Eintrag bei naechster Aktualisierung entfernen.",
        ))

    # A broken/missing Impressum page violates §5 TMG.
    for page_url, status_code in scan.missing_pages.items():
        if "impressum" in page_url.lower():
            findings.append(ScanFinding(
                code="MISSING-IMPRESSUM", severity="HIGH",
                text=f"Impressum-Seite gibt HTTP {status_code} zurueck (§5 TMG Verstoss).",
            ))

    return services, findings
