feat: website scanner with SOLL/IST service comparison + corrections

- website_scanner.py: multi-page crawl, 20+ service patterns (tracking,
  CDN, chatbots, payment, fonts, captcha, video), AI text detection
- dse_service_extractor.py: LLM extracts services from privacy policy text
- agent_scan_routes.py: POST /agent/scan — combines scan + DSE comparison,
  generates findings (undocumented, outdated, third-country transfer),
  auto-corrections via Qwen in pre-launch mode (request sketch below)
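
  A minimal request sketch (assumptions: the service is mounted under /api on
  localhost:8000, and "pre_launch" is illustrative — the endpoint treats any
  value other than "post_launch" as pre-launch):

    import asyncio
    import httpx

    async def main() -> None:
        async with httpx.AsyncClient(timeout=120.0) as client:
            resp = await client.post(
                "http://localhost:8000/api/compliance/agent/scan",
                json={"url": "https://example.com", "mode": "pre_launch"},
            )
            print(resp.json()["summary"])

    asyncio.run(main())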

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Benjamin Admin
2026-04-28 15:35:31 +02:00
parent d0dc284cd5
commit 711b9b3146
4 changed files with 679 additions and 0 deletions


@@ -0,0 +1,302 @@
"""
Agent Website Scan Routes — deep scan endpoint that performs multi-page
website analysis with SOLL/IST service comparison.
POST /api/compliance/agent/scan
"""
import logging
import os
import re
from datetime import datetime, timezone

import httpx
from fastapi import APIRouter
from pydantic import BaseModel

from compliance.services.website_scanner import scan_website, DetectedService
from compliance.services.dse_service_extractor import extract_dse_services, compare_services
from compliance.services.smtp_sender import send_email
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/compliance/agent", tags=["agent"])

SDK_URL = os.environ.get("AI_SDK_URL", "http://bp-compliance-ai-sdk:8090")
TENANT_ID = "9282a473-5c95-4b3a-bf78-0ecc0ec71d3e"
USER_ID = "00000000-0000-0000-0000-000000000001"
SDK_HEADERS = {
    "Content-Type": "application/json",
    "X-Tenant-ID": TENANT_ID,
    "X-User-ID": USER_ID,
}
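
# Calls to the internal AI SDK carry a fixed tenant/user identity via the
# headers above; the SDK scopes requests per tenant through X-Tenant-ID.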


class ScanRequest(BaseModel):
    url: str
    mode: str = "post_launch"
    recipient: str = "dsb@breakpilot.local"


class ServiceInfo(BaseModel):
    name: str
    category: str
    provider: str
    country: str
    eu_adequate: bool
    requires_consent: bool
    legal_ref: str
    in_dse: bool
    status: str  # "ok", "undocumented", "outdated"


class ScanFinding(BaseModel):
    code: str
    severity: str
    text: str
    correction: str = ""


class ScanResponse(BaseModel):
    url: str
    pages_scanned: int
    services: list[ServiceInfo]
    findings: list[ScanFinding]
    ai_detected: bool
    chatbot_detected: bool
    chatbot_provider: str
    missing_pages: dict
    summary: str
    email_status: str
    scanned_at: str
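

# SOLL/IST: SOLL ("target state") is the set of services documented in the
# Datenschutzerklaerung (DSE, privacy policy); IST ("actual state") is the set
# of services actually detected on the website. The endpoint below turns
# mismatches in either direction into findings.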


@router.post("/scan", response_model=ScanResponse)
async def scan_website_endpoint(req: ScanRequest):
    """Deep website scan: multi-page crawl + SOLL/IST service comparison."""
    is_live = req.mode == "post_launch"
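    # Any mode other than "post_launch" is treated as a pre-launch (internal)
    # review: severities are softened and corrections are drafted in Step 6.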

    # Step 1: Scan website (5-10 pages)
    scan = await scan_website(req.url)
    logger.info("Scanned %d pages, found %d services", len(scan.pages_scanned), len(scan.detected_services))

    # Step 2: Fetch privacy policy text for SOLL extraction
    dse_text = await _fetch_dse_text(req.url, scan.pages_scanned)

    # Step 3: Extract services mentioned in DSE via LLM
    dse_services = await extract_dse_services(dse_text) if dse_text else []
    logger.info("DSE mentions %d services", len(dse_services))

    # Step 4: SOLL/IST comparison
    detected_dicts = [_service_to_dict(s) for s in scan.detected_services]
    comparison = compare_services(detected_dicts, dse_services)

    # Step 5: Generate findings
    services_info, findings = _build_findings(comparison, scan, is_live)

    # Step 6: Generate corrections for pre-launch mode
    if not is_live and findings:
        await _add_corrections(findings, dse_text)

    # Step 7: Build summary
    summary = _build_scan_summary(req.url, scan, comparison, findings, is_live)

    # Step 8: Send notification
    mode_label = "LIVE-WEBSITE" if is_live else "INTERNE PRUEFUNG"
    email_result = send_email(
        recipient=req.recipient,
        subject=f"[{mode_label}] Website-Scan: {req.url[:50]}",
        body_html=f"<pre>{summary}</pre>",
    )

    return ScanResponse(
        url=req.url,
        pages_scanned=len(scan.pages_scanned),
        services=services_info,
        findings=findings,
        ai_detected=len(scan.ai_mentions) > 0,
        chatbot_detected=scan.chatbot_detected,
        chatbot_provider=scan.chatbot_provider,
        missing_pages=scan.missing_pages,
        summary=summary,
        email_status=email_result.get("status", "failed"),
        scanned_at=datetime.now(timezone.utc).isoformat(),
    )


async def _fetch_dse_text(url: str, scanned_pages: list[str]) -> str:
    """Find and fetch the privacy policy page text."""
    # Find the DSE URL among the scanned pages
    dse_url = None
    for page in scanned_pages:
        if re.search(r"datenschutz|privacy|dsgvo", page, re.IGNORECASE):
            dse_url = page
            break
    if not dse_url:
        dse_url = url  # Fall back to the provided URL
    try:
        async with httpx.AsyncClient(timeout=15.0, follow_redirects=True) as client:
            resp = await client.get(dse_url, headers={"User-Agent": "BreakPilot-Compliance-Agent/1.0"})
        html = resp.text
        # Drop script/style blocks, then all remaining tags, then collapse whitespace
        clean = re.sub(r"<(script|style)[^>]*>.*?</\1>", "", html, flags=re.DOTALL | re.IGNORECASE)
        clean = re.sub(r"<[^>]+>", " ", clean)
        clean = re.sub(r"\s+", " ", clean).strip()
        return clean[:4000]
    except Exception:
        return ""


def _service_to_dict(svc: DetectedService) -> dict:
    return {
        "id": svc.id, "name": svc.name, "category": svc.category,
        "provider": svc.provider, "country": svc.country,
        "eu_adequate": svc.eu_adequate, "requires_consent": svc.requires_consent,
        "legal_ref": svc.legal_ref,
    }
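
# Shape of the compare_services() result as consumed below (inferred from
# usage here; the authoritative definition lives in dse_service_extractor):
#   "undocumented": [service dict, ...]                - on the site, not in the DSE
#   "documented":   [{"detected": service dict}, ...]  - present in both
#   "outdated":     [{"name": ..., ...}, ...]          - in the DSE, not on the site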


def _build_findings(
    comparison: dict, scan, is_live: bool,
) -> tuple[list[ServiceInfo], list[ScanFinding]]:
    """Build service info list and findings from comparison."""
    services = []
    findings = []

    # Undocumented services (on website, NOT in DSE)
    for svc in comparison["undocumented"]:
        services.append(ServiceInfo(
            name=svc["name"], category=svc.get("category", "other"),
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=svc.get("eu_adequate", False),
            requires_consent=svc.get("requires_consent", False),
            legal_ref=svc.get("legal_ref", ""), in_dse=False, status="undocumented",
        ))
        severity = "HIGH" if is_live else "MEDIUM"
        findings.append(ScanFinding(
            code=f"DSE-MISSING-{svc['id'].upper()}",
            severity=severity,
            text=f"{svc['name']} ({svc.get('provider', '')}, {svc.get('country', '')}) "
                 "ist auf der Website eingebunden aber NICHT in der Datenschutzerklaerung "
                 "dokumentiert (Art. 13 DSGVO).",
        ))

    # Documented services (OK)
    for item in comparison["documented"]:
        svc = item["detected"]
        services.append(ServiceInfo(
            name=svc["name"], category=svc.get("category", "other"),
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=svc.get("eu_adequate", False),
            requires_consent=svc.get("requires_consent", False),
            legal_ref=svc.get("legal_ref", ""), in_dse=True, status="ok",
        ))
        # Check third-country transfer
        if not svc.get("eu_adequate", False):
            findings.append(ScanFinding(
                code=f"TRANSFER-{svc['id'].upper()}",
                severity="MEDIUM",
                text=f"{svc['name']} ({svc.get('country', '')}) — Drittlandtransfer. "
                     "Pruefen ob SCCs oder Angemessenheitsbeschluss dokumentiert sind.",
            ))

    # Outdated services (in DSE, NOT on website)
    for svc in comparison["outdated"]:
        services.append(ServiceInfo(
            name=svc["name"], category="other",
            provider=svc.get("provider", ""), country=svc.get("country", ""),
            eu_adequate=True, requires_consent=False,
            legal_ref="", in_dse=True, status="outdated",
        ))
        findings.append(ScanFinding(
            code=f"DSE-OUTDATED-{svc['name'].upper().replace(' ', '_')[:20]}",
            severity="LOW",
            text=f"{svc['name']} in Datenschutzerklaerung erwaehnt aber auf der Website "
                 "nicht mehr gefunden. Eintrag bei naechster Aktualisierung entfernen.",
        ))

    # Missing pages (e.g., /impressum returns 404)
    for page_url, status_code in scan.missing_pages.items():
        if "impressum" in page_url.lower():
            findings.append(ScanFinding(
                code="MISSING-IMPRESSUM",
                severity="HIGH",
                text=f"Impressum-Seite gibt HTTP {status_code} zurueck (§5 TMG Verstoss).",
            ))

    return services, findings
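
# How finding codes round-trip (assuming a scanner id such as
# "google_analytics"): _build_findings() above emits the code
# "DSE-MISSING-GOOGLE_ANALYTICS", from which _add_corrections() below
# reconstructs the display name "Google Analytics".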


async def _add_corrections(findings: list[ScanFinding], dse_text: str) -> None:
    """Add correction suggestions for pre-launch mode via LLM."""
    for finding in findings:
        # Only DSE-MISSING-* findings name a concrete service for which a DSE
        # text block can be drafted; startswith() avoids matching codes such
        # as MISSING-IMPRESSUM.
        if finding.severity in ("HIGH", "MEDIUM") and finding.code.startswith("DSE-MISSING-"):
            service_name = finding.code.replace("DSE-MISSING-", "").replace("_", " ").title()
            try:
                async with httpx.AsyncClient(timeout=30.0) as client:
                    resp = await client.post(f"{SDK_URL}/sdk/v1/llm/chat", headers=SDK_HEADERS, json={
                        "messages": [
                            {"role": "system", "content": (
                                "/no_think\n"
                                "Du bist Datenschutzexperte. Erstelle einen einbaufertigen "
                                "Textbaustein fuer eine deutsche Datenschutzerklaerung fuer "
                                f"den Dienst '{service_name}'. Enthalte: Ueberschrift, "
                                "Anbietername, Zweck, Rechtsgrundlage nach DSGVO, "
                                "Drittlandtransfer-Hinweis wenn noetig, "
                                "Widerspruchsmoeglichkeit. Max 150 Woerter."
                            )},
                            {"role": "user", "content": f"Erstelle DSE-Textbaustein fuer: {service_name}"},
                        ],
                    })
                data = resp.json()
                raw = (
                    data.get("response", "")
                    or (data.get("message", {}) or {}).get("content", "")
                    or ""
                ).strip()
                # Strip <think> blocks that Qwen may emit despite /no_think
                raw = re.sub(r"<think>.*?</think>", "", raw, flags=re.DOTALL).strip()
                if raw:
                    finding.correction = raw
            except Exception as e:
                logger.warning("Correction generation failed for %s: %s", service_name, e)


def _build_scan_summary(
    url: str, scan, comparison: dict, findings: list[ScanFinding], is_live: bool,
) -> str:
    """Build German scan summary."""
    mode = "PRUEFUNG LIVE-WEBSITE" if is_live else "INTERNE PRUEFUNG"
    n_undoc = len(comparison["undocumented"])
    n_ok = len(comparison["documented"])
    n_outdated = len(comparison["outdated"])
    n_findings = len(findings)
    high = sum(1 for f in findings if f.severity == "HIGH")
    parts = [
        f"{mode} — Website-Scan",
        f"URL: {url}",
        f"Seiten gescannt: {len(scan.pages_scanned)}",
        "",
        "Dienstleister-Abgleich (DSE vs. Website):",
        f"  Korrekt dokumentiert: {n_ok}",
        f"  NICHT in DSE (Verstoss): {n_undoc}",
        f"  Veraltet in DSE: {n_outdated}",
        "",
        f"Findings: {n_findings} ({high} mit hoher Prioritaet)",
    ]
    if findings:
        parts.append("")
        for f in findings[:10]:
            marker = "!!" if f.severity == "HIGH" else "!" if f.severity == "MEDIUM" else "i"
            parts.append(f"  [{marker}] {f.text}")
    if is_live and high > 0:
        parts.extend([
            "",
            "ACHTUNG: Verstoesse auf einer bereits veroeffentlichten Website. "
            "Sofortige Korrektur empfohlen.",
        ])
    return "\n".join(parts)