""" Agent Website Scan Routes — deep scan endpoint that performs multi-page website analysis with SOLL/IST service comparison. POST /api/compliance/agent/scan """ import logging import os from datetime import datetime, timezone import httpx from fastapi import APIRouter from pydantic import BaseModel from compliance.services.website_scanner import scan_website, DetectedService from compliance.services.dse_service_extractor import extract_dse_services, compare_services from compliance.services.smtp_sender import send_email from compliance.services.dse_parser import parse_dse from compliance.services.dse_matcher import build_text_references, TextReference from compliance.services.mandatory_content_checker import ( check_mandatory_documents, check_dse_mandatory_content, MandatoryFinding, ) from compliance.services.legal_basis_validator import validate_legal_bases logger = logging.getLogger(__name__) router = APIRouter(prefix="/compliance/agent", tags=["agent"]) SDK_URL = os.environ.get("AI_SDK_URL", "http://bp-compliance-ai-sdk:8090") TENANT_ID = "9282a473-5c95-4b3a-bf78-0ecc0ec71d3e" USER_ID = "00000000-0000-0000-0000-000000000001" SDK_HEADERS = { "Content-Type": "application/json", "X-Tenant-ID": TENANT_ID, "X-User-ID": USER_ID, } class ScanRequest(BaseModel): url: str mode: str = "post_launch" recipient: str = "dsb@breakpilot.local" class ServiceInfo(BaseModel): name: str category: str provider: str country: str eu_adequate: bool requires_consent: bool legal_ref: str in_dse: bool status: str # "ok", "undocumented", "outdated" class TextReferenceModel(BaseModel): found: bool = False source_url: str = "" document_type: str = "Datenschutzerklaerung" section_heading: str = "" section_number: str = "" parent_section: str = "" paragraph_index: int = 0 original_text: str = "" issue: str = "" correction_type: str = "" correction_text: str = "" insert_after: str = "" class ScanFinding(BaseModel): code: str severity: str text: str correction: str = "" text_reference: TextReferenceModel | None = None class ScanResponse(BaseModel): url: str pages_scanned: int pages_list: list[str] = [] services: list[ServiceInfo] findings: list[ScanFinding] ai_detected: bool chatbot_detected: bool chatbot_provider: str missing_pages: dict summary: str email_status: str scanned_at: str @router.post("/scan", response_model=ScanResponse) async def scan_website_endpoint(req: ScanRequest): """Deep website scan: multi-page crawl + SOLL/IST service comparison.""" is_live = req.mode == "post_launch" # Step 1: Scan website — try Playwright first (JS-rendered), fallback to httpx playwright_htmls: dict[str, str] = {} try: async with httpx.AsyncClient(timeout=120.0) as pw_client: pw_resp = await pw_client.post( "http://bp-compliance-consent-tester:8094/website-scan", json={"url": req.url, "max_pages": 15, "click_nav": True}, ) if pw_resp.status_code == 200: pw_data = pw_resp.json() playwright_htmls = pw_data.get("page_htmls", {}) logger.info("Playwright scan: %d pages, %d scripts", pw_data.get("pages_count", 0), len(pw_data.get("external_scripts", []))) except Exception as e: logger.warning("Playwright scanner unavailable, falling back to httpx: %s", e) # Use Playwright results if available, otherwise fall back to httpx scanner if playwright_htmls: # Build ScanResult from Playwright data from compliance.services.website_scanner import ScanResult, DetectedService, _detect_services, _detect_ai_mentions from compliance.services.service_registry import SERVICE_REGISTRY scan = ScanResult() scan.pages_scanned = list(playwright_htmls.keys()) for page_url, html in playwright_htmls.items(): _detect_services(html, page_url, scan) _detect_ai_mentions(html, page_url, scan) # Deduplicate seen = set() unique = [] for svc in scan.detected_services: if svc.id not in seen: seen.add(svc.id) unique.append(svc) scan.detected_services = unique scan.chatbot_detected = any(s.category == "chatbot" for s in scan.detected_services) if scan.chatbot_detected: scan.chatbot_provider = next(s.name for s in scan.detected_services if s.category == "chatbot") else: scan = await scan_website(req.url) logger.info("Scanned %d pages, found %d services", len(scan.pages_scanned), len(scan.detected_services)) # Step 2: Fetch privacy policy text (from Playwright HTMLs or httpx) dse_text = "" for page_url, html in playwright_htmls.items(): if re.search(r"datenschutz|privacy|dsgvo", page_url, re.IGNORECASE): import re as _re clean = _re.sub(r"<(script|style)[^>]*>.*?", "", html, flags=_re.DOTALL | _re.IGNORECASE) clean = _re.sub(r"<[^>]+>", " ", clean) clean = _re.sub(r"\s+", " ", clean).strip() dse_text = clean[:4000] break if not dse_text: dse_text = await _fetch_dse_text(req.url, scan.pages_scanned) # Step 3: Extract services mentioned in DSE via LLM + text fallback dse_services = await extract_dse_services(dse_text) if dse_text else [] logger.info("DSE mentions %d services (LLM)", len(dse_services)) # Fallback: if LLM extraction failed, search DSE text directly for service names if not dse_services and dse_text: dse_lower = dse_text.lower() detected_dicts_for_check = [_service_to_dict(s) for s in scan.detected_services] for svc in detected_dicts_for_check: name = svc.get("name", "").lower() # Check if service name appears in DSE text if name and len(name) > 3 and name in dse_lower: dse_services.append({"name": svc["name"], "purpose": "", "country": svc.get("country", ""), "legal_basis": ""}) if dse_services: logger.info("DSE text fallback found %d services", len(dse_services)) # Step 4: Parse DSE into structured sections (prefer Playwright HTML) dse_html = "" for page_url, html in playwright_htmls.items(): if re.search(r"datenschutz|privacy|dsgvo", page_url, re.IGNORECASE): dse_html = html break if not dse_html: dse_html = await _fetch_dse_html(req.url, scan.pages_scanned) dse_sections = parse_dse(dse_html, req.url) if dse_html else [] logger.info("Parsed %d DSE sections", len(dse_sections)) # Step 5: SOLL/IST comparison detected_dicts = [_service_to_dict(s) for s in scan.detected_services] comparison = compare_services(detected_dicts, dse_services) # Step 6: Build TextReferences for each detected service text_refs = build_text_references(detected_dicts, dse_services, dse_sections, req.url) # Step 7: Generate findings with text references services_info, findings = _build_findings(comparison, scan, is_live, text_refs) # Step 8: Check mandatory content (documents + DSE sections) mandatory_findings = check_mandatory_documents(scan.pages_scanned, scan.missing_pages) mandatory_findings += check_dse_mandatory_content(dse_sections, dse_text) for mf in mandatory_findings: findings.append(ScanFinding( code=mf.code, severity=mf.severity, text=f"{mf.text}" + (f" — {mf.suggestion}" if mf.suggestion else ""), )) # Step 8b: Validate legal bases (lit. a-f) in DSE if dse_text: lit_findings = validate_legal_bases(dse_text) for lf in lit_findings: findings.append(ScanFinding( code=f"LIT-{lf.purpose.upper()}", severity=lf.severity, text=lf.text, text_reference=TextReferenceModel( found=True, source_url=req.url, original_text=lf.original_text, issue="incorrect", correction_type="replace", correction_text=f"Korrekte Rechtsgrundlage: {lf.correct_basis} ({lf.legal_ref})", ) if lf.original_text else None, )) # Step 9: Generate corrections for pre-launch mode if not is_live and findings: await _add_corrections(findings, dse_text) # Step 7: Build summary summary = _build_scan_summary(req.url, scan, comparison, findings, is_live) # Step 8: Send notification mode_label = "INTERNE PRUEFUNG" if not is_live else "LIVE-WEBSITE" email_result = send_email( recipient=req.recipient, subject=f"[{mode_label}] Website-Scan: {req.url[:50]}", body_html=f"
{summary}
", ) return ScanResponse( url=req.url, pages_scanned=len(scan.pages_scanned), pages_list=scan.pages_scanned, services=services_info, findings=findings, ai_detected=len(scan.ai_mentions) > 0, chatbot_detected=scan.chatbot_detected, chatbot_provider=scan.chatbot_provider, missing_pages=scan.missing_pages, summary=summary, email_status=email_result.get("status", "failed"), scanned_at=datetime.now(timezone.utc).isoformat(), ) async def _fetch_dse_text(url: str, scanned_pages: list[str]) -> str: """Find and fetch the privacy policy page text.""" import re # Find DSE URL from scanned pages dse_url = None for page in scanned_pages: if re.search(r"datenschutz|privacy|dsgvo", page, re.IGNORECASE): dse_url = page break if not dse_url: dse_url = url # Fallback to provided URL try: async with httpx.AsyncClient(timeout=15.0, follow_redirects=True) as client: resp = await client.get(dse_url, headers={"User-Agent": "BreakPilot-Compliance-Agent/1.0"}) html = resp.text clean = re.sub(r"<(script|style)[^>]*>.*?", "", html, flags=re.DOTALL | re.IGNORECASE) clean = re.sub(r"<[^>]+>", " ", clean) clean = re.sub(r"\s+", " ", clean).strip() return clean[:4000] except Exception: return "" async def _fetch_dse_html(url: str, scanned_pages: list[str]) -> str: """Fetch the raw HTML of the privacy policy page (for structured parsing).""" import re dse_url = None for page in scanned_pages: if re.search(r"datenschutz|privacy|dsgvo", page, re.IGNORECASE): dse_url = page break if not dse_url: dse_url = url try: async with httpx.AsyncClient(timeout=15.0, follow_redirects=True) as client: resp = await client.get(dse_url, headers={"User-Agent": "BreakPilot-Compliance-Agent/1.0"}) return resp.text except Exception: return "" def _service_to_dict(svc: DetectedService) -> dict: return { "id": svc.id, "name": svc.name, "category": svc.category, "provider": svc.provider, "country": svc.country, "eu_adequate": svc.eu_adequate, "requires_consent": svc.requires_consent, "legal_ref": svc.legal_ref, } def _build_findings( comparison: dict, scan, is_live: bool, text_refs: dict | None = None, ) -> tuple[list[ServiceInfo], list[ScanFinding]]: """Build service info list and findings from comparison.""" services = [] findings = [] text_refs = text_refs or {} def _get_ref(svc_id: str) -> TextReferenceModel | None: ref = text_refs.get(svc_id) if not ref: return None return TextReferenceModel( found=ref.found, source_url=ref.source_url, document_type=ref.document_type, section_heading=ref.section_heading, section_number=ref.section_number, parent_section=ref.parent_section, paragraph_index=ref.paragraph_index, original_text=ref.original_text, issue=ref.issue, correction_type=ref.correction_type, correction_text=ref.correction_text, insert_after=ref.insert_after, ) # Undocumented services (on website, NOT in DSE) for svc in comparison["undocumented"]: services.append(ServiceInfo( name=svc["name"], category=svc.get("category", "other"), provider=svc.get("provider", ""), country=svc.get("country", ""), eu_adequate=svc.get("eu_adequate", False), requires_consent=svc.get("requires_consent", False), legal_ref=svc.get("legal_ref", ""), in_dse=False, status="undocumented", )) severity = "HIGH" if is_live else "MEDIUM" ref = _get_ref(svc.get("id", "")) findings.append(ScanFinding( code=f"DSE-MISSING-{svc['id'].upper()}", severity=severity, text=f"{svc['name']} ({svc.get('provider', '')}, {svc.get('country', '')}) " f"ist auf der Website eingebunden aber NICHT in der Datenschutzerklaerung " f"dokumentiert (Art. 13 DSGVO).", text_reference=ref, )) # Documented services (OK) for item in comparison["documented"]: svc = item["detected"] services.append(ServiceInfo( name=svc["name"], category=svc.get("category", "other"), provider=svc.get("provider", ""), country=svc.get("country", ""), eu_adequate=svc.get("eu_adequate", False), requires_consent=svc.get("requires_consent", False), legal_ref=svc.get("legal_ref", ""), in_dse=True, status="ok", )) # Check third-country transfer if not svc.get("eu_adequate", False): findings.append(ScanFinding( code=f"TRANSFER-{svc['id'].upper()}", severity="MEDIUM", text=f"{svc['name']} ({svc.get('country', '')}) — Drittlandtransfer. " f"Pruefen ob SCCs oder Angemessenheitsbeschluss dokumentiert sind.", )) # Outdated services (in DSE, NOT on website) for svc in comparison["outdated"]: services.append(ServiceInfo( name=svc["name"], category="other", provider=svc.get("provider", ""), country=svc.get("country", ""), eu_adequate=True, requires_consent=False, legal_ref="", in_dse=True, status="outdated", )) findings.append(ScanFinding( code=f"DSE-OUTDATED-{svc['name'].upper().replace(' ', '_')[:20]}", severity="LOW", text=f"{svc['name']} in Datenschutzerklaerung erwaehnt aber auf der Website " f"nicht mehr gefunden. Eintrag bei naechster Aktualisierung entfernen.", )) # Missing pages (e.g., /impressum returns 404) for page_url, status_code in scan.missing_pages.items(): if "impressum" in page_url.lower(): findings.append(ScanFinding( code="MISSING-IMPRESSUM", severity="HIGH", text=f"Impressum-Seite gibt HTTP {status_code} zurueck (§5 TMG Verstoss).", )) return services, findings async def _add_corrections(findings: list[ScanFinding], dse_text: str) -> None: """Add correction suggestions for pre-launch mode via LLM.""" for finding in findings: if finding.severity in ("HIGH", "MEDIUM") and "MISSING" in finding.code: service_name = finding.code.replace("DSE-MISSING-", "").replace("_", " ").title() try: # Call Ollama directly (bypasses SDK RBAC + Think-mode issues) ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434") ollama_model = os.environ.get("OLLAMA_MODEL", "qwen3.5:35b-a3b") async with httpx.AsyncClient(timeout=120.0) as client: resp = await client.post(f"{ollama_url}/api/generate", json={ "model": ollama_model, "prompt": ( f"Erstelle einen einbaufertigen Textbaustein fuer eine deutsche " f"Datenschutzerklaerung fuer den Dienst '{service_name}'. " f"Enthalte: Ueberschrift, Anbietername mit Sitz, Zweck der Verarbeitung, " f"Rechtsgrundlage nach DSGVO, Drittlandtransfer-Hinweis wenn noetig, " f"Widerspruchsmoeglichkeit. Max 150 Woerter. " f"Antworte NUR mit dem fertigen Textbaustein." ), "stream": False, }) data = resp.json() import re raw = data.get("response", "").strip() raw = re.sub(r".*?", "", raw, flags=re.DOTALL).strip() if raw and len(raw) > 50: finding.correction = raw except Exception as e: logger.warning("Correction generation failed for %s: %s", service_name, e) def _build_scan_summary( url: str, scan, comparison: dict, findings: list[ScanFinding], is_live: bool, ) -> str: """Build German scan summary.""" mode = "PRUEFUNG LIVE-WEBSITE" if is_live else "INTERNE PRUEFUNG" n_undoc = len(comparison["undocumented"]) n_ok = len(comparison["documented"]) n_outdated = len(comparison["outdated"]) n_findings = len(findings) high = sum(1 for f in findings if f.severity == "HIGH") parts = [ f"{mode} — Website-Scan", f"URL: {url}", f"Seiten gescannt: {len(scan.pages_scanned)}", ] for page in scan.pages_scanned: status = scan.missing_pages.get(page, 200) marker = "✗" if status >= 400 else "✓" parts.append(f" {marker} {page}" + (f" (HTTP {status})" if status >= 400 else "")) parts.extend([ "", f"Dienstleister-Abgleich (DSE vs. Website):", f" Korrekt dokumentiert: {n_ok}", f" NICHT in DSE (Verstoss): {n_undoc}", f" Veraltet in DSE: {n_outdated}", "", f"Findings: {n_findings} ({high} mit hoher Prioritaet)", ]) if findings: parts.append("") for f in findings[:10]: marker = "!!" if f.severity == "HIGH" else "!" if f.severity == "MEDIUM" else "i" parts.append(f" [{marker}] {f.text}") if is_live and high > 0: parts.extend([ "", "ACHTUNG: Verstoesse auf einer bereits veroeffentlichten Website. " "Sofortige Korrektur empfohlen.", ]) return "\n".join(parts)