Files
breakpilot-compliance/backend-compliance/compliance/services/website_scanner.py
T
Benjamin Admin 36c6101b91 Merge feat/zeroclaw-compliance-agent into main
Brings all compliance doc-check features:
- 162 regex checks + 1874 Master Controls
- LLM-agnostic agent with tool calling
- Banner check (46 checks, 30 CMPs, stealth, Shadow DOM)
- Impressum check (24 checks)
- Deep consent verification (DataLayer, GCM, TCF)
- CMP E2E tests (39 tests)
- HTML email reports, FAQ, persistent history

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-05-11 11:44:20 +02:00

198 lines
8.0 KiB
Python

"""
Website Scanner — scans multiple pages of a website for third-party services,
chatbots, tracking, AI indicators, and compares against privacy policy.
Used by the Compliance Agent for SOLL/IST analysis.
"""
import logging
import re
from dataclasses import dataclass, field
from urllib.parse import urljoin, urlparse
import httpx
logger = logging.getLogger(__name__)
USER_AGENT = "BreakPilot-Compliance-Agent/1.0"
@dataclass
class DetectedService:
id: str
name: str
category: str # "tracking", "chatbot", "cdn", "payment", "marketing", "other"
provider: str
country: str
eu_adequate: bool
requires_consent: bool
legal_ref: str
found_on: str = "" # URL where detected
@dataclass
class ScanResult:
pages_scanned: list[str] = field(default_factory=list)
detected_services: list[DetectedService] = field(default_factory=list)
ai_mentions: list[str] = field(default_factory=list)
chatbot_detected: bool = False
chatbot_provider: str = ""
missing_pages: dict = field(default_factory=dict) # url -> status_code
# ── Service Registry (imported from master) ──────────────────────────────────
from compliance.services.service_registry import SERVICE_REGISTRY # noqa: E402
AI_TEXT_PATTERNS = [
r"k(?:ue|ü)nstliche.?intelligenz",
r"artificial.?intelligence",
r"machine.?learning",
r"maschinelles.?lernen",
r"KI.?gest(?:ue|ü)tzt",
r"AI.?powered",
r"chatgpt|openai",
r"deep.?learning",
r"neural.?net",
r"automatisierte.?entscheidung",
]
FOOTER_LINK_PATTERNS = [
(r'href="([^"]*(?:impressum|imprint|legal-notice)[^"]*)"', "impressum"),
(r'href="([^"]*(?:datenschutz|privacy|dsgvo|hinweise.?zum.?datenschutz)[^"]*)"', "datenschutz"),
(r'href="([^"]*(?:agb|terms|nutzungsbedingungen)[^"]*)"', "agb"),
(r'href="([^"]*(?:cookie)[^"]*)"', "cookies"),
# Deep DSE links (regional pages, sub-pages, service marks)
(r'href="([^"]*(?:datenschutzinformation|datenschutzerklaerung|datenschutzerkl)[^"]*)"', "datenschutz_deep"),
# Navigation links often contain DSB/privacy sub-pages
(r'href="([^"]*(?:ueber.?uns.*datenschutz|servicemarken.*datenschutz|kontakt.*datenschutz)[^"]*)"', "datenschutz_nav"),
]
async def scan_website(base_url: str) -> ScanResult:
"""Scan a website: start page + footer links for services and AI indicators."""
result = ScanResult()
parsed = urlparse(base_url)
origin = f"{parsed.scheme}://{parsed.netloc}"
async with httpx.AsyncClient(timeout=10.0, follow_redirects=True) as client:
# 1. Fetch start page
start_html = await _fetch_page(client, origin, result)
if not start_html:
return result
# 2. Discover footer links
page_urls = {origin}
page_urls.add(base_url) # Also scan the provided URL
for pattern, _ in FOOTER_LINK_PATTERNS:
for match in re.finditer(pattern, start_html, re.IGNORECASE):
href = match.group(1)
if href.startswith("/"):
href = urljoin(origin, href)
if href.startswith(origin) and not re.search(r"\.(css|js|png|jpg|gif|svg|pdf|zip)(\?|$)", href):
page_urls.add(href)
# 3. Scan all pages in PARALLEL (max 10)
import asyncio
other_urls = [u for u in list(page_urls)[:10] if u != origin]
fetch_tasks = [_fetch_page(client, u, result) for u in other_urls]
other_htmls = await asyncio.gather(*fetch_tasks, return_exceptions=True)
# Process start page
_detect_services(start_html, origin, result)
_detect_ai_mentions(start_html, origin, result)
# Process other pages + discover DSE-internal links
dse_internal_urls = set()
for url, html in zip(other_urls, other_htmls):
if isinstance(html, str) and html:
_detect_services(html, url, result)
_detect_ai_mentions(html, url, result)
# If this is a DSE page, find links within it (SAME DOMAIN only)
if re.search(r"datenschutz|privacy|dsgvo", url, re.IGNORECASE):
for pattern, _ in FOOTER_LINK_PATTERNS:
for match in re.finditer(pattern, html, re.IGNORECASE):
href = match.group(1)
if href.startswith("/"):
href = urljoin(origin, href)
# IMPORTANT: Only follow links on the SAME domain
# External links (etracker.com, google.de) must NOT be scanned
if href.startswith(origin) and href not in page_urls:
dse_internal_urls.add(href)
# 4. Follow DSE-internal links (additional pages linked from privacy policy)
if dse_internal_urls:
extra_urls = [u for u in list(dse_internal_urls)[:5] if u not in page_urls]
if extra_urls:
extra_tasks = [_fetch_page(client, u, result) for u in extra_urls]
extra_htmls = await asyncio.gather(*extra_tasks, return_exceptions=True)
for url, html in zip(extra_urls, extra_htmls):
if isinstance(html, str) and html:
_detect_services(html, url, result)
# Deduplicate services
seen = set()
unique = []
for svc in result.detected_services:
if svc.id not in seen:
seen.add(svc.id)
unique.append(svc)
result.detected_services = unique
result.chatbot_detected = any(s.category == "chatbot" for s in result.detected_services)
if result.chatbot_detected:
result.chatbot_provider = next(
s.name for s in result.detected_services if s.category == "chatbot"
)
return result
async def _fetch_page(
client: httpx.AsyncClient, url: str, result: ScanResult,
) -> str:
"""Fetch a single page. Returns HTML or empty string on failure."""
try:
resp = await client.get(url, headers={"User-Agent": USER_AGENT})
result.pages_scanned.append(url)
if resp.status_code >= 400:
result.missing_pages[url] = resp.status_code
return ""
return resp.text
except Exception as e:
logger.warning("Failed to fetch %s: %s", url, e)
return ""
def _detect_services(html: str, url: str, result: ScanResult) -> None:
"""Detect third-party services in HTML.
Searches script tags + src/href attributes to avoid false positives
from DSE text mentioning services (e.g. 'wir nutzen nicht Google Analytics').
"""
# Extract script content + all src/href attributes for matching
scripts = " ".join(re.findall(r"<script[^>]*>.*?</script>", html, re.DOTALL | re.IGNORECASE))
attrs = " ".join(re.findall(r'(?:src|href|data-src|action)=["\']([^"\']+)["\']', html, re.IGNORECASE))
technical_context = scripts + " " + attrs
for pattern, meta in SERVICE_REGISTRY.items():
# First try in technical context (scripts + URLs) — no false positives
if re.search(pattern, technical_context, re.IGNORECASE):
result.detected_services.append(DetectedService(found_on=url, **meta))
continue
# For patterns that are purely technical (contain special chars), also check full HTML
is_technical = any(c in pattern for c in r"\(\.\/\d{")
if is_technical and re.search(pattern, html, re.IGNORECASE):
result.detected_services.append(DetectedService(found_on=url, **meta))
def _detect_ai_mentions(html: str, url: str, result: ScanResult) -> None:
"""Detect AI/ML text mentions in page content."""
# Strip scripts/styles first for text-only search
clean = re.sub(r"<(script|style)[^>]*>.*?</\1>", "", html, flags=re.DOTALL | re.IGNORECASE)
clean = re.sub(r"<[^>]+>", " ", clean)
for pattern in AI_TEXT_PATTERNS:
match = re.search(pattern, clean, re.IGNORECASE)
if match:
context = clean[max(0, match.start() - 40):match.end() + 40].strip()
result.ai_mentions.append(f"{url}: ...{context}...")