breakpilot-compliance/backend-compliance/compliance/api/agent_scan_helpers.py
Benjamin Admin · 7c7513525e · 2026-05-05 09:56:29 +02:00
feat: Document-centric scan results + DSI deduplication
DSI Dedup (consent-tester):
- Only H1/H2 headings count as documents (not H3/H4 sub-sections); the rule
  is sketched below
- Sub-sections (Cookies, Betroffenenrechte, Social Media) are part of the
  parent document's full text, not separate documents
- Reduces the IHK scan result from 30 to ~11 real documents
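
  A minimal sketch of the dedup rule, assuming the scanner walks headings in
  page order as (level, title, text) tuples; names and shapes here are
  illustrative, not the consent-tester's actual API:

    # Hypothetical helper: an H1/H2 opens a new document, H3/H4 text is
    # folded into the parent. The (level, title, text) shape is an assumption.
    def group_documents(headings: list[tuple[int, str, str]]) -> list[dict]:
        docs: list[dict] = []
        for level, title, text in headings:
            if level <= 2:
                docs.append({"title": title, "full_text": text})
            elif docs:
                docs[-1]["full_text"] += "\n" + text
        return docs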

Backend (agent_scan_routes):
- ScanFinding gets a doc_title field linking each finding to its document
  (assumed shape sketched below)
- doc_title is set when creating DSI findings for document attribution
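
  The model itself lives in agent_scan_routes; a plausible shape, with every
  field except the new doc_title assumed for illustration:

    # Assumed ScanFinding shape; the real model is defined in agent_scan_routes.
    from dataclasses import dataclass

    @dataclass
    class ScanFinding:
        code: str             # e.g. "DSE-MISSING-GOOGLE_FONTS"
        severity: str         # "HIGH" | "MEDIUM" | lower levels (assumed)
        text: str             # human-readable finding text
        doc_title: str = ""   # new: attributes the finding to its parent document
        correction: str = ""  # filled by add_corrections() in pre-launch mode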

Frontend (ScanResult.tsx):
- 3 sections: Services table, Document cards, General findings
- Documents: expandable cards with a completeness bar (green/yellow/red;
  threshold sketch below)
- Findings grouped under their parent document
- Each card shows: title, word count, findings count, % completeness
- Findings without a doc_title go to the "Allgemeine Findings" section
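
  ScanResult.tsx itself is TypeScript and not shown here; a Python analogue of
  the completeness-bar colors, assuming the frontend mirrors the 80/50 cutoffs
  used by build_scan_summary below:

    # Assumed to mirror the backend thresholds: >= 80 OK, >= 50 patchy, else poor.
    def completeness_color(pct: int) -> str:
        if pct >= 80:
            return "green"
        if pct >= 50:
            return "yellow"
        return "red"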

Email Summary (agent_scan_helpers):
- Findings listed under their parent document
- General findings in a separate section
- No more flat mixed list (see the usage sketch at the end of this file)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

"""
Agent scan helpers — summary builder and correction generator.
Extracted from agent_scan_routes.py to keep route file under 500 LOC.
"""
import logging
import os
import re

import httpx

logger = logging.getLogger(__name__)

async def add_corrections(findings: list, dse_text: str) -> None:
    """Add correction suggestions for pre-launch mode via LLM."""
    for finding in findings:
        if finding.severity in ("HIGH", "MEDIUM") and "MISSING" in finding.code:
            # Derive a readable service name from codes like "DSE-MISSING-GOOGLE_FONTS".
            service_name = finding.code.replace("DSE-MISSING-", "").replace("_", " ").title()
            try:
                ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434")
                ollama_model = os.environ.get("OLLAMA_MODEL", "qwen3.5:35b-a3b")
                async with httpx.AsyncClient(timeout=120.0) as client:
                    resp = await client.post(f"{ollama_url}/api/generate", json={
                        "model": ollama_model,
                        "prompt": (
                            f"Erstelle einen einbaufertigen Textbaustein fuer eine deutsche "
                            f"Datenschutzerklaerung fuer den Dienst '{service_name}'. "
                            f"Enthalte: Ueberschrift, Anbietername mit Sitz, Zweck der Verarbeitung, "
                            f"Rechtsgrundlage nach DSGVO, Drittlandtransfer-Hinweis wenn noetig, "
                            f"Widerspruchsmoeglichkeit. Max 150 Woerter. "
                            f"Antworte NUR mit dem fertigen Textbaustein."
                        ),
                        "stream": False,
                    })
                data = resp.json()
                raw = data.get("response", "").strip()
                # Strip <think>...</think> blocks that reasoning models may emit.
                raw = re.sub(r"<think>.*?</think>", "", raw, flags=re.DOTALL).strip()
                if raw and len(raw) > 50:  # ignore empty or suspiciously short answers
                    finding.correction = raw
            except Exception as e:
                logger.warning("Correction generation failed for %s: %s", service_name, e)


def build_scan_summary(
    url: str, scan, comparison: dict, findings: list, is_live: bool,
    discovered_docs: list | None = None,
) -> str:
    """Build German scan summary including DSI document results."""
    mode = "PRUEFUNG LIVE-WEBSITE" if is_live else "INTERNE PRUEFUNG"
    n_undoc = len(comparison["undocumented"])
    n_ok = len(comparison["documented"])
    n_outdated = len(comparison["outdated"])
    n_findings = len(findings)
    high = sum(1 for f in findings if f.severity == "HIGH")
    parts = [
        f"{mode} — Website-Scan",
        f"URL: {url}",
        f"Seiten gescannt: {len(scan.pages_scanned)}",
    ]
    for page in scan.pages_scanned:
        status = scan.missing_pages.get(page, 200)
        marker = "\u2717" if status >= 400 else "\u2713"
        parts.append(f" {marker} {page}" + (f" (HTTP {status})" if status >= 400 else ""))
    parts.extend([
        "",
        "Dienstleister-Abgleich (DSE vs. Website):",
        f" Korrekt dokumentiert: {n_ok}",
        f" NICHT in DSE (Verstoss): {n_undoc}",
        f" Veraltet in DSE: {n_outdated}",
        "",
        f"Findings: {n_findings} ({high} mit hoher Prioritaet)",
    ])
    # DSI Documents section — grouped with their findings
    if discovered_docs:
        parts.extend(["", f"Rechtliche Dokumente ({len(discovered_docs)})"])
        # Group findings by doc_title
        doc_findings_map: dict[str, list] = {}
        general_findings: list = []
        for f in findings:
            dt = f.doc_title if hasattr(f, 'doc_title') else ""
            if dt:
                doc_findings_map.setdefault(dt, []).append(f)
            else:
                general_findings.append(f)
        for doc in discovered_docs:
            title = doc.title if hasattr(doc, 'title') else "?"
            pct = doc.completeness_pct if hasattr(doc, 'completeness_pct') else 0
            wc = doc.word_count if hasattr(doc, 'word_count') else 0
            status = "OK" if pct >= 80 else "LUECKENHAFT" if pct >= 50 else "MANGELHAFT"
            parts.append(f" [{status}] {title} ({pct}%, {wc} Woerter)")
            for f in doc_findings_map.get(title, []):
                sev = f.severity if hasattr(f, 'severity') else "?"
                txt = f.text if hasattr(f, 'text') else str(f)
                marker = "!!" if sev == "HIGH" else "!" if sev == "MEDIUM" else "i"
                parts.append(f" {marker} {txt}")
        # General findings (no doc association)
        if general_findings:
            parts.extend(["", "Allgemeine Findings"])
            for f in general_findings[:20]:
                sev = f.severity if hasattr(f, 'severity') else "?"
                txt = f.text if hasattr(f, 'text') else str(f)
                marker = "!!" if sev == "HIGH" else "!" if sev == "MEDIUM" else "i"
                parts.append(f" [{marker}] {txt}")
    elif findings:
        parts.append("")
        for f in findings[:20]:
            sev = f.severity if hasattr(f, 'severity') else "?"
            txt = f.text if hasattr(f, 'text') else str(f)
            marker = "!!" if sev == "HIGH" else "!" if sev == "MEDIUM" else "i"
            parts.append(f" [{marker}] {txt}")
    if is_live and high > 0:
        parts.extend([
            "",
            "ACHTUNG: Verstoesse auf einer bereits veroeffentlichten Website. "
            "Sofortige Korrektur empfohlen.",
        ])
    return "\n".join(parts)


async def fetch_dse_text(url: str, scanned_pages: list[str]) -> str:
    """Find and fetch the privacy policy page text."""
    dse_url = None
    for page in scanned_pages:
        if re.search(r"datenschutz|privacy|dsgvo", page, re.IGNORECASE):
            dse_url = page
            break
    if not dse_url:
        dse_url = url  # no obvious DSE page found, fall back to the start URL
    try:
        async with httpx.AsyncClient(timeout=15.0, follow_redirects=True) as client:
            resp = await client.get(dse_url, headers={"User-Agent": "BreakPilot-Compliance-Agent/1.0"})
            html = resp.text
            # Drop script/style blocks, strip remaining tags, collapse whitespace.
            clean = re.sub(r"<(script|style)[^>]*>.*?</\1>", "", html, flags=re.DOTALL | re.IGNORECASE)
            clean = re.sub(r"<[^>]+>", " ", clean)
            clean = re.sub(r"\s+", " ", clean).strip()
            return clean[:8000]  # return at most the first 8000 characters
    except Exception:
        return ""


async def fetch_dse_html(url: str, scanned_pages: list[str]) -> str:
    """Fetch the raw HTML of the privacy policy page."""
    # Same DSE-page discovery as fetch_dse_text, but returns the raw HTML.
    dse_url = None
    for page in scanned_pages:
        if re.search(r"datenschutz|privacy|dsgvo", page, re.IGNORECASE):
            dse_url = page
            break
    if not dse_url:
        dse_url = url
    try:
        async with httpx.AsyncClient(timeout=15.0, follow_redirects=True) as client:
            resp = await client.get(dse_url, headers={"User-Agent": "BreakPilot-Compliance-Agent/1.0"})
            return resp.text
    except Exception:
        return ""