"""Agent scan helpers — summary builder and correction generator.

Extracted from agent_scan_routes.py to keep route file under 500 LOC.
"""

import logging
import os
import re

import httpx

logger = logging.getLogger(__name__)

# Matches an LLM "reasoning" block such as the <think>...</think> tags emitted
# by Qwen-family models.
# NOTE(review): the source had r".*?" here, which only matches empty strings
# and made the sub() a no-op — almost certainly the literal tags were stripped
# by an HTML-unaware extraction step. Restored; confirm against actual model
# output.
_THINK_BLOCK_RE = re.compile(r"<think>.*?</think>", re.DOTALL)


async def add_corrections(findings: list, dse_text: str) -> None:
    """Add correction suggestions for pre-launch mode via LLM.

    For every HIGH/MEDIUM finding whose code marks a missing DSE entry
    (``DSE-MISSING-<SERVICE>``), asks the configured Ollama model for a
    ready-to-use German privacy-policy paragraph and attaches it as
    ``finding.correction``. Enrichment is best-effort: any failure is
    logged and the finding is left without a correction.

    Args:
        findings: Finding objects with ``severity`` and ``code`` attributes;
            mutated in place (``correction`` may be set).
        dse_text: Currently unused; kept for interface compatibility.
    """
    # Loop-invariant configuration — read the environment once, not per finding.
    ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434")
    ollama_model = os.environ.get("OLLAMA_MODEL", "qwen3.5:35b-a3b")

    for finding in findings:
        # Guard clause: only HIGH/MEDIUM findings about missing DSE entries.
        if finding.severity not in ("HIGH", "MEDIUM") or "MISSING" not in finding.code:
            continue
        # "DSE-MISSING-GOOGLE_ANALYTICS" -> "Google Analytics"
        service_name = finding.code.replace("DSE-MISSING-", "").replace("_", " ").title()
        try:
            # Generous timeout: local LLM generation can be slow.
            async with httpx.AsyncClient(timeout=120.0) as client:
                resp = await client.post(
                    f"{ollama_url}/api/generate",
                    json={
                        "model": ollama_model,
                        "prompt": (
                            f"Erstelle einen einbaufertigen Textbaustein fuer eine deutsche "
                            f"Datenschutzerklaerung fuer den Dienst '{service_name}'. "
                            f"Enthalte: Ueberschrift, Anbietername mit Sitz, Zweck der Verarbeitung, "
                            f"Rechtsgrundlage nach DSGVO, Drittlandtransfer-Hinweis wenn noetig, "
                            f"Widerspruchsmoeglichkeit. Max 150 Woerter. "
                            f"Antworte NUR mit dem fertigen Textbaustein."
                        ),
                        "stream": False,
                    },
                )
                data = resp.json()
            raw = data.get("response", "").strip()
            # Drop any model reasoning block before using the answer.
            raw = _THINK_BLOCK_RE.sub("", raw).strip()
            # Very short output is noise — only keep substantive suggestions.
            if raw and len(raw) > 50:
                finding.correction = raw
        except Exception as e:  # best-effort: never fail the scan over a correction
            logger.warning("Correction generation failed for %s: %s", service_name, e)


def build_scan_summary(
    url: str,
    scan,
    comparison: dict,
    findings: list,
    is_live: bool,
    discovered_docs: list | None = None,
) -> str:
    """Build German scan summary including DSI document results.

    Args:
        url: The scanned website URL.
        scan: Scan result object with ``pages_scanned`` (list) and
            ``missing_pages`` (page -> HTTP status mapping).
        comparison: Dict with ``documented`` / ``undocumented`` / ``outdated``
            service lists from the DSE-vs-website comparison.
        findings: Finding objects (``severity``, ``text``); only the first 20
            are listed in the summary.
        is_live: Whether the scan targeted a published (live) website.
        discovered_docs: Optional legal-document results with ``title``,
            ``doc_type``, ``word_count``, ``completeness_pct``,
            ``findings_count`` attributes.

    Returns:
        Multi-line German summary text.
    """
    mode = "PRUEFUNG LIVE-WEBSITE" if is_live else "INTERNE PRUEFUNG"
    n_undoc = len(comparison["undocumented"])
    n_ok = len(comparison["documented"])
    n_outdated = len(comparison["outdated"])
    n_findings = len(findings)
    high = sum(1 for f in findings if f.severity == "HIGH")

    parts = [
        f"{mode} — Website-Scan",
        f"URL: {url}",
        f"Seiten gescannt: {len(scan.pages_scanned)}",
    ]

    # Per-page status line: ✗ + HTTP code for errors (>= 400), ✓ otherwise.
    for page in scan.pages_scanned:
        status = scan.missing_pages.get(page, 200)
        failed = status >= 400
        marker = "\u2717" if failed else "\u2713"
        parts.append(f"  {marker} {page}" + (f" (HTTP {status})" if failed else ""))

    parts.extend([
        "",
        "Dienstleister-Abgleich (DSE vs. Website):",
        f"  Korrekt dokumentiert: {n_ok}",
        f"  NICHT in DSE (Verstoss): {n_undoc}",
        f"  Veraltet in DSE: {n_outdated}",
        "",
        f"Findings: {n_findings} ({high} mit hoher Prioritaet)",
    ])

    # DSI documents section — only when legal documents were discovered.
    if discovered_docs:
        parts.extend([
            "",
            f"Rechtliche Dokumente gefunden: {len(discovered_docs)}",
        ])
        for doc in discovered_docs:
            # Attributes may be absent on partial results; fall back to safe defaults.
            pct = getattr(doc, "completeness_pct", 0)
            fc = getattr(doc, "findings_count", 0)
            wc = getattr(doc, "word_count", 0)
            dt = getattr(doc, "doc_type", "unknown")
            title = getattr(doc, "title", "?")
            # Completeness bands: >=80% OK, >=50% incomplete, else deficient.
            status = "OK" if pct >= 80 else "LUECKENHAFT" if pct >= 50 else "MANGELHAFT"
            parts.append(
                f"  [{status}] {title} ({dt}, {wc} Woerter, "
                f"{pct}% vollstaendig, {fc} Maengel)"
            )

    if findings:
        parts.append("")
        # Cap the listing at 20 findings to keep the summary readable.
        for f in findings[:20]:
            sev = getattr(f, "severity", "?")
            txt = getattr(f, "text", None)
            if txt is None:
                txt = str(f)
            marker = "!!" if sev == "HIGH" else "!" if sev == "MEDIUM" else "i"
            parts.append(f"  [{marker}] {txt}")

    # Live sites with HIGH findings get an explicit urgency warning.
    if is_live and high > 0:
        parts.extend([
            "",
            "ACHTUNG: Verstoesse auf einer bereits veroeffentlichten Website. "
            "Sofortige Korrektur empfohlen.",
        ])

    return "\n".join(parts)