feat: DSI document discovery + completeness check in agent scan workflow

Agent scan now automatically:
1. Discovers all legal documents via the consent-tester /dsi-discovery endpoint
   (call shape sketched after this list)
2. Classifies each as DSE/AGB/Widerruf/Cookie/Impressum
3. Checks completeness against type-specific checklists:
   - DSE: 9 Art. 13 DSGVO mandatory fields (controller, DPO, purposes,
     legal basis, recipients, third-country, retention, rights, complaint)
   - AGB: §305ff BGB (scope, contract formation, liability, jurisdiction)
   - Widerruf: §355 BGB (right info, 14-day deadline, form, consequences)
4. Adds findings per document to scan results
5. Shows discovered documents with completeness % in email summary
6. Returns discovered_documents list in API response
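
The /dsi-discovery call shape, as a hedged sketch reconstructed from the
consuming code in the diff below; response fields beyond those the route
actually reads are not assumed:

    import httpx

    async def discover_documents(site_url: str) -> list[dict]:
        """Sketch of step 1: ask consent-tester for a site's legal documents."""
        # Timeout mirrors the 180 s budget the route gives this call.
        async with httpx.AsyncClient(timeout=180.0) as client:
            resp = await client.post(
                "http://bp-compliance-consent-tester:8094/dsi-discovery",
                json={"url": site_url, "max_documents": 20},
            )
            resp.raise_for_status()
            data = resp.json()
        # Fields the route reads:
        #   data["total_found"] -> int
        #   data["documents"]   -> [{"title", "url", "text_preview",
        #                            "language", "word_count"}, ...]
        return data.get("documents", [])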

New files:
- dsi_document_checker.py (229 LOC) — checklists + classifier (interface sketched below)
- agent_scan_helpers.py (109 LOC) — extracted summary builder + corrections
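
The dsi_document_checker.py diff itself is not shown here, so the following
is a minimal, hypothetical sketch of its interface: the signatures and the
finding-dict shape ({"code", "severity", "text"}, including one summary
SCORE finding whose text carries the "NN%" the route parses out) match the
calling code below; the keyword lists and scoring are illustrative only.

    DSE_CHECKLIST: dict[str, tuple[str, ...]] = {
        # Art. 13 DSGVO mandatory fields -> trigger keywords (illustrative).
        "DSE-CONTROLLER": ("verantwortlich",),
        "DSE-LEGAL-BASIS": ("rechtsgrundlage", "art. 6"),
        "DSE-RETENTION": ("speicherdauer", "aufbewahrung"),
        # ... DPO, purposes, recipients, third-country, rights, complaint
    }

    def classify_document_type(title: str, url: str) -> str:
        """Map a document to DSE/AGB/Widerruf/Cookie/Impressum via keywords."""
        haystack = f"{title} {url}".lower()
        for doc_type, needles in (
            ("DSE", ("datenschutz", "privacy")),
            ("AGB", ("agb", "bedingungen", "terms")),
            ("Widerruf", ("widerruf",)),
            ("Cookie", ("cookie",)),
            ("Impressum", ("impressum", "imprint")),
        ):
            if any(n in haystack for n in needles):
                return doc_type
        return "unknown"

    def check_document_completeness(
        text: str, doc_type: str, title: str, url: str,
    ) -> list[dict]:
        """Check text against the type's checklist; append one SCORE finding."""
        checklist = DSE_CHECKLIST if doc_type == "DSE" else {}
        lowered = text.lower()
        findings: list[dict] = []
        hits = 0
        for code, needles in checklist.items():
            if any(n in lowered for n in needles):
                hits += 1
            else:
                findings.append({
                    "code": f"{code}-MISSING", "severity": "HIGH",
                    "text": f"{title}: Pflichtangabe fehlt ({code}): {url}",
                })
        pct = round(100 * hits / len(checklist)) if checklist else 0
        findings.append({
            "code": f"{doc_type}-SCORE", "severity": "INFO",
            "text": f"{title}: {pct}% vollstaendig",
        })
        return findings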

Refactor: agent_scan_routes.py 537→448 LOC (under 500 budget)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Author: Benjamin Admin
Date:   2026-05-04 22:09:45 +02:00
Parent: a6618af5ed
Commit: 53f6f30cf0

3 changed files with 402 additions and 78 deletions
--- /dev/null
+++ b/compliance/api/agent_scan_helpers.py
@@ -0,0 +1,109 @@
"""
Agent scan helpers — summary builder and correction generator.
Extracted from agent_scan_routes.py to keep route file under 500 LOC.
"""
import logging
import os
import re

import httpx

logger = logging.getLogger(__name__)


async def add_corrections(findings: list, dse_text: str) -> None:
    """Add correction suggestions for pre-launch mode via LLM."""
    for finding in findings:
        if finding.severity in ("HIGH", "MEDIUM") and "MISSING" in finding.code:
            service_name = finding.code.replace("DSE-MISSING-", "").replace("_", " ").title()
            try:
                ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434")
                ollama_model = os.environ.get("OLLAMA_MODEL", "qwen3.5:35b-a3b")
                async with httpx.AsyncClient(timeout=120.0) as client:
                    resp = await client.post(f"{ollama_url}/api/generate", json={
                        "model": ollama_model,
                        "prompt": (
                            f"Erstelle einen einbaufertigen Textbaustein fuer eine deutsche "
                            f"Datenschutzerklaerung fuer den Dienst '{service_name}'. "
                            f"Enthalte: Ueberschrift, Anbietername mit Sitz, Zweck der Verarbeitung, "
                            f"Rechtsgrundlage nach DSGVO, Drittlandtransfer-Hinweis wenn noetig, "
                            f"Widerspruchsmoeglichkeit. Max 150 Woerter. "
                            f"Antworte NUR mit dem fertigen Textbaustein."
                        ),
                        "stream": False,
                    })
                data = resp.json()
                raw = data.get("response", "").strip()
                # Drop any <think>...</think> reasoning block before using the text.
                raw = re.sub(r"<think>.*?</think>", "", raw, flags=re.DOTALL).strip()
                if raw and len(raw) > 50:
                    finding.correction = raw
            except Exception as e:
                logger.warning("Correction generation failed for %s: %s", service_name, e)


def build_scan_summary(
    url: str, scan, comparison: dict, findings: list, is_live: bool,
    discovered_docs: list | None = None,
) -> str:
    """Build German scan summary including DSI document results."""
    mode = "PRUEFUNG LIVE-WEBSITE" if is_live else "INTERNE PRUEFUNG"
    n_undoc = len(comparison["undocumented"])
    n_ok = len(comparison["documented"])
    n_outdated = len(comparison["outdated"])
    n_findings = len(findings)
    high = sum(1 for f in findings if f.severity == "HIGH")
    parts = [
        f"{mode} — Website-Scan",
        f"URL: {url}",
        f"Seiten gescannt: {len(scan.pages_scanned)}",
    ]
    for page in scan.pages_scanned:
        status = scan.missing_pages.get(page, 200)
        marker = "\u2717" if status >= 400 else "\u2713"
        parts.append(f" {marker} {page}" + (f" (HTTP {status})" if status >= 400 else ""))
    parts.extend([
        "",
        "Dienstleister-Abgleich (DSE vs. Website):",
        f" Korrekt dokumentiert: {n_ok}",
        f" NICHT in DSE (Verstoss): {n_undoc}",
        f" Veraltet in DSE: {n_outdated}",
        "",
        f"Findings: {n_findings} ({high} mit hoher Prioritaet)",
    ])
    # DSI documents section
    if discovered_docs:
        parts.extend([
            "",
            f"Rechtliche Dokumente gefunden: {len(discovered_docs)}",
        ])
        for doc in discovered_docs:
            pct = getattr(doc, "completeness_pct", 0)
            fc = getattr(doc, "findings_count", 0)
            wc = getattr(doc, "word_count", 0)
            status = "OK" if pct >= 80 else "LUECKENHAFT" if pct >= 50 else "MANGELHAFT"
            dt = getattr(doc, "doc_type", "unknown")
            title = getattr(doc, "title", "?")
            parts.append(
                f" [{status}] {title} ({dt}, {wc} Woerter, "
                f"{pct}% vollstaendig, {fc} Maengel)"
            )
    if findings:
        parts.append("")
        for f in findings[:20]:
            sev = getattr(f, "severity", "?")
            txt = getattr(f, "text", str(f))
            marker = "!!" if sev == "HIGH" else "!" if sev == "MEDIUM" else "i"
            parts.append(f" [{marker}] {txt}")
    if is_live and high > 0:
        parts.extend([
            "",
            "ACHTUNG: Verstoesse auf einer bereits veroeffentlichten Website. "
            "Sofortige Korrektur empfohlen.",
        ])
    return "\n".join(parts)

--- a/compliance/api/agent_scan_routes.py
+++ b/compliance/api/agent_scan_routes.py
@@ -22,6 +22,7 @@ from compliance.services.mandatory_content_checker import (
    check_mandatory_documents, check_dse_mandatory_content, MandatoryFinding,
)
from compliance.services.legal_basis_validator import validate_legal_bases
from compliance.api.agent_scan_helpers import add_corrections, build_scan_summary

logger = logging.getLogger(__name__)
@@ -78,12 +79,23 @@ class ScanFinding(BaseModel):
    text_reference: TextReferenceModel | None = None


class DiscoveredDocument(BaseModel):
    title: str
    url: str
    doc_type: str
    language: str = ""
    word_count: int = 0
    completeness_pct: int = 0
    findings_count: int = 0


class ScanResponse(BaseModel):
    url: str
    pages_scanned: int
    pages_list: list[str] = []
    services: list[ServiceInfo]
    findings: list[ScanFinding]
    discovered_documents: list[DiscoveredDocument] = []
    ai_detected: bool
    chatbot_detected: bool
    chatbot_provider: str
@@ -140,6 +152,52 @@ async def scan_website_endpoint(req: ScanRequest):
    logger.info("Scanned %d pages, found %d services", len(scan.pages_scanned), len(scan.detected_services))

    # Step 1b: DSI Discovery — find all legal documents on the website
    discovered_docs: list[DiscoveredDocument] = []
    dsi_findings: list[ScanFinding] = []
    try:
        async with httpx.AsyncClient(timeout=180.0) as dsi_client:
            dsi_resp = await dsi_client.post(
                "http://bp-compliance-consent-tester:8094/dsi-discovery",
                json={"url": req.url, "max_documents": 20},
            )
            if dsi_resp.status_code == 200:
                dsi_data = dsi_resp.json()
                logger.info("DSI discovery: %d documents found", dsi_data.get("total_found", 0))
                # Check each document against its legal requirements
                from compliance.services.dsi_document_checker import (
                    check_document_completeness, classify_document_type,
                )
                for doc in dsi_data.get("documents", []):
                    doc_type = classify_document_type(doc["title"], doc["url"])
                    doc_findings = check_document_completeness(
                        doc.get("text_preview", ""), doc_type, doc["title"], doc["url"],
                    )
                    # Pull the completeness percentage out of the summary SCORE finding.
                    score_finding = next((f for f in doc_findings if "SCORE" in f.get("code", "")), None)
                    completeness = 0
                    if score_finding:
                        import re as _re2
                        pct_match = _re2.search(r"(\d+)%", score_finding.get("text", ""))
                        if pct_match:
                            completeness = int(pct_match.group(1))
                    discovered_docs.append(DiscoveredDocument(
                        title=doc["title"], url=doc["url"],
                        doc_type=doc_type, language=doc.get("language", ""),
                        word_count=doc.get("word_count", 0),
                        completeness_pct=completeness,
                        findings_count=len([f for f in doc_findings if "SCORE" not in f.get("code", "")]),
                    ))
                    for df in doc_findings:
                        if "SCORE" not in df.get("code", ""):
                            dsi_findings.append(ScanFinding(
                                code=df["code"], severity=df["severity"], text=df["text"],
                            ))
    except Exception as e:
        logger.warning("DSI discovery failed: %s", e)
    # Step 2: Fetch privacy policy text (from Playwright HTMLs or httpx)
    dse_text = ""
    for page_url, html in playwright_htmls.items():
@@ -215,12 +273,15 @@
                ) if lf.original_text else None,
            ))

    # Step 8c: Add DSI document findings
    findings.extend(dsi_findings)

    # Step 9: Generate corrections for pre-launch mode
    if not is_live and findings:
-        await _add_corrections(findings, dse_text)
+        await add_corrections(findings, dse_text)

    # Step 7: Build summary
-    summary = _build_scan_summary(req.url, scan, comparison, findings, is_live)
+    summary = build_scan_summary(req.url, scan, comparison, findings, is_live, discovered_docs)

    # Step 8: Send notification
    mode_label = "INTERNE PRUEFUNG" if not is_live else "LIVE-WEBSITE"
@@ -236,6 +297,7 @@
        pages_list=scan.pages_scanned,
        services=services_info,
        findings=findings,
        discovered_documents=discovered_docs,
        ai_detected=len(scan.ai_mentions) > 0,
        chatbot_detected=scan.chatbot_detected,
        chatbot_provider=scan.chatbot_provider,
@@ -384,79 +446,3 @@ def _build_findings(
     return services, findings
-
-
-async def _add_corrections(findings: list[ScanFinding], dse_text: str) -> None:
-    """Add correction suggestions for pre-launch mode via LLM."""
-    for finding in findings:
-        if finding.severity in ("HIGH", "MEDIUM") and "MISSING" in finding.code:
-            service_name = finding.code.replace("DSE-MISSING-", "").replace("_", " ").title()
-            try:
-                # Call Ollama directly (bypasses SDK RBAC + Think-mode issues)
-                ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434")
-                ollama_model = os.environ.get("OLLAMA_MODEL", "qwen3.5:35b-a3b")
-                async with httpx.AsyncClient(timeout=120.0) as client:
-                    resp = await client.post(f"{ollama_url}/api/generate", json={
-                        "model": ollama_model,
-                        "prompt": (
-                            f"Erstelle einen einbaufertigen Textbaustein fuer eine deutsche "
-                            f"Datenschutzerklaerung fuer den Dienst '{service_name}'. "
-                            f"Enthalte: Ueberschrift, Anbietername mit Sitz, Zweck der Verarbeitung, "
-                            f"Rechtsgrundlage nach DSGVO, Drittlandtransfer-Hinweis wenn noetig, "
-                            f"Widerspruchsmoeglichkeit. Max 150 Woerter. "
-                            f"Antworte NUR mit dem fertigen Textbaustein."
-                        ),
-                        "stream": False,
-                    })
-                data = resp.json()
-                import re
-                raw = data.get("response", "").strip()
-                raw = re.sub(r"<think>.*?</think>", "", raw, flags=re.DOTALL).strip()
-                if raw and len(raw) > 50:
-                    finding.correction = raw
-            except Exception as e:
-                logger.warning("Correction generation failed for %s: %s", service_name, e)
-
-
-def _build_scan_summary(
-    url: str, scan, comparison: dict, findings: list[ScanFinding], is_live: bool,
-) -> str:
-    """Build German scan summary."""
-    mode = "PRUEFUNG LIVE-WEBSITE" if is_live else "INTERNE PRUEFUNG"
-    n_undoc = len(comparison["undocumented"])
-    n_ok = len(comparison["documented"])
-    n_outdated = len(comparison["outdated"])
-    n_findings = len(findings)
-    high = sum(1 for f in findings if f.severity == "HIGH")
-    parts = [
-        f"{mode} — Website-Scan",
-        f"URL: {url}",
-        f"Seiten gescannt: {len(scan.pages_scanned)}",
-    ]
-    for page in scan.pages_scanned:
-        status = scan.missing_pages.get(page, 200)
-        marker = "✗" if status >= 400 else "✓"
-        parts.append(f" {marker} {page}" + (f" (HTTP {status})" if status >= 400 else ""))
-    parts.extend([
-        "",
-        f"Dienstleister-Abgleich (DSE vs. Website):",
-        f" Korrekt dokumentiert: {n_ok}",
-        f" NICHT in DSE (Verstoss): {n_undoc}",
-        f" Veraltet in DSE: {n_outdated}",
-        "",
-        f"Findings: {n_findings} ({high} mit hoher Prioritaet)",
-    ])
-    if findings:
-        parts.append("")
-        for f in findings[:10]:
-            marker = "!!" if f.severity == "HIGH" else "!" if f.severity == "MEDIUM" else "i"
-            parts.append(f" [{marker}] {f.text}")
-    if is_live and high > 0:
-        parts.extend([
-            "",
-            "ACHTUNG: Verstoesse auf einer bereits veroeffentlichten Website. "
-            "Sofortige Korrektur empfohlen.",
-        ])
-    return "\n".join(parts)