fix: use Ollama directly for correction generation (bypass SDK think-mode)
The SDK LLM chat endpoint returns empty content because of Qwen's think-mode. Calling Ollama's /api/generate directly with stream: false yields the full response, including the <think> tags, which we strip out.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
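For context, a minimal sketch of the stripping step, assuming the usual /api/generate JSON shape with stream: false; the sample response text below is made up for illustration and is not taken from the repo:

    import re

    # Hypothetical /api/generate result; Qwen prefixes its answer with a think block.
    data = {"response": "<think>draft reasoning</think>\nFertiger DSE-Textbaustein ..."}

    raw = data.get("response", "").strip()
    # DOTALL lets '.' span newlines, so multi-line think blocks are removed as well.
    raw = re.sub(r"<think>.*?</think>", "", raw, flags=re.DOTALL).strip()
    print(raw)  # -> "Fertiger DSE-Textbaustein ..."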
@@ -233,30 +233,27 @@ async def _add_corrections(findings: list[ScanFinding], dse_text: str) -> None:
         if finding.severity in ("HIGH", "MEDIUM") and "MISSING" in finding.code:
             service_name = finding.code.replace("DSE-MISSING-", "").replace("_", " ").title()
             try:
-                async with httpx.AsyncClient(timeout=90.0) as client:
-                    resp = await client.post(f"{SDK_URL}/sdk/v1/llm/chat", headers=SDK_HEADERS, json={
-                        "messages": [
-                            {"role": "system", "content": (
-                                "/no_think\n"
-                                "Du bist Datenschutzexperte. Erstelle einen einbaufertigen "
-                                "Textbaustein fuer eine deutsche Datenschutzerklaerung fuer "
-                                f"den Dienst '{service_name}'. Enthalte: Ueberschrift, "
-                                "Anbietername, Zweck, Rechtsgrundlage nach DSGVO, "
-                                "Drittlandtransfer-Hinweis wenn noetig, "
-                                "Widerspruchsmoeglichkeit. Max 150 Woerter."
-                            )},
-                            {"role": "user", "content": f"Erstelle DSE-Textbaustein fuer: {service_name}"},
-                        ],
+                # Call Ollama directly (bypasses SDK RBAC + Think-mode issues)
+                ollama_url = os.environ.get("OLLAMA_URL", "http://host.docker.internal:11434")
+                ollama_model = os.environ.get("OLLAMA_MODEL", "qwen3.5:35b-a3b")
+                async with httpx.AsyncClient(timeout=120.0) as client:
+                    resp = await client.post(f"{ollama_url}/api/generate", json={
+                        "model": ollama_model,
+                        "prompt": (
+                            f"Erstelle einen einbaufertigen Textbaustein fuer eine deutsche "
+                            f"Datenschutzerklaerung fuer den Dienst '{service_name}'. "
+                            f"Enthalte: Ueberschrift, Anbietername mit Sitz, Zweck der Verarbeitung, "
+                            f"Rechtsgrundlage nach DSGVO, Drittlandtransfer-Hinweis wenn noetig, "
+                            f"Widerspruchsmoeglichkeit. Max 150 Woerter. "
+                            f"Antworte NUR mit dem fertigen Textbaustein."
+                        ),
+                        "stream": False,
                     })
                 data = resp.json()
                 import re
-                raw = (
-                    data.get("response", "")
-                    or (data.get("message", {}) or {}).get("content", "")
-                    or ""
-                ).strip()
+                raw = data.get("response", "").strip()
                 raw = re.sub(r"<think>.*?</think>", "", raw, flags=re.DOTALL).strip()
-                if raw:
+                if raw and len(raw) > 50:
                     finding.correction = raw
             except Exception as e:
                 logger.warning("Correction generation failed for %s: %s", service_name, e)