This repository has been archived on 2026-02-15. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
breakpilot-pwa/backend/ai_processing/html_generator.py
Benjamin Admin bfdaf63ba9 fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 09:51:32 +01:00

212 lines
6.0 KiB
Python

"""
AI Processing - HTML Generator.
Baut saubere HTML-Arbeitsblätter aus Analyse-JSON.
"""
import html
import json
import logging
from pathlib import Path

from .core import BEREINIGT_DIR
logger = logging.getLogger(__name__)
def build_clean_html_from_analysis(analysis_path: Path) -> Path:
    """Build a clean, printable HTML worksheet from a ``*_analyse.json`` file.

    Only printed content (``canonical_text`` / ``printed_blocks``) is rendered;
    handwritten entries and struck-through words detected by the analysis are
    deliberately NOT carried over into the main text. The page uses an
    open-source font stack (Inter / Noto Sans).

    Args:
        analysis_path: Path to the analysis JSON file (``*_analyse.json``).

    Returns:
        Path to the generated ``*_clean.html`` file inside ``BEREINIGT_DIR``.

    Raises:
        FileNotFoundError: If ``analysis_path`` does not exist.
        RuntimeError: If the file does not contain valid JSON.
    """
    if not analysis_path.exists():
        raise FileNotFoundError(f"Analysedatei nicht gefunden: {analysis_path}")
    try:
        data = json.loads(analysis_path.read_text(encoding="utf-8"))
    except json.JSONDecodeError as e:
        raise RuntimeError(f"Analyse-Datei enthält kein gültiges JSON: {analysis_path}\n{e}") from e
    # FIX: all values originating from the analysis JSON are escaped with
    # html.escape() before being interpolated into markup. The data comes from
    # OCR/AI output of scanned documents, so characters like <, > or & would
    # otherwise break the generated HTML (or inject markup into it).
    title = html.escape(data.get("title") or "Arbeitsblatt")
    subject = html.escape(data.get("subject") or "")
    grade_level = html.escape(data.get("grade_level") or "")
    instructions = html.escape(data.get("instructions") or "")
    tasks = data.get("tasks", []) or []
    canonical_text = data.get("canonical_text") or ""
    printed_blocks = data.get("printed_blocks") or []
    struck = data.get("struck_through_words") or []
    html_parts = []
    html_parts.append("<!DOCTYPE html>")
    html_parts.append("<html lang='de'>")
    html_parts.append("<head>")
    html_parts.append("<meta charset='UTF-8'>")
    html_parts.append(f"<title>{title}</title>")
    html_parts.append(
        """
<style>
:root {
--font-main: "Inter", "Noto Sans", system-ui, -apple-system, BlinkMacSystemFont, sans-serif;
}
* { box-sizing: border-box; }
body {
font-family: var(--font-main);
margin: 32px;
line-height: 1.5;
font-size: 14px;
color: #111827;
}
.page {
max-width: 800px;
margin: 0 auto;
}
h1 {
font-size: 24px;
margin-bottom: 4px;
}
h2 {
font-size: 18px;
margin-top: 24px;
}
.meta {
font-size: 12px;
color: #6b7280;
margin-bottom: 16px;
}
.instructions {
margin-bottom: 20px;
padding: 8px 10px;
border-radius: 8px;
background: #eff6ff;
border: 1px solid #bfdbfe;
font-size: 13px;
}
.text-blocks {
margin-bottom: 24px;
}
.text-block {
margin-bottom: 8px;
}
.text-block-title {
font-weight: 600;
margin-bottom: 4px;
}
.task-list {
margin-top: 8px;
}
.task {
margin-bottom: 14px;
padding-bottom: 8px;
border-bottom: 1px dashed #e5e7eb;
}
.task-title {
font-weight: 600;
margin-bottom: 4px;
}
.gap-line {
display: inline-block;
border-bottom: 1px solid #000;
min-width: 80px;
margin: 0 4px;
}
.footnote {
margin-top: 24px;
font-size: 11px;
color: #9ca3af;
}
</style>
"""
    )
    html_parts.append("</head>")
    html_parts.append("<body>")
    html_parts.append("<div class='page'>")
    # Header area: title plus optional subject / grade-level metadata line.
    html_parts.append(f"<h1>{title}</h1>")
    meta_bits = []
    if subject:
        meta_bits.append(f"Fach: {subject}")
    if grade_level:
        meta_bits.append(f"Klassenstufe: {grade_level}")
    if meta_bits:
        html_parts.append(f"<div class='meta'>{' | '.join(meta_bits)}</div>")
    if instructions:
        html_parts.append(
            f"<div class='instructions'><strong>Arbeitsanweisung:</strong> {instructions}</div>"
        )
    # Main text: prefer structured printed blocks; fall back to canonical_text.
    html_parts.append("<section class='text-blocks'>")
    if printed_blocks:
        for block in printed_blocks:
            role = (block.get("role") or "body").lower()
            text = html.escape((block.get("text") or "").strip())
            if not text:
                continue
            html_parts.append("<div class='text-block'>")
            if role == "title":
                html_parts.append(f"<div class='text-block-title'>{text}</div>")
            else:
                html_parts.append(f"<div>{text}</div>")
            html_parts.append("</div>")
    elif canonical_text:
        # Fallback: split canonical_text into paragraphs on blank lines.
        paragraphs = [
            p.strip()
            for p in canonical_text.replace("\r\n", "\n").split("\n\n")
            if p.strip()
        ]
        for p in paragraphs:
            html_parts.append(f"<div class='text-block'>{html.escape(p)}</div>")
    html_parts.append("</section>")
    # Task section: one numbered entry per task from the analysis.
    if tasks:
        html_parts.append("<h2>Aufgaben</h2>")
        html_parts.append("<div class='task-list'>")
        for idx, task in enumerate(tasks, start=1):
            t_type = html.escape(task.get("type") or "other")
            desc = html.escape(task.get("description") or "")
            text_with_gaps = task.get("text_with_gaps")
            html_parts.append("<div class='task'>")
            html_parts.append(
                f"<div class='task-title'>Aufgabe {idx} ({t_type}): {desc}</div>"
            )
            if text_with_gaps:
                # Gaps ("___") become fill-in lines. Escape FIRST so the
                # inserted <span> markup is not escaped away afterwards.
                rendered = html.escape(text_with_gaps).replace(
                    "___", "<span class='gap-line'>&nbsp;</span>"
                )
                html_parts.append(f"<div>{rendered}</div>")
            html_parts.append("</div>")
        html_parts.append("</div>")  # .task-list
    # Small footnote telling the reader how the sheet was produced.
    if struck:
        html_parts.append(
            "<div class='footnote'>Hinweis: Einige im Original durchgestrichene Wörter wurden "
            "von der KI erkannt und NICHT in dieses saubere Arbeitsblatt übernommen.</div>"
        )
    else:
        html_parts.append(
            "<div class='footnote'>Dieses Arbeitsblatt wurde automatisch aus einem Scan rekonstruiert "
            "und von handschriftlichen Eintragungen bereinigt.</div>"
        )
    html_parts.append("</div>")  # .page
    html_parts.append("</body></html>")
    html_content = "\n".join(html_parts)
    # Output name mirrors the input: foo_analyse.json -> foo_clean.html
    out_name = analysis_path.stem.replace("_analyse", "") + "_clean.html"
    out_path = BEREINIGT_DIR / out_name
    out_path.write_text(html_content, encoding="utf-8")
    return out_path