feat(pipeline): structural metadata end-to-end (Blocks D2-D4)

D2: RAG service stores section/section_title/paragraph/paragraph_num/page
from embedding service chunks_with_metadata into Qdrant payloads.

D3: Control generator prefers section > article > section_title from
Qdrant, adds page to source_citation and generation_metadata.

D4: Validated with real BGB §§ 312-312k text. Found and fixed a critical
bug where Phase 3 overlap destroyed the [§ ...] section prefix, so that
only the first chunk per document carried metadata and all subsequent
chunks lost their section info.

Also fixes pre-existing lint issues (unused imports, ambiguous variable
names, duplicate dict key, bare except).

456 tests passing (58 embedding + 387 pipeline + 11 rag-service).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Benjamin Admin
2026-05-01 20:34:00 +02:00
parent da21339e76
commit 93099b2770
15 changed files with 1086 additions and 25 deletions
+12 -9
View File
@@ -10,8 +10,8 @@ Provides REST endpoints for:
This service handles all ML-heavy operations, keeping the main klausur-service lightweight.
"""
import os
import logging
import re
from typing import List, Optional
from contextlib import asynccontextmanager
@@ -282,8 +282,6 @@ ENGLISH_ABBREVIATIONS = {
ALL_ABBREVIATIONS = GERMAN_ABBREVIATIONS | ENGLISH_ABBREVIATIONS
# Regex pattern for legal section headers (§, Art., Article, Section, etc.)
import re
_LEGAL_SECTION_RE = re.compile(
r'^(?:'
r'§\s*\d+' # § 25, § 5a
@@ -411,8 +409,6 @@ def _parse_section_metadata(header: str) -> dict:
# Find which group matched
for i, g in enumerate(m.groups(), 1):
if g:
# Reconstruct the section reference
prefix = header[:m.start()].strip()
section = header[m.start():m.end()].strip()
break
@@ -577,7 +573,14 @@ def chunk_text_legal(text: str, chunk_size: int, overlap: int) -> List[str]:
if space_idx > 0:
overlap_text = overlap_text[space_idx + 1:]
if overlap_text:
chunk = overlap_text + ' ' + chunk
# Insert overlap AFTER the [§ ...] prefix to preserve it
# for structured metadata extraction
prefix_match = re.match(r'\[.+?\]\s*', chunk)
if prefix_match:
pos = prefix_match.end()
chunk = chunk[:pos] + overlap_text + ' ' + chunk[pos:]
else:
chunk = overlap_text + ' ' + chunk
final_chunks.append(chunk.strip())
return [c for c in final_chunks if c]
@@ -742,13 +745,13 @@ def detect_pdf_backends() -> List[str]:
available = []
try:
from unstructured.partition.pdf import partition_pdf
from unstructured.partition.pdf import partition_pdf # noqa: F401
available.append("unstructured")
except ImportError:
pass
try:
from pypdf import PdfReader
from pypdf import PdfReader # noqa: F401
available.append("pypdf")
except ImportError:
pass
@@ -808,7 +811,7 @@ def extract_pdf_unstructured(pdf_content: bytes) -> ExtractPDFResponse:
import os as os_module
try:
os_module.unlink(tmp_path)
except:
except OSError:
pass