Files
breakpilot-lehrer/klausur-service/backend/orientation_crop_api.py
Benjamin Admin 902de027f4
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 28s
CI / test-go-edu-search (push) Successful in 29s
CI / test-python-klausur (push) Failing after 2m0s
CI / test-python-agent-core (push) Successful in 17s
CI / test-nodejs-website (push) Successful in 19s
feat: auto-detect multi-page spreads and split into sub-sessions
When a book scan (double-page spread) is detected during the crop step,
the system automatically:
1. Detects vertical center gaps (spine area) via ink density projection
2. Splits into N page sub-sessions (reusing existing sub-session mechanism)
3. Individually crops each page (removing its own borders)
4. Returns sub-session IDs for downstream pipeline processing

Detection: landscape images (w > h * 1.15), vertical gap < 15% peak
density in center region (25-75%), gap width >= 0.8% of image width.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-17 16:34:06 +01:00

485 lines
15 KiB
Python

"""
Orientation & Crop API - Steps 1 and 4 of the OCR Pipeline.
Step 1: Orientation detection (fix 90/180/270 degree rotations)
Step 4 (UI index 3): Page cropping (after deskew + dewarp, so the image is straight)
These endpoints were extracted from the main pipeline to keep files manageable.
"""
import logging
import time
import uuid as uuid_mod
from typing import Any, Dict, List, Optional
import cv2
import numpy as np
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from cv_vocab_pipeline import detect_and_fix_orientation
from page_crop import detect_and_crop_page, detect_page_splits
from ocr_pipeline_session_store import (
create_session_db,
get_session_db,
get_session_image,
get_sub_sessions,
update_session_db,
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/v1/ocr-pipeline", tags=["ocr-pipeline"])
# Reference to the shared cache from ocr_pipeline_api (set in main.py)
_cache: Dict[str, Dict[str, Any]] = {}
def set_cache_ref(cache: Dict[str, Any]):
    """Point this module at the session cache shared with ocr_pipeline_api.

    Called once during application startup (main.py) so that both routers
    operate on the same in-memory cache dict.
    """
    global _cache
    _cache = cache
async def _ensure_cached(session_id: str) -> Dict[str, Any]:
    """Return the cache entry for *session_id*, hydrating from the DB on a miss.

    Raises:
        HTTPException: 404 when the session does not exist in the DB.

    On a cache miss the session row is loaded and every persisted PNG
    variant (original/oriented/cropped/deskewed/dewarped) is decoded back
    into a BGR array so downstream steps can work on it directly.
    """
    entry = _cache.get(session_id)
    if entry is not None:
        return entry

    session = await get_session_db(session_id)
    if not session:
        raise HTTPException(status_code=404, detail=f"Session {session_id} not found")

    entry = {
        "id": session_id,
        **session,
        "original_bgr": None,
        "oriented_bgr": None,
        "cropped_bgr": None,
        "deskewed_bgr": None,
        "dewarped_bgr": None,
    }
    # Decode each stored image variant (if present) into its cache slot.
    image_slots = (
        ("original", "original_bgr"),
        ("oriented", "oriented_bgr"),
        ("cropped", "cropped_bgr"),
        ("deskewed", "deskewed_bgr"),
        ("dewarped", "dewarped_bgr"),
    )
    for img_type, slot in image_slots:
        png_data = await get_session_image(session_id, img_type)
        if png_data:
            buf = np.frombuffer(png_data, dtype=np.uint8)
            entry[slot] = cv2.imdecode(buf, cv2.IMREAD_COLOR)

    _cache[session_id] = entry
    return entry
async def _append_pipeline_log(session_id: str, step: str, metrics: dict, duration_ms: int):
    """Append a completed-step entry to the session's pipeline log.

    Args:
        session_id: Session whose log is extended.
        step: Pipeline step name (e.g. "orientation", "crop").
        metrics: Step-specific metrics, stored verbatim in the entry.
        duration_ms: Step duration in milliseconds.

    Best-effort: silently returns if the session no longer exists.
    """
    from datetime import datetime, timezone
    session = await get_session_db(session_id)
    if not session:
        return
    pipeline_log = session.get("pipeline_log") or {"steps": []}
    pipeline_log["steps"].append({
        "step": step,
        # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
        # since Python 3.12. NOTE: the ISO string now carries a "+00:00"
        # offset suffix, which remains valid ISO 8601 for consumers.
        "completed_at": datetime.now(timezone.utc).isoformat(),
        "success": True,
        "duration_ms": duration_ms,
        "metrics": metrics,
    })
    await update_session_db(session_id, pipeline_log=pipeline_log)
# ---------------------------------------------------------------------------
# Step 1: Orientation
# ---------------------------------------------------------------------------
@router.post("/sessions/{session_id}/orientation")
async def detect_orientation(session_id: str):
    """Detect and correct 90/180/270 degree rotations from scanners.

    Loads the session's original image, applies orientation correction,
    persists the result as the oriented image, and advances the session
    to step 2. Returns the correction metadata plus image dimensions.
    """
    cached = await _ensure_cached(session_id)
    original = cached.get("original_bgr")
    if original is None:
        raise HTTPException(status_code=400, detail="Original image not available")

    started = time.time()
    oriented_bgr, orientation_deg = detect_and_fix_orientation(original.copy())
    elapsed = time.time() - started

    result = {
        "orientation_degrees": orientation_deg,
        "corrected": orientation_deg != 0,
        "duration_seconds": round(elapsed, 2),
    }

    # Encode for persistence; an encode failure stores an empty blob.
    ok, encoded = cv2.imencode(".png", oriented_bgr)
    oriented_png = encoded.tobytes() if ok else b""

    # Keep the in-memory cache in sync with what we persist.
    cached["oriented_bgr"] = oriented_bgr
    cached["orientation_result"] = result

    await update_session_db(
        session_id,
        oriented_png=oriented_png,
        orientation_result=result,
        current_step=2,
    )
    logger.info(
        "OCR Pipeline: orientation session %s: %d° (%s) in %.2fs",
        session_id, orientation_deg,
        "corrected" if orientation_deg else "no change",
        elapsed,
    )
    await _append_pipeline_log(session_id, "orientation", {
        "orientation_degrees": orientation_deg,
        "corrected": orientation_deg != 0,
    }, duration_ms=int(elapsed * 1000))

    h, w = oriented_bgr.shape[:2]
    return {
        "session_id": session_id,
        **result,
        "image_width": w,
        "image_height": h,
        "oriented_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/oriented",
    }
# ---------------------------------------------------------------------------
# Step 4 (UI index 3): Crop — runs after deskew + dewarp
# ---------------------------------------------------------------------------
@router.post("/sessions/{session_id}/crop")
async def auto_crop(session_id: str):
    """Auto-detect and crop scanner/book borders.

    Reads the dewarped image (post-deskew + dewarp, so the page is straight).
    Falls back to oriented → original if earlier steps were skipped.

    If the image is a multi-page spread (e.g. book on scanner), it will
    automatically split into separate sub-sessions per page, crop each
    individually, and return the split info.

    Returns crop metadata, final image dimensions, the cropped-image URL,
    and — for multi-page spreads — the created sub-sessions.
    """
    cached = await _ensure_cached(session_id)
    # Use dewarped (preferred), fall back to oriented, then original
    img_bgr = next(
        (v for k in ("dewarped_bgr", "oriented_bgr", "original_bgr")
         if (v := cached.get(k)) is not None),
        None,
    )
    if img_bgr is None:
        raise HTTPException(status_code=400, detail="No image available for cropping")
    t0 = time.time()
    # --- Multi-page detection ---
    # detect_page_splits (page_crop) returns per-page bounding boxes when
    # the image looks like a spread; fewer than 2 splits falls through to
    # the normal single-page path below.
    page_splits = detect_page_splits(img_bgr)
    if page_splits and len(page_splits) >= 2:
        # Multi-page spread detected — create sub-sessions
        sub_sessions = await _create_page_sub_sessions(
            session_id, cached, img_bgr, page_splits,
        )
        duration = time.time() - t0
        crop_info: Dict[str, Any] = {
            "crop_applied": True,
            "multi_page": True,
            "page_count": len(page_splits),
            "page_splits": page_splits,
            "duration_seconds": round(duration, 2),
        }
        cached["crop_result"] = crop_info
        # Store the first page as the main cropped image for backward compat
        first_page = page_splits[0]
        first_bgr = img_bgr[
            first_page["y"]:first_page["y"] + first_page["height"],
            first_page["x"]:first_page["x"] + first_page["width"],
        ].copy()
        # Re-crop the first page region on its own to strip its borders.
        first_cropped, _ = detect_and_crop_page(first_bgr)
        cached["cropped_bgr"] = first_cropped
        ok, png_buf = cv2.imencode(".png", first_cropped)
        await update_session_db(
            session_id,
            cropped_png=png_buf.tobytes() if ok else b"",
            crop_result=crop_info,
            current_step=5,
        )
        logger.info(
            "OCR Pipeline: crop session %s: multi-page split into %d pages in %.2fs",
            session_id, len(page_splits), duration,
        )
        await _append_pipeline_log(session_id, "crop", {
            "multi_page": True,
            "page_count": len(page_splits),
        }, duration_ms=int(duration * 1000))
        h, w = first_cropped.shape[:2]
        return {
            "session_id": session_id,
            **crop_info,
            "image_width": w,
            "image_height": h,
            "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
            "sub_sessions": sub_sessions,
        }
    # --- Single page (normal) ---
    cropped_bgr, crop_info = detect_and_crop_page(img_bgr)
    duration = time.time() - t0
    crop_info["duration_seconds"] = round(duration, 2)
    crop_info["multi_page"] = False
    # Encode cropped image
    success, png_buf = cv2.imencode(".png", cropped_bgr)
    cropped_png = png_buf.tobytes() if success else b""
    # Update cache
    cached["cropped_bgr"] = cropped_bgr
    cached["crop_result"] = crop_info
    # Persist to DB
    await update_session_db(
        session_id,
        cropped_png=cropped_png,
        crop_result=crop_info,
        current_step=5,
    )
    logger.info(
        "OCR Pipeline: crop session %s: applied=%s format=%s in %.2fs",
        session_id, crop_info["crop_applied"],
        crop_info.get("detected_format", "?"),
        duration,
    )
    await _append_pipeline_log(session_id, "crop", {
        "crop_applied": crop_info["crop_applied"],
        "detected_format": crop_info.get("detected_format"),
        "format_confidence": crop_info.get("format_confidence"),
    }, duration_ms=int(duration * 1000))
    h, w = cropped_bgr.shape[:2]
    return {
        "session_id": session_id,
        **crop_info,
        "image_width": w,
        "image_height": h,
        "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
    }
async def _create_page_sub_sessions(
    parent_session_id: str,
    parent_cached: dict,
    full_img_bgr: np.ndarray,
    page_splits: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Create one sub-session per detected page of a multi-page spread.

    Each page region is cut out of *full_img_bgr*, individually cropped
    (removing that page's own borders), and persisted as a sub-session
    whose cropped image is already populated — ready for the rest of the
    pipeline to resume at the post-crop step.

    Idempotent: if sub-sessions already exist for the parent, they are
    returned unchanged instead of being recreated.
    """
    existing = await get_sub_sessions(parent_session_id)
    if existing:
        # Already split previously — just report the existing sub-sessions.
        return [
            {"id": s["id"], "name": s["name"], "page_index": s.get("box_index", i)}
            for i, s in enumerate(existing)
        ]

    base_name = parent_cached.get("name", "Scan")
    base_filename = parent_cached.get("filename", "scan.png")
    created: List[Dict[str, Any]] = []

    for split in page_splits:
        idx = split["page_index"]
        x0, y0 = split["x"], split["y"]
        width, height = split["width"], split["height"]

        # Cut the page region out of the full spread.
        region = full_img_bgr[y0:y0 + height, x0:x0 + width].copy()
        # Crop this page individually to strip its own borders.
        page_img, page_crop_info = detect_and_crop_page(region)

        encoded_ok, buf = cv2.imencode(".png", page_img)
        page_png = buf.tobytes() if encoded_ok else b""

        sub_id = str(uuid_mod.uuid4())
        sub_name = f"{base_name} — Seite {idx + 1}"
        await create_session_db(
            session_id=sub_id,
            name=sub_name,
            filename=base_filename,
            original_png=page_png,
            parent_session_id=parent_session_id,
            box_index=idx,
        )
        # The sub-session starts post-crop: cropped image == original image.
        await update_session_db(
            sub_id,
            cropped_png=page_png,
            crop_result=page_crop_info,
            current_step=5,
        )

        ph, pw = page_img.shape[:2]
        created.append({
            "id": sub_id,
            "name": sub_name,
            "page_index": idx,
            "source_rect": split,
            "cropped_size": {"width": pw, "height": ph},
            "detected_format": page_crop_info.get("detected_format"),
        })
        logger.info(
            "Page sub-session %s: page %d, region x=%d w=%d -> cropped %dx%d",
            sub_id, idx + 1, x0, width, pw, ph,
        )

    return created
class ManualCropRequest(BaseModel):
    """Request body for the manual-crop endpoint.

    All values are percentages (0-100) of the source image size, with
    (x, y) measured from the top-left corner. The endpoint converts them
    to pixels and clamps the rectangle to the image bounds.
    """
    x: float  # left edge, percentage 0-100
    y: float  # top edge, percentage 0-100
    width: float  # percentage 0-100
    height: float  # percentage 0-100
@router.post("/sessions/{session_id}/crop/manual")
async def manual_crop(session_id: str, req: ManualCropRequest):
    """Crop the session image to a user-supplied rectangle.

    The rectangle arrives as percentages of the source image; it is
    converted to pixels and clamped to the image bounds before cropping.
    Persists the result and advances the session to step 5.
    """
    cached = await _ensure_cached(session_id)
    img_bgr = next(
        (v for k in ("dewarped_bgr", "oriented_bgr", "original_bgr")
         if (v := cached.get(k)) is not None),
        None,
    )
    if img_bgr is None:
        raise HTTPException(status_code=400, detail="No image available for cropping")

    h, w = img_bgr.shape[:2]
    # Percentages -> pixels, clamped so the rectangle stays inside the image
    # (at least 1x1 px).
    left = max(0, min(int(w * req.x / 100.0), w - 1))
    top = max(0, min(int(h * req.y / 100.0), h - 1))
    rect_w = max(1, min(int(w * req.width / 100.0), w - left))
    rect_h = max(1, min(int(h * req.height / 100.0), h - top))

    cropped_bgr = img_bgr[top:top + rect_h, left:left + rect_w].copy()
    ok, png_buf = cv2.imencode(".png", cropped_bgr)
    cropped_png = png_buf.tobytes() if ok else b""

    crop_result = {
        "crop_applied": True,
        "crop_rect": {"x": left, "y": top, "width": rect_w, "height": rect_h},
        "crop_rect_pct": {"x": round(req.x, 2), "y": round(req.y, 2),
                          "width": round(req.width, 2), "height": round(req.height, 2)},
        "original_size": {"width": w, "height": h},
        "cropped_size": {"width": rect_w, "height": rect_h},
        "method": "manual",
    }
    cached["cropped_bgr"] = cropped_bgr
    cached["crop_result"] = crop_result
    await update_session_db(
        session_id,
        cropped_png=cropped_png,
        crop_result=crop_result,
        current_step=5,
    )

    ch, cw = cropped_bgr.shape[:2]
    return {
        "session_id": session_id,
        **crop_result,
        "image_width": cw,
        "image_height": ch,
        "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
    }
@router.post("/sessions/{session_id}/crop/skip")
async def skip_crop(session_id: str):
    """Skip cropping — use dewarped (or oriented/original) image as-is."""
    cached = await _ensure_cached(session_id)
    img_bgr = next(
        (v for k in ("dewarped_bgr", "oriented_bgr", "original_bgr")
         if (v := cached.get(k)) is not None),
        None,
    )
    if img_bgr is None:
        raise HTTPException(status_code=400, detail="No image available")

    h, w = img_bgr.shape[:2]
    # Identity crop: persist the current image unchanged as "cropped".
    ok, png_buf = cv2.imencode(".png", img_bgr)
    crop_result = {
        "crop_applied": False,
        "skipped": True,
        "original_size": {"width": w, "height": h},
        "cropped_size": {"width": w, "height": h},
    }
    cached["cropped_bgr"] = img_bgr
    cached["crop_result"] = crop_result
    await update_session_db(
        session_id,
        cropped_png=png_buf.tobytes() if ok else b"",
        crop_result=crop_result,
        current_step=5,
    )
    return {
        "session_id": session_id,
        **crop_result,
        "image_width": w,
        "image_height": h,
        "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
    }