feat: auto-detect multi-page spreads and split into sub-sessions
Some checks failed
CI / go-lint (push) Has been skipped
CI / python-lint (push) Has been skipped
CI / nodejs-lint (push) Has been skipped
CI / test-go-school (push) Successful in 28s
CI / test-go-edu-search (push) Successful in 29s
CI / test-python-klausur (push) Failing after 2m0s
CI / test-python-agent-core (push) Successful in 17s
CI / test-nodejs-website (push) Successful in 19s
When a book scan (double-page spread) is detected during the crop step, the system automatically:

1. Detects vertical center gaps (spine area) via ink density projection
2. Splits into N page sub-sessions (reusing the existing sub-session mechanism)
3. Crops each page individually (removing its own borders)
4. Returns sub-session IDs for downstream pipeline processing

Detection criteria: the image is landscape (w > h * 1.15), the vertical gap's ink density is below 15% of the peak within the center region (25-75% of the width), and the gap is at least 0.8% of the image width wide.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
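For reference, the following is a minimal sketch of the gap-detection heuristic described above, assuming OpenCV and NumPy. It is not the actual `detect_page_splits` from `page_crop.py`: the function name, the binarization threshold (128), and the run-grouping details are assumptions; only the three numeric criteria come from the commit message.

```python
import cv2
import numpy as np
from typing import Any, Dict, List


def detect_page_splits_sketch(img_bgr: np.ndarray) -> List[Dict[str, Any]]:
    """Sketch: find a low-ink vertical gap (book spine) in the center
    of a landscape scan and return one rect per page, else []."""
    h, w = img_bgr.shape[:2]
    if w <= h * 1.15:  # criterion 1: landscape images only
        return []

    # Column-wise ink density: count dark pixels per column.
    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    ink = (gray < 128).sum(axis=0).astype(np.float64)  # threshold 128 is an assumption
    peak = ink.max()
    if peak == 0:
        return []

    # Criterion 2: columns under 15% of peak density, in the 25-75% center band.
    lo, hi = int(w * 0.25), int(w * 0.75)
    gap_cols = [x for x in range(lo, hi) if ink[x] < 0.15 * peak]
    if not gap_cols:
        return []

    # Group gap columns into contiguous runs and take the widest one.
    runs, start = [], gap_cols[0]
    for prev, cur in zip(gap_cols, gap_cols[1:]):
        if cur != prev + 1:
            runs.append((start, prev))
            start = cur
    runs.append((start, gap_cols[-1]))
    g0, g1 = max(runs, key=lambda r: r[1] - r[0])

    # Criterion 3: the gap must span at least 0.8% of the image width.
    if g1 - g0 + 1 < w * 0.008:
        return []

    mid = (g0 + g1) // 2  # split at the middle of the spine gap
    return [
        {"page_index": 0, "x": 0, "y": 0, "width": mid, "height": h},
        {"page_index": 1, "x": mid, "y": 0, "width": w - mid, "height": h},
    ]
```

The real implementation may smooth the column projection or handle more than two pages, but the rect shape returned here matches what the endpoint code in the diff below consumes.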
@@ -9,7 +9,8 @@ These endpoints were extracted from the main pipeline to keep files manageable.

 import logging
 import time
-from typing import Any, Dict, Optional
+import uuid as uuid_mod
+from typing import Any, Dict, List, Optional

 import cv2
 import numpy as np
@@ -17,10 +18,12 @@ from fastapi import APIRouter, HTTPException
 from pydantic import BaseModel

 from cv_vocab_pipeline import detect_and_fix_orientation
-from page_crop import detect_and_crop_page
+from page_crop import detect_and_crop_page, detect_page_splits
 from ocr_pipeline_session_store import (
+    create_session_db,
     get_session_db,
     get_session_image,
+    get_sub_sessions,
     update_session_db,
 )

@@ -170,6 +173,10 @@ async def auto_crop(session_id: str):

     Reads the dewarped image (post-deskew + dewarp, so the page is straight).
     Falls back to oriented → original if earlier steps were skipped.
+
+    If the image is a multi-page spread (e.g. book on scanner), it will
+    automatically split into separate sub-sessions per page, crop each
+    individually, and return the split info.
     """
     cached = await _ensure_cached(session_id)

@@ -184,10 +191,68 @@ async def auto_crop(session_id: str):

     t0 = time.time()

+    # --- Multi-page detection ---
+    page_splits = detect_page_splits(img_bgr)
+
+    if page_splits and len(page_splits) >= 2:
+        # Multi-page spread detected — create sub-sessions
+        sub_sessions = await _create_page_sub_sessions(
+            session_id, cached, img_bgr, page_splits,
+        )
+        duration = time.time() - t0
+
+        crop_info: Dict[str, Any] = {
+            "crop_applied": True,
+            "multi_page": True,
+            "page_count": len(page_splits),
+            "page_splits": page_splits,
+            "duration_seconds": round(duration, 2),
+        }
+        cached["crop_result"] = crop_info
+
+        # Store the first page as the main cropped image for backward compat
+        first_page = page_splits[0]
+        first_bgr = img_bgr[
+            first_page["y"]:first_page["y"] + first_page["height"],
+            first_page["x"]:first_page["x"] + first_page["width"],
+        ].copy()
+        first_cropped, _ = detect_and_crop_page(first_bgr)
+        cached["cropped_bgr"] = first_cropped
+
+        ok, png_buf = cv2.imencode(".png", first_cropped)
+        await update_session_db(
+            session_id,
+            cropped_png=png_buf.tobytes() if ok else b"",
+            crop_result=crop_info,
+            current_step=5,
+        )
+
+        logger.info(
+            "OCR Pipeline: crop session %s: multi-page split into %d pages in %.2fs",
+            session_id, len(page_splits), duration,
+        )
+
+        await _append_pipeline_log(session_id, "crop", {
+            "multi_page": True,
+            "page_count": len(page_splits),
+        }, duration_ms=int(duration * 1000))
+
+        h, w = first_cropped.shape[:2]
+        return {
+            "session_id": session_id,
+            **crop_info,
+            "image_width": w,
+            "image_height": h,
+            "cropped_image_url": f"/api/v1/ocr-pipeline/sessions/{session_id}/image/cropped",
+            "sub_sessions": sub_sessions,
+        }
+
+    # --- Single page (normal) ---
     cropped_bgr, crop_info = detect_and_crop_page(img_bgr)

     duration = time.time() - t0
     crop_info["duration_seconds"] = round(duration, 2)
+    crop_info["multi_page"] = False

     # Encode cropped image
     success, png_buf = cv2.imencode(".png", cropped_bgr)
@@ -228,6 +293,83 @@ async def auto_crop(session_id: str):
     }


+async def _create_page_sub_sessions(
+    parent_session_id: str,
+    parent_cached: dict,
+    full_img_bgr: np.ndarray,
+    page_splits: List[Dict[str, Any]],
+) -> List[Dict[str, Any]]:
+    """Create sub-sessions for each detected page in a multi-page spread.
+
+    Each page region is individually cropped, then stored as a sub-session
+    with its own cropped image ready for the rest of the pipeline.
+    """
+    # Check for existing sub-sessions (idempotent)
+    existing = await get_sub_sessions(parent_session_id)
+    if existing:
+        return [
+            {"id": s["id"], "name": s["name"], "page_index": s.get("box_index", i)}
+            for i, s in enumerate(existing)
+        ]
+
+    parent_name = parent_cached.get("name", "Scan")
+    parent_filename = parent_cached.get("filename", "scan.png")
+
+    sub_sessions: List[Dict[str, Any]] = []
+
+    for page in page_splits:
+        pi = page["page_index"]
+        px, py = page["x"], page["y"]
+        pw, ph = page["width"], page["height"]
+
+        # Extract page region
+        page_bgr = full_img_bgr[py:py + ph, px:px + pw].copy()
+
+        # Crop each page individually (remove its own borders)
+        cropped_page, page_crop_info = detect_and_crop_page(page_bgr)
+
+        # Encode as PNG
+        ok, png_buf = cv2.imencode(".png", cropped_page)
+        page_png = png_buf.tobytes() if ok else b""
+
+        sub_id = str(uuid_mod.uuid4())
+        sub_name = f"{parent_name} — Seite {pi + 1}"
+
+        await create_session_db(
+            session_id=sub_id,
+            name=sub_name,
+            filename=parent_filename,
+            original_png=page_png,
+            parent_session_id=parent_session_id,
+            box_index=pi,
+        )
+
+        # Pre-populate: set cropped = original (already cropped)
+        await update_session_db(
+            sub_id,
+            cropped_png=page_png,
+            crop_result=page_crop_info,
+            current_step=5,
+        )
+
+        ch, cw = cropped_page.shape[:2]
+        sub_sessions.append({
+            "id": sub_id,
+            "name": sub_name,
+            "page_index": pi,
+            "source_rect": page,
+            "cropped_size": {"width": cw, "height": ch},
+            "detected_format": page_crop_info.get("detected_format"),
+        })
+
+        logger.info(
+            "Page sub-session %s: page %d, region x=%d w=%d -> cropped %dx%d",
+            sub_id, pi + 1, px, pw, cw, ch,
+        )
+
+    return sub_sessions
+
+
 class ManualCropRequest(BaseModel):
     x: float  # percentage 0-100
     y: float  # percentage 0-100
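For orientation, a successful multi-page crop response would look roughly like the Python literal below. All concrete values (IDs, pixel sizes, detected format) are illustrative, not taken from a real run; the keys mirror the return statement in the diff above.

```python
# Illustrative shape of the auto-crop response for a two-page spread.
# Every concrete value below is made up; the keys mirror the endpoint's return dict.
example_response = {
    "session_id": "1f0c3e9a-0000-0000-0000-000000000000",
    "crop_applied": True,
    "multi_page": True,
    "page_count": 2,
    "page_splits": [
        {"page_index": 0, "x": 0, "y": 0, "width": 1480, "height": 2100},
        {"page_index": 1, "x": 1480, "y": 0, "width": 1500, "height": 2100},
    ],
    "duration_seconds": 0.84,
    "image_width": 1404,   # first page after its own border crop
    "image_height": 2032,
    "cropped_image_url": "/api/v1/ocr-pipeline/sessions/1f0c3e9a-.../image/cropped",
    "sub_sessions": [
        {
            "id": "9a2e5b70-0000-0000-0000-000000000000",
            "name": "Scan — Seite 1",
            "page_index": 0,
            "source_rect": {"page_index": 0, "x": 0, "y": 0, "width": 1480, "height": 2100},
            "cropped_size": {"width": 1404, "height": 2032},
            "detected_format": "A5",
        },
        # ... one entry per detected page ...
    ],
}
```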